1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #ifndef NDEBUG
26 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
27 "%g0",
28 "%g1",
29 "%g2",
30 "%g3",
31 "%g4",
32 "%g5",
33 "%g6",
34 "%g7",
35 "%o0",
36 "%o1",
37 "%o2",
38 "%o3",
39 "%o4",
40 "%o5",
41 "%o6",
42 "%o7",
43 "%l0",
44 "%l1",
45 "%l2",
46 "%l3",
47 "%l4",
48 "%l5",
49 "%l6",
50 "%l7",
51 "%i0",
52 "%i1",
53 "%i2",
54 "%i3",
55 "%i4",
56 "%i5",
57 "%i6",
58 "%i7",
59 };
60 #endif
61
62 #define ARG_OFFSET 1
63
64 static const int tcg_target_reg_alloc_order[] = {
65 TCG_REG_L0,
66 TCG_REG_L1,
67 TCG_REG_L2,
68 TCG_REG_L3,
69 TCG_REG_L4,
70 TCG_REG_L5,
71 TCG_REG_L6,
72 TCG_REG_L7,
73 TCG_REG_I0,
74 TCG_REG_I1,
75 TCG_REG_I2,
76 TCG_REG_I3,
77 TCG_REG_I4,
78 };
79
80 static const int tcg_target_call_iarg_regs[6] = {
81 TCG_REG_O0,
82 TCG_REG_O1,
83 TCG_REG_O2,
84 TCG_REG_O3,
85 TCG_REG_O4,
86 TCG_REG_O5,
87 };
88
89 static const int tcg_target_call_oarg_regs[] = {
90 TCG_REG_O0,
91 TCG_REG_O1,
92 TCG_REG_O2,
93 TCG_REG_O3,
94 };
95
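/* A worked example of the fit test below: check_fit_tl(-200, 11) shifts
   the value left and then arithmetically right by (bits-of-long - 11);
   any value representable as an 11-bit signed immediate survives the
   round trip, so it returns 1, while check_fit_tl(0x1000, 11) comes
   back changed and returns 0. */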
96 static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
97 {
98 return (val << ((sizeof(tcg_target_long) * 8 - bits))
99 >> (sizeof(tcg_target_long) * 8 - bits)) == val;
100 }
101
102 static inline int check_fit_i32(uint32_t val, unsigned int bits)
103 {
104 return ((val << (32 - bits)) >> (32 - bits)) == val;
105 }
106
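/* Relocations are resolved in place: R_SPARC_32 stores an absolute
   32-bit value, while the R_SPARC_WDISP22/WDISP19 branch forms store a
   signed word displacement (the byte offset from the branch insn, >> 2)
   into the low 22 or 19 bits of the existing instruction. */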
107 static void patch_reloc(uint8_t *code_ptr, int type,
108 tcg_target_long value, tcg_target_long addend)
109 {
110 value += addend;
111 switch (type) {
112 case R_SPARC_32:
113 if (value != (uint32_t)value)
114 tcg_abort();
115 *(uint32_t *)code_ptr = value;
116 break;
117 case R_SPARC_WDISP22:
118 value -= (long)code_ptr;
119 value >>= 2;
120 if (!check_fit_tl(value, 22))
121 tcg_abort();
122 *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x3fffff) | value;
123 break;
124 case R_SPARC_WDISP19:
125 value -= (long)code_ptr;
126 value >>= 2;
127 if (!check_fit_tl(value, 19))
128 tcg_abort();
129 *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x7ffff) | value;
130 break;
131 default:
132 tcg_abort();
133 }
134 }
135
136 /* maximum number of registers used for input function arguments */
137 static inline int tcg_target_get_call_iarg_regs_count(int flags)
138 {
139 return 6;
140 }
141
142 /* parse target specific constraints */
143 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
144 {
145 const char *ct_str;
146
147 ct_str = *pct_str;
148 switch (ct_str[0]) {
149 case 'r':
150 ct->ct |= TCG_CT_REG;
151 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
152 break;
153 case 'L': /* qemu_ld/st constraint */
154 ct->ct |= TCG_CT_REG;
155 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
156 // Helper args
157 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
158 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
159 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
160 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O3);
161 break;
162 case 'I':
163 ct->ct |= TCG_CT_CONST_S11;
164 break;
165 case 'J':
166 ct->ct |= TCG_CT_CONST_S13;
167 break;
168 default:
169 return -1;
170 }
171 ct_str++;
172 *pct_str = ct_str;
173 return 0;
174 }
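/* Summary of the constraint letters parsed above: 'r' is any register;
   'L' is a register usable by qemu_ld/st, excluding %o0-%o3 because the
   slow path clobbers those as helper arguments; 'I' accepts an 11-bit
   signed immediate (the MOVcc width) and 'J' a 13-bit one (the ALU
   immediate width). */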
175
176 /* test if a constant matches the constraint */
177 static inline int tcg_target_const_match(tcg_target_long val,
178 const TCGArgConstraint *arg_ct)
179 {
180 int ct;
181
182 ct = arg_ct->ct;
183 if (ct & TCG_CT_CONST)
184 return 1;
185 else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11))
186 return 1;
187 else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13))
188 return 1;
189 else
190 return 0;
191 }
192
193 #define INSN_OP(x) ((x) << 30)
194 #define INSN_OP2(x) ((x) << 22)
195 #define INSN_OP3(x) ((x) << 19)
196 #define INSN_OPF(x) ((x) << 5)
197 #define INSN_RD(x) ((x) << 25)
198 #define INSN_RS1(x) ((x) << 14)
199 #define INSN_RS2(x) (x)
200 #define INSN_ASI(x) ((x) << 5)
201
202 #define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
203 #define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
204 #define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
205 #define INSN_OFF22(x) (((x) >> 2) & 0x3fffff)
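/* Every instruction is a single 32-bit word assembled by OR-ing the field
   macros above. For example, "add %o1, %o2, %o0" is
   ARITH_ADD | INSN_RD(TCG_REG_O0) | INSN_RS1(TCG_REG_O1) | INSN_RS2(TCG_REG_O2):
   op in bits 31:30, rd in 29:25, op3 in 24:19, rs1 in 18:14, rs2 in 4:0,
   with bit 13 clear selecting the register rather than immediate form. */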
206
207 #define INSN_COND(x, a) (((x) << 25) | ((a) << 29))
208 #define COND_N 0x0
209 #define COND_E 0x1
210 #define COND_LE 0x2
211 #define COND_L 0x3
212 #define COND_LEU 0x4
213 #define COND_CS 0x5
214 #define COND_NEG 0x6
215 #define COND_VS 0x7
216 #define COND_A 0x8
217 #define COND_NE 0x9
218 #define COND_G 0xa
219 #define COND_GE 0xb
220 #define COND_GU 0xc
221 #define COND_CC 0xd
222 #define COND_POS 0xe
223 #define COND_VC 0xf
224 #define BA (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2))
225
226 #define MOVCC_ICC (1 << 18)
227 #define MOVCC_XCC (1 << 18 | 1 << 12)
228
229 #define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
230 #define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
231 #define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
232 #define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
233 #define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
234 #define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
235 #define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06))
236 #define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
237 #define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
238 #define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
239 #define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
240 #define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
241 #define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
242 #define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
243 #define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
244 #define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
245 #define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
246 #define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
247 #define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
248
249 #define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
250 #define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
251 #define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))
252
253 #define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
254 #define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
255 #define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
256
257 #define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
258 #define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
259 #define JMPL (INSN_OP(2) | INSN_OP3(0x38))
260 #define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
261 #define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
262 #define SETHI (INSN_OP(0) | INSN_OP2(0x4))
263 #define CALL INSN_OP(1)
264 #define LDUB (INSN_OP(3) | INSN_OP3(0x01))
265 #define LDSB (INSN_OP(3) | INSN_OP3(0x09))
266 #define LDUH (INSN_OP(3) | INSN_OP3(0x02))
267 #define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
268 #define LDUW (INSN_OP(3) | INSN_OP3(0x00))
269 #define LDSW (INSN_OP(3) | INSN_OP3(0x08))
270 #define LDX (INSN_OP(3) | INSN_OP3(0x0b))
271 #define STB (INSN_OP(3) | INSN_OP3(0x05))
272 #define STH (INSN_OP(3) | INSN_OP3(0x06))
273 #define STW (INSN_OP(3) | INSN_OP3(0x04))
274 #define STX (INSN_OP(3) | INSN_OP3(0x0e))
275 #define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
276 #define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
277 #define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
278 #define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
279 #define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
280 #define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
281 #define LDXA (INSN_OP(3) | INSN_OP3(0x1b))
282 #define STBA (INSN_OP(3) | INSN_OP3(0x15))
283 #define STHA (INSN_OP(3) | INSN_OP3(0x16))
284 #define STWA (INSN_OP(3) | INSN_OP3(0x14))
285 #define STXA (INSN_OP(3) | INSN_OP3(0x1e))
286
287 #ifndef ASI_PRIMARY_LITTLE
288 #define ASI_PRIMARY_LITTLE 0x88
289 #endif
290
291 static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
292 int op)
293 {
294 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
295 INSN_RS2(rs2));
296 }
297
298 static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
299 uint32_t offset, int op)
300 {
301 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
302 INSN_IMM13(offset));
303 }
304
305 static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
306 int val2, int val2const, int op)
307 {
308 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
309 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
310 }
311
312 static inline void tcg_out_mov(TCGContext *s, TCGType type,
313 TCGReg ret, TCGReg arg)
314 {
315 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
316 }
317
318 static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
319 {
320 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
321 }
322
323 static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
324 {
325 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
326 }
327
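/* sethi writes its 22-bit immediate to bits 31:10 of the destination and
   clears the rest, so an arbitrary 32-bit constant is built below as
   "sethi %hi(arg), ret" followed by an "or" of the low 10 bits. */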
328 static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
329 {
330 if (check_fit_tl(arg, 13))
331 tcg_out_movi_imm13(s, ret, arg);
332 else {
333 tcg_out_sethi(s, ret, arg);
334 if (arg & 0x3ff)
335 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
336 }
337 }
338
339 static inline void tcg_out_movi(TCGContext *s, TCGType type,
340 TCGReg ret, tcg_target_long arg)
341 {
342 /* All 32-bit constants, as well as 64-bit constants with
343 no high bits set, go through movi_imm32. */
344 if (TCG_TARGET_REG_BITS == 32
345 || type == TCG_TYPE_I32
346 || (arg & ~(tcg_target_long)0xffffffff) == 0) {
347 tcg_out_movi_imm32(s, ret, arg);
348 } else if (check_fit_tl(arg, 13)) {
349 /* A 13-bit constant sign-extended to 64-bits. */
350 tcg_out_movi_imm13(s, ret, arg);
351 } else if (check_fit_tl(arg, 32)) {
352 /* A 32-bit constant sign-extended to 64-bits. */
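/* arg is negative here, so sethi+or alone cannot set the upper 32 bits.
   Load the high 22 bits of ~arg instead, then XOR with a 13-bit
   immediate holding arg's low 10 bits and all bits above them set: the
   XOR flips bits 10..31 back to arg and turns the zero upper half into
   the required all-ones sign extension. */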
353 tcg_out_sethi(s, ret, ~arg);
354 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
355 } else {
356 tcg_out_movi_imm32(s, TCG_REG_I4, arg >> (TCG_TARGET_REG_BITS / 2));
357 tcg_out_arithi(s, TCG_REG_I4, TCG_REG_I4, 32, SHIFT_SLLX);
358 tcg_out_movi_imm32(s, ret, arg);
359 tcg_out_arith(s, ret, ret, TCG_REG_I4, ARITH_OR);
360 }
361 }
362
363 static inline void tcg_out_ld_raw(TCGContext *s, int ret,
364 tcg_target_long arg)
365 {
366 tcg_out_sethi(s, ret, arg);
367 tcg_out32(s, LDUW | INSN_RD(ret) | INSN_RS1(ret) |
368 INSN_IMM13(arg & 0x3ff));
369 }
370
371 static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
372 tcg_target_long arg)
373 {
374 if (!check_fit_tl(arg, 10))
375 tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ffULL);
376 if (TCG_TARGET_REG_BITS == 64) {
377 tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(ret) |
378 INSN_IMM13(arg & 0x3ff));
379 } else {
380 tcg_out32(s, LDUW | INSN_RD(ret) | INSN_RS1(ret) |
381 INSN_IMM13(arg & 0x3ff));
382 }
383 }
384
385 static inline void tcg_out_ldst(TCGContext *s, int ret, int addr, int offset, int op)
386 {
387 if (check_fit_tl(offset, 13))
388 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
389 INSN_IMM13(offset));
390 else {
391 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, offset);
392 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(TCG_REG_I5) |
393 INSN_RS2(addr));
394 }
395 }
396
397 static inline void tcg_out_ldst_asi(TCGContext *s, int ret, int addr,
398 int offset, int op, int asi)
399 {
400 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, offset);
401 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(TCG_REG_I5) |
402 INSN_ASI(asi) | INSN_RS2(addr));
403 }
404
405 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
406 TCGReg arg1, tcg_target_long arg2)
407 {
408 if (type == TCG_TYPE_I32)
409 tcg_out_ldst(s, ret, arg1, arg2, LDUW);
410 else
411 tcg_out_ldst(s, ret, arg1, arg2, LDX);
412 }
413
414 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
415 TCGReg arg1, tcg_target_long arg2)
416 {
417 if (type == TCG_TYPE_I32)
418 tcg_out_ldst(s, arg, arg1, arg2, STW);
419 else
420 tcg_out_ldst(s, arg, arg1, arg2, STX);
421 }
422
423 static inline void tcg_out_sety(TCGContext *s, int rs)
424 {
425 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
426 }
427
428 static inline void tcg_out_rdy(TCGContext *s, int rd)
429 {
430 tcg_out32(s, RDY | INSN_RD(rd));
431 }
432
433 static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
434 {
435 if (val != 0) {
436 if (check_fit_tl(val, 13))
437 tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
438 else {
439 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I5, val);
440 tcg_out_arith(s, reg, reg, TCG_REG_I5, ARITH_ADD);
441 }
442 }
443 }
444
445 static inline void tcg_out_andi(TCGContext *s, int reg, tcg_target_long val)
446 {
447 if (val != 0) {
448 if (check_fit_tl(val, 13))
449 tcg_out_arithi(s, reg, reg, val, ARITH_AND);
450 else {
451 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_I5, val);
452 tcg_out_arith(s, reg, reg, TCG_REG_I5, ARITH_AND);
453 }
454 }
455 }
456
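/* SPARC 32-bit division takes the top half of the 64-bit dividend from
   the Y register, so Y must be seeded first: zero (via %g0) for unsigned
   division, or 32 copies of the sign bit (rs1 >> 31) for signed. */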
457 static void tcg_out_div32(TCGContext *s, int rd, int rs1,
458 int val2, int val2const, int uns)
459 {
460 /* Load Y with the sign/zero extension of RS1 to 64 bits. */
461 if (uns) {
462 tcg_out_sety(s, TCG_REG_G0);
463 } else {
464 tcg_out_arithi(s, TCG_REG_I5, rs1, 31, SHIFT_SRA);
465 tcg_out_sety(s, TCG_REG_I5);
466 }
467
468 tcg_out_arithc(s, rd, rs1, val2, val2const,
469 uns ? ARITH_UDIV : ARITH_SDIV);
470 }
471
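/* "sethi 0, %g0" is the canonical SPARC nop encoding; it is used below
   to fill branch delay slots. */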
472 static inline void tcg_out_nop(TCGContext *s)
473 {
474 tcg_out_sethi(s, TCG_REG_G0, 0);
475 }
476
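/* The branch emitters below take the condition field pre-encoded with
   INSN_COND(cond, annul), so callers control the annul bit; the emitters
   add only the opcode, op2 and displacement fields. */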
477 static void tcg_out_branch_i32(TCGContext *s, int opc, int label_index)
478 {
479 TCGLabel *l = &s->labels[label_index];
480
481 if (l->has_value) {
482 tcg_out32(s, (INSN_OP(0) | opc | INSN_OP2(0x2)
483 | INSN_OFF22(l->u.value - (unsigned long)s->code_ptr)));
484 } else {
485 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP22, label_index, 0);
486 tcg_out32(s, (INSN_OP(0) | opc | INSN_OP2(0x2) | 0));
487 }
488 }
489
490 #if TCG_TARGET_REG_BITS == 64
491 static void tcg_out_branch_i64(TCGContext *s, int opc, int label_index)
492 {
493 TCGLabel *l = &s->labels[label_index];
494
495 if (l->has_value) {
496 tcg_out32(s, (INSN_OP(0) | opc | INSN_OP2(0x1) |
497 (0x5 << 19) |
498 INSN_OFF19(l->u.value - (unsigned long)s->code_ptr)));
499 } else {
500 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label_index, 0);
501 tcg_out32(s, (INSN_OP(0) | opc | INSN_OP2(0x1) |
502 (0x5 << 19) | 0));
503 }
504 }
505 #endif
506
507 static const uint8_t tcg_cond_to_bcond[10] = {
508 [TCG_COND_EQ] = COND_E,
509 [TCG_COND_NE] = COND_NE,
510 [TCG_COND_LT] = COND_L,
511 [TCG_COND_GE] = COND_GE,
512 [TCG_COND_LE] = COND_LE,
513 [TCG_COND_GT] = COND_G,
514 [TCG_COND_LTU] = COND_CS,
515 [TCG_COND_GEU] = COND_CC,
516 [TCG_COND_LEU] = COND_LEU,
517 [TCG_COND_GTU] = COND_GU,
518 };
519
520 static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
521 {
522 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
523 }
524
525 static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond,
526 TCGArg arg1, TCGArg arg2, int const_arg2,
527 int label_index)
528 {
529 tcg_out_cmp(s, arg1, arg2, const_arg2);
530 tcg_out_branch_i32(s, INSN_COND(tcg_cond_to_bcond[cond], 0), label_index);
531 tcg_out_nop(s);
532 }
533
534 #if TCG_TARGET_REG_BITS == 64
535 static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond,
536 TCGArg arg1, TCGArg arg2, int const_arg2,
537 int label_index)
538 {
539 tcg_out_cmp(s, arg1, arg2, const_arg2);
540 tcg_out_branch_i64(s, INSN_COND(tcg_cond_to_bcond[cond], 0), label_index);
541 tcg_out_nop(s);
542 }
543 #else
544 static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
545 TCGArg al, TCGArg ah,
546 TCGArg bl, int blconst,
547 TCGArg bh, int bhconst, int label_dest)
548 {
549 int cc, label_next = gen_new_label();
550
551 tcg_out_cmp(s, ah, bh, bhconst);
552
553 /* Note that we fill one of the delay slots with the second compare. */
554 switch (cond) {
555 case TCG_COND_EQ:
556 cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
557 tcg_out_branch_i32(s, cc, label_next);
558 tcg_out_cmp(s, al, bl, blconst);
559 cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_EQ], 0);
560 tcg_out_branch_i32(s, cc, label_dest);
561 break;
562
563 case TCG_COND_NE:
564 cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
565 tcg_out_branch_i32(s, cc, label_dest);
566 tcg_out_cmp(s, al, bl, blconst);
567 tcg_out_branch_i32(s, cc, label_dest);
568 break;
569
570 default:
571 /* ??? One could fairly easily special-case 64-bit unsigned
572 compares against 32-bit zero-extended constants. For instance,
573 we know that (unsigned)AH < 0 is false and need not emit it.
574 Similarly, (unsigned)AH > 0 being true implies AH != 0, so the
575 second branch will never be taken. */
576 cc = INSN_COND(tcg_cond_to_bcond[cond], 0);
577 tcg_out_branch_i32(s, cc, label_dest);
578 tcg_out_nop(s);
579 cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
580 tcg_out_branch_i32(s, cc, label_next);
581 tcg_out_cmp(s, al, bl, blconst);
582 cc = INSN_COND(tcg_cond_to_bcond[tcg_unsigned_cond(cond)], 0);
583 tcg_out_branch_i32(s, cc, label_dest);
584 break;
585 }
586 tcg_out_nop(s);
587
588 tcg_out_label(s, label_next, s->code_ptr);
589 }
590 #endif
591
592 static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
593 TCGArg c1, TCGArg c2, int c2const)
594 {
595 TCGArg t;
596
597 /* For 32-bit comparisons, we can play games with ADDX/SUBX. */
598 switch (cond) {
599 case TCG_COND_EQ:
600 case TCG_COND_NE:
601 if (c2 != 0) {
602 tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
603 }
604 c1 = TCG_REG_G0, c2 = ret, c2const = 0;
605 cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
606 break;
607
608 case TCG_COND_GTU:
609 case TCG_COND_LEU:
610 if (c2const && c2 != 0) {
611 tcg_out_movi_imm13(s, TCG_REG_I5, c2);
612 c2 = TCG_REG_I5;
613 }
614 t = c1, c1 = c2, c2 = t, c2const = 0;
615 cond = tcg_swap_cond(cond);
616 break;
617
618 case TCG_COND_LTU:
619 case TCG_COND_GEU:
620 break;
621
622 default:
623 tcg_out_cmp(s, c1, c2, c2const);
624 #if defined(__sparc_v9__) || defined(__sparc_v8plus__)
625 tcg_out_movi_imm13(s, ret, 0);
626 tcg_out32 (s, ARITH_MOVCC | INSN_RD(ret)
627 | INSN_RS1(tcg_cond_to_bcond[cond])
628 | MOVCC_ICC | INSN_IMM11(1));
629 #else
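/* Without MOVcc (pre-V9), synthesize the 0/1 result with an annulled
   branch: the annul bit makes the delay-slot "mov 1" execute only when
   the branch is taken, otherwise it is skipped and the fall-through
   "mov 0" runs. */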
630 t = gen_new_label();
631 tcg_out_branch_i32(s, INSN_COND(tcg_cond_to_bcond[cond], 1), t);
632 tcg_out_movi_imm13(s, ret, 1);
633 tcg_out_movi_imm13(s, ret, 0);
634 tcg_out_label(s, t, s->code_ptr);
635 #endif
636 return;
637 }
638
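/* The compare leaves the unsigned borrow in the carry flag: C is set
   exactly when c1 <u c2. ADDX computes %g0 + 0 + C, i.e. LTU directly;
   SUBX computes %g0 - (-1) - C = 1 - C, i.e. GEU. The earlier cases
   remap EQ/NE/GTU/LEU onto these two; signed conditions already took
   the MOVcc/branch path above. */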
639 tcg_out_cmp(s, c1, c2, c2const);
640 if (cond == TCG_COND_LTU) {
641 tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
642 } else {
643 tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
644 }
645 }
646
647 #if TCG_TARGET_REG_BITS == 64
648 static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
649 TCGArg c1, TCGArg c2, int c2const)
650 {
651 tcg_out_cmp(s, c1, c2, c2const);
652 tcg_out_movi_imm13(s, ret, 0);
653 tcg_out32 (s, ARITH_MOVCC | INSN_RD(ret)
654 | INSN_RS1(tcg_cond_to_bcond[cond])
655 | MOVCC_XCC | INSN_IMM11(1));
656 }
657 #else
658 static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
659 TCGArg al, TCGArg ah,
660 TCGArg bl, int blconst,
661 TCGArg bh, int bhconst)
662 {
663 int lab;
664
665 switch (cond) {
666 case TCG_COND_EQ:
667 tcg_out_setcond_i32(s, TCG_COND_EQ, TCG_REG_I5, al, bl, blconst);
668 tcg_out_setcond_i32(s, TCG_COND_EQ, ret, ah, bh, bhconst);
669 tcg_out_arith(s, ret, ret, TCG_REG_I5, ARITH_AND);
670 break;
671
672 case TCG_COND_NE:
673 tcg_out_setcond_i32(s, TCG_COND_NE, TCG_REG_I5, al, bl, blconst);
674 tcg_out_setcond_i32(s, TCG_COND_NE, ret, ah, bh, bhconst);
675 tcg_out_arith(s, ret, ret, TCG_REG_I5, ARITH_OR);
676 break;
677
678 default:
679 lab = gen_new_label();
680
681 tcg_out_cmp(s, ah, bh, bhconst);
682 tcg_out_branch_i32(s, INSN_COND(tcg_cond_to_bcond[cond], 1), lab);
683 tcg_out_movi_imm13(s, ret, 1);
684 tcg_out_branch_i32(s, INSN_COND(COND_NE, 1), lab);
685 tcg_out_movi_imm13(s, ret, 0);
686
687 tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), ret, al, bl, blconst);
688
689 tcg_out_label(s, lab, s->code_ptr);
690 break;
691 }
692 }
693 #endif
694
695 /* Generate global QEMU prologue and epilogue code */
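/* The prologue is entered with the CPU env pointer in %o0 and the
   translated-code pointer in %o1; after SAVE these are visible as
   %i0/%i1. The JMPL enters the TB while its delay slot copies env into
   TCG_AREG0. There is no separate epilogue: exit_tb returns through %i7
   with a RESTORE in its delay slot. */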
696 static void tcg_target_qemu_prologue(TCGContext *s)
697 {
698 tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_CALL_STACK_OFFSET,
699 CPU_TEMP_BUF_NLONGS * (int)sizeof(long));
700 tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
701 INSN_IMM13(-(TCG_TARGET_STACK_MINFRAME +
702 CPU_TEMP_BUF_NLONGS * (int)sizeof(long))));
703 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I1) |
704 INSN_RS2(TCG_REG_G0));
705 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_I0);
706 }
707
708 #if defined(CONFIG_SOFTMMU)
709
710 #include "../../softmmu_defs.h"
711
712 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
713 int mmu_idx) */
714 static const void * const qemu_ld_helpers[4] = {
715 helper_ldb_mmu,
716 helper_ldw_mmu,
717 helper_ldl_mmu,
718 helper_ldq_mmu,
719 };
720
721 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
722 uintxx_t val, int mmu_idx) */
723 static const void * const qemu_st_helpers[4] = {
724 helper_stb_mmu,
725 helper_stw_mmu,
726 helper_stl_mmu,
727 helper_stq_mmu,
728 };
729 #endif
730
731 #if TARGET_LONG_BITS == 32
732 #define TARGET_LD_OP LDUW
733 #else
734 #define TARGET_LD_OP LDX
735 #endif
736
737 #if defined(CONFIG_SOFTMMU)
738 #if HOST_LONG_BITS == 32
739 #define TARGET_ADDEND_LD_OP LDUW
740 #else
741 #define TARGET_ADDEND_LD_OP LDX
742 #endif
743 #endif
744
745 #ifdef __arch64__
746 #define HOST_LD_OP LDX
747 #define HOST_ST_OP STX
748 #define HOST_SLL_OP SHIFT_SLLX
749 #define HOST_SRA_OP SHIFT_SRAX
750 #else
751 #define HOST_LD_OP LDUW
752 #define HOST_ST_OP STW
753 #define HOST_SLL_OP SHIFT_SLL
754 #define HOST_SRA_OP SHIFT_SRA
755 #endif
756
757 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
758 int opc)
759 {
760 int addr_reg, data_reg, arg0, arg1, arg2, mem_index, s_bits;
761 #if defined(CONFIG_SOFTMMU)
762 uint32_t *label1_ptr, *label2_ptr;
763 #endif
764
765 data_reg = *args++;
766 addr_reg = *args++;
767 mem_index = *args;
768 s_bits = opc & 3;
769
770 arg0 = TCG_REG_O0;
771 arg1 = TCG_REG_O1;
772 arg2 = TCG_REG_O2;
773
774 #if defined(CONFIG_SOFTMMU)
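/* Fast path: index the softmmu TLB with bits of the guest address and
   compare the stored tag against the page-aligned address (the low
   s_bits double as an alignment check). On a hit the "be label1"
   emitted below skips to a direct load through the TLB addend; on a
   miss execution falls through into the helper call. */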
775 /* srl addr_reg, x, arg1 */
776 tcg_out_arithi(s, arg1, addr_reg, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
777 SHIFT_SRL);
778 /* and addr_reg, x, arg0 */
779 tcg_out_arithi(s, arg0, addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
780 ARITH_AND);
781
782 /* and arg1, x, arg1 */
783 tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
784
785 /* add arg1, x, arg1 */
786 tcg_out_addi(s, arg1, offsetof(CPUArchState,
787 tlb_table[mem_index][0].addr_read));
788
789 /* add env, arg1, arg1 */
790 tcg_out_arith(s, arg1, TCG_AREG0, arg1, ARITH_ADD);
791
792 /* ld [arg1], arg2 */
793 tcg_out32(s, TARGET_LD_OP | INSN_RD(arg2) | INSN_RS1(arg1) |
794 INSN_RS2(TCG_REG_G0));
795
796 /* subcc arg0, arg2, %g0 */
797 tcg_out_arith(s, TCG_REG_G0, arg0, arg2, ARITH_SUBCC);
798
799 /* will become:
800 be label1
801 or
802 be,pt %xcc label1 */
803 label1_ptr = (uint32_t *)s->code_ptr;
804 tcg_out32(s, 0);
805
806 /* mov (delay slot) */
807 tcg_out_mov(s, TCG_TYPE_PTR, arg0, addr_reg);
808
809 /* mov */
810 tcg_out_movi(s, TCG_TYPE_I32, arg1, mem_index);
811 /* XXX/FIXME: suboptimal */
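/* Shift each already-loaded argument up one register so that the env
   pointer can be prepended in %o0; the moves run from the highest
   register down so no value is overwritten before it is copied. */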
812 tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
813 tcg_target_call_iarg_regs[2]);
814 tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
815 tcg_target_call_iarg_regs[1]);
816 tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
817 tcg_target_call_iarg_regs[0]);
818 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
819 TCG_AREG0);
820
821 /* XXX: move this code to the end of the TB */
822 /* qemu_ld_helper[s_bits](arg0, arg1) */
823 tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
824 - (tcg_target_ulong)s->code_ptr) >> 2)
825 & 0x3fffffff));
826 /* Store AREG0 on the stack to avoid ugly glibc bugs that mangle
827 global registers */
828 // delay slot
829 tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
830 TCG_TARGET_CALL_STACK_OFFSET - TCG_STATIC_CALL_ARGS_SIZE -
831 sizeof(long), HOST_ST_OP);
832 tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
833 TCG_TARGET_CALL_STACK_OFFSET - TCG_STATIC_CALL_ARGS_SIZE -
834 sizeof(long), HOST_LD_OP);
835
836 /* data_reg = sign_extend(arg0) */
837 switch(opc) {
838 case 0 | 4:
839 /* sll arg0, 24/56, data_reg */
840 tcg_out_arithi(s, data_reg, arg0, (int)sizeof(tcg_target_long) * 8 - 8,
841 HOST_SLL_OP);
842 /* sra data_reg, 24/56, data_reg */
843 tcg_out_arithi(s, data_reg, data_reg,
844 (int)sizeof(tcg_target_long) * 8 - 8, HOST_SRA_OP);
845 break;
846 case 1 | 4:
847 /* sll arg0, 16/48, data_reg */
848 tcg_out_arithi(s, data_reg, arg0,
849 (int)sizeof(tcg_target_long) * 8 - 16, HOST_SLL_OP);
850 /* sra data_reg, 16/48, data_reg */
851 tcg_out_arithi(s, data_reg, data_reg,
852 (int)sizeof(tcg_target_long) * 8 - 16, HOST_SRA_OP);
853 break;
854 case 2 | 4:
855 /* sll arg0, 32, data_reg */
856 tcg_out_arithi(s, data_reg, arg0, 32, HOST_SLL_OP);
857 /* sra data_reg, 32, data_reg */
858 tcg_out_arithi(s, data_reg, data_reg, 32, HOST_SRA_OP);
859 break;
860 case 0:
861 case 1:
862 case 2:
863 case 3:
864 default:
865 /* mov */
866 tcg_out_mov(s, TCG_TYPE_REG, data_reg, arg0);
867 break;
868 }
869
870 /* will become:
871 ba label2 */
872 label2_ptr = (uint32_t *)s->code_ptr;
873 tcg_out32(s, 0);
874
875 /* nop (delay slot) */
876 tcg_out_nop(s);
877
878 /* label1: */
879 #if TARGET_LONG_BITS == 32
880 /* be label1 */
881 *label1_ptr = (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x2) |
882 INSN_OFF22((unsigned long)s->code_ptr -
883 (unsigned long)label1_ptr));
884 #else
885 /* be,pt %xcc label1 */
886 *label1_ptr = (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1) |
887 (0x5 << 19) | INSN_OFF19((unsigned long)s->code_ptr -
888 (unsigned long)label1_ptr));
889 #endif
890
891 /* ld [arg1 + x], arg1 */
892 tcg_out_ldst(s, arg1, arg1, offsetof(CPUTLBEntry, addend) -
893 offsetof(CPUTLBEntry, addr_read), TARGET_ADDEND_LD_OP);
894
895 #if TARGET_LONG_BITS == 32
896 /* and addr_reg, x, arg0 */
897 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_I5, 0xffffffff);
898 tcg_out_arith(s, arg0, addr_reg, TCG_REG_I5, ARITH_AND);
899 /* add arg0, arg1, arg0 */
900 tcg_out_arith(s, arg0, arg0, arg1, ARITH_ADD);
901 #else
902 /* add addr_reg, arg1, arg0 */
903 tcg_out_arith(s, arg0, addr_reg, arg1, ARITH_ADD);
904 #endif
905
906 #else
907 arg0 = addr_reg;
908 #endif
909
910 switch(opc) {
911 case 0:
912 /* ldub [arg0], data_reg */
913 tcg_out_ldst(s, data_reg, arg0, 0, LDUB);
914 break;
915 case 0 | 4:
916 /* ldsb [arg0], data_reg */
917 tcg_out_ldst(s, data_reg, arg0, 0, LDSB);
918 break;
919 case 1:
920 #ifdef TARGET_WORDS_BIGENDIAN
921 /* lduh [arg0], data_reg */
922 tcg_out_ldst(s, data_reg, arg0, 0, LDUH);
923 #else
924 /* lduha [arg0] ASI_PRIMARY_LITTLE, data_reg */
925 tcg_out_ldst_asi(s, data_reg, arg0, 0, LDUHA, ASI_PRIMARY_LITTLE);
926 #endif
927 break;
928 case 1 | 4:
929 #ifdef TARGET_WORDS_BIGENDIAN
930 /* ldsh [arg0], data_reg */
931 tcg_out_ldst(s, data_reg, arg0, 0, LDSH);
932 #else
933 /* ldsha [arg0] ASI_PRIMARY_LITTLE, data_reg */
934 tcg_out_ldst_asi(s, data_reg, arg0, 0, LDSHA, ASI_PRIMARY_LITTLE);
935 #endif
936 break;
937 case 2:
938 #ifdef TARGET_WORDS_BIGENDIAN
939 /* lduw [arg0], data_reg */
940 tcg_out_ldst(s, data_reg, arg0, 0, LDUW);
941 #else
942 /* lduwa [arg0] ASI_PRIMARY_LITTLE, data_reg */
943 tcg_out_ldst_asi(s, data_reg, arg0, 0, LDUWA, ASI_PRIMARY_LITTLE);
944 #endif
945 break;
946 case 2 | 4:
947 #ifdef TARGET_WORDS_BIGENDIAN
948 /* ldsw [arg0], data_reg */
949 tcg_out_ldst(s, data_reg, arg0, 0, LDSW);
950 #else
951 /* ldswa [arg0] ASI_PRIMARY_LITTLE, data_reg */
952 tcg_out_ldst_asi(s, data_reg, arg0, 0, LDSWA, ASI_PRIMARY_LITTLE);
953 #endif
954 break;
955 case 3:
956 #ifdef TARGET_WORDS_BIGENDIAN
957 /* ldx [arg0], data_reg */
958 tcg_out_ldst(s, data_reg, arg0, 0, LDX);
959 #else
960 /* ldxa [arg0] ASI_PRIMARY_LITTLE, data_reg */
961 tcg_out_ldst_asi(s, data_reg, arg0, 0, LDXA, ASI_PRIMARY_LITTLE);
962 #endif
963 break;
964 default:
965 tcg_abort();
966 }
967
968 #if defined(CONFIG_SOFTMMU)
969 /* label2: */
970 *label2_ptr = (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2) |
971 INSN_OFF22((unsigned long)s->code_ptr -
972 (unsigned long)label2_ptr));
973 #endif
974 }
975
976 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
977 int opc)
978 {
979 int addr_reg, data_reg, arg0, arg1, arg2, mem_index, s_bits;
980 #if defined(CONFIG_SOFTMMU)
981 uint32_t *label1_ptr, *label2_ptr;
982 #endif
983
984 data_reg = *args++;
985 addr_reg = *args++;
986 mem_index = *args;
987
988 s_bits = opc;
989
990 arg0 = TCG_REG_O0;
991 arg1 = TCG_REG_O1;
992 arg2 = TCG_REG_O2;
993
994 #if defined(CONFIG_SOFTMMU)
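/* Same TLB fast-path/slow-path structure as tcg_out_qemu_ld above,
   except that the tag is read from addr_write and the helper call also
   passes the value to be stored. */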
995 /* srl addr_reg, x, arg1 */
996 tcg_out_arithi(s, arg1, addr_reg, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
997 SHIFT_SRL);
998
999 /* and addr_reg, x, arg0 */
1000 tcg_out_arithi(s, arg0, addr_reg, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
1001 ARITH_AND);
1002
1003 /* and arg1, x, arg1 */
1004 tcg_out_andi(s, arg1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);
1005
1006 /* add arg1, x, arg1 */
1007 tcg_out_addi(s, arg1, offsetof(CPUArchState,
1008 tlb_table[mem_index][0].addr_write));
1009
1010 /* add env, arg1, arg1 */
1011 tcg_out_arith(s, arg1, TCG_AREG0, arg1, ARITH_ADD);
1012
1013 /* ld [arg1], arg2 */
1014 tcg_out32(s, TARGET_LD_OP | INSN_RD(arg2) | INSN_RS1(arg1) |
1015 INSN_RS2(TCG_REG_G0));
1016
1017 /* subcc arg0, arg2, %g0 */
1018 tcg_out_arith(s, TCG_REG_G0, arg0, arg2, ARITH_SUBCC);
1019
1020 /* will become:
1021 be label1
1022 or
1023 be,pt %xcc label1 */
1024 label1_ptr = (uint32_t *)s->code_ptr;
1025 tcg_out32(s, 0);
1026
1027 /* mov (delay slot) */
1028 tcg_out_mov(s, TCG_TYPE_PTR, arg0, addr_reg);
1029
1030 /* mov */
1031 tcg_out_mov(s, TCG_TYPE_REG, arg1, data_reg);
1032
1033 /* mov */
1034 tcg_out_movi(s, TCG_TYPE_I32, arg2, mem_index);
1035
1036 /* XXX/FIXME: suboptimal */
1037 tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
1038 tcg_target_call_iarg_regs[2]);
1039 tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
1040 tcg_target_call_iarg_regs[1]);
1041 tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
1042 tcg_target_call_iarg_regs[0]);
1043 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
1044 TCG_AREG0);
1045 /* XXX: move this code to the end of the TB */
1046 /* qemu_st_helper[s_bits](arg0, arg1, arg2) */
1047 tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[s_bits]
1048 - (tcg_target_ulong)s->code_ptr) >> 2)
1049 & 0x3fffffff));
1050 /* Store AREG0 on the stack to avoid ugly glibc bugs that mangle
1051 global registers */
1052 // delay slot
1053 tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
1054 TCG_TARGET_CALL_STACK_OFFSET - TCG_STATIC_CALL_ARGS_SIZE -
1055 sizeof(long), HOST_ST_OP);
1056 tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
1057 TCG_TARGET_CALL_STACK_OFFSET - TCG_STATIC_CALL_ARGS_SIZE -
1058 sizeof(long), HOST_LD_OP);
1059
1060 /* will become:
1061 ba label2 */
1062 label2_ptr = (uint32_t *)s->code_ptr;
1063 tcg_out32(s, 0);
1064
1065 /* nop (delay slot) */
1066 tcg_out_nop(s);
1067
1068 #if TARGET_LONG_BITS == 32
1069 /* be label1 */
1070 *label1_ptr = (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x2) |
1071 INSN_OFF22((unsigned long)s->code_ptr -
1072 (unsigned long)label1_ptr));
1073 #else
1074 /* be,pt %xcc label1 */
1075 *label1_ptr = (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1) |
1076 (0x5 << 19) | INSN_OFF19((unsigned long)s->code_ptr -
1077 (unsigned long)label1_ptr));
1078 #endif
1079
1080 /* ld [arg1 + x], arg1 */
1081 tcg_out_ldst(s, arg1, arg1, offsetof(CPUTLBEntry, addend) -
1082 offsetof(CPUTLBEntry, addr_write), TARGET_ADDEND_LD_OP);
1083
1084 #if TARGET_LONG_BITS == 32
1085 /* and addr_reg, x, arg0 */
1086 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_I5, 0xffffffff);
1087 tcg_out_arith(s, arg0, addr_reg, TCG_REG_I5, ARITH_AND);
1088 /* add arg0, arg1, arg0 */
1089 tcg_out_arith(s, arg0, arg0, arg1, ARITH_ADD);
1090 #else
1091 /* add addr_reg, arg1, arg0 */
1092 tcg_out_arith(s, arg0, addr_reg, arg1, ARITH_ADD);
1093 #endif
1094
1095 #else
1096 arg0 = addr_reg;
1097 #endif
1098
1099 switch(opc) {
1100 case 0:
1101 /* stb data_reg, [arg0] */
1102 tcg_out_ldst(s, data_reg, arg0, 0, STB);
1103 break;
1104 case 1:
1105 #ifdef TARGET_WORDS_BIGENDIAN
1106 /* sth data_reg, [arg0] */
1107 tcg_out_ldst(s, data_reg, arg0, 0, STH);
1108 #else
1109 /* stha data_reg, [arg0] ASI_PRIMARY_LITTLE */
1110 tcg_out_ldst_asi(s, data_reg, arg0, 0, STHA, ASI_PRIMARY_LITTLE);
1111 #endif
1112 break;
1113 case 2:
1114 #ifdef TARGET_WORDS_BIGENDIAN
1115 /* stw data_reg, [arg0] */
1116 tcg_out_ldst(s, data_reg, arg0, 0, STW);
1117 #else
1118 /* stwa data_reg, [arg0] ASI_PRIMARY_LITTLE */
1119 tcg_out_ldst_asi(s, data_reg, arg0, 0, STWA, ASI_PRIMARY_LITTLE);
1120 #endif
1121 break;
1122 case 3:
1123 #ifdef TARGET_WORDS_BIGENDIAN
1124 /* stx data_reg, [arg0] */
1125 tcg_out_ldst(s, data_reg, arg0, 0, STX);
1126 #else
1127 /* stxa data_reg, [arg0] ASI_PRIMARY_LITTLE */
1128 tcg_out_ldst_asi(s, data_reg, arg0, 0, STXA, ASI_PRIMARY_LITTLE);
1129 #endif
1130 break;
1131 default:
1132 tcg_abort();
1133 }
1134
1135 #if defined(CONFIG_SOFTMMU)
1136 /* label2: */
1137 *label2_ptr = (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2) |
1138 INSN_OFF22((unsigned long)s->code_ptr -
1139 (unsigned long)label2_ptr));
1140 #endif
1141 }
1142
1143 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
1144 const int *const_args)
1145 {
1146 int c;
1147
1148 switch (opc) {
1149 case INDEX_op_exit_tb:
1150 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
1151 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
1152 INSN_IMM13(8));
1153 tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
1154 INSN_RS2(TCG_REG_G0));
1155 break;
1156 case INDEX_op_goto_tb:
1157 if (s->tb_jmp_offset) {
1158 /* direct jump method */
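/* The sethi/jmpl pair below jumps to the raw target address;
   tb_jmp_offset records where this TB's jump code lives so that the
   TB-chaining machinery can later locate and repatch the destination. */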
1159 tcg_out_sethi(s, TCG_REG_I5, args[0] & 0xffffe000);
1160 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
1161 INSN_IMM13((args[0] & 0x1fff)));
1162 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1163 } else {
1164 /* indirect jump method */
1165 tcg_out_ld_ptr(s, TCG_REG_I5, (tcg_target_long)(s->tb_next + args[0]));
1166 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I5) |
1167 INSN_RS2(TCG_REG_G0));
1168 }
1169 tcg_out_nop(s);
1170 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1171 break;
1172 case INDEX_op_call:
1173 if (const_args[0])
1174 tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
1175 - (tcg_target_ulong)s->code_ptr) >> 2)
1176 & 0x3fffffff));
1177 else {
1178 tcg_out_ld_ptr(s, TCG_REG_I5,
1179 (tcg_target_long)(s->tb_next + args[0]));
1180 tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_I5) |
1181 INSN_RS2(TCG_REG_G0));
1182 }
1183 /* Store AREG0 on the stack to avoid ugly glibc bugs that mangle
1184 global registers */
1185 // delay slot
1186 tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
1187 TCG_TARGET_CALL_STACK_OFFSET - TCG_STATIC_CALL_ARGS_SIZE -
1188 sizeof(long), HOST_ST_OP);
1189 tcg_out_ldst(s, TCG_AREG0, TCG_REG_CALL_STACK,
1190 TCG_TARGET_CALL_STACK_OFFSET - TCG_STATIC_CALL_ARGS_SIZE -
1191 sizeof(long), HOST_LD_OP);
1192 break;
1193 case INDEX_op_jmp:
1194 case INDEX_op_br:
1195 tcg_out_branch_i32(s, INSN_COND(COND_A, 0), args[0]);
1196 tcg_out_nop(s);
1197 break;
1198 case INDEX_op_movi_i32:
1199 tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
1200 break;
1201
1202 #if TCG_TARGET_REG_BITS == 64
1203 #define OP_32_64(x) \
1204 glue(glue(case INDEX_op_, x), _i32): \
1205 glue(glue(case INDEX_op_, x), _i64)
1206 #else
1207 #define OP_32_64(x) \
1208 glue(glue(case INDEX_op_, x), _i32)
1209 #endif
1210 OP_32_64(ld8u):
1211 tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
1212 break;
1213 OP_32_64(ld8s):
1214 tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
1215 break;
1216 OP_32_64(ld16u):
1217 tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
1218 break;
1219 OP_32_64(ld16s):
1220 tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
1221 break;
1222 case INDEX_op_ld_i32:
1223 #if TCG_TARGET_REG_BITS == 64
1224 case INDEX_op_ld32u_i64:
1225 #endif
1226 tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
1227 break;
1228 OP_32_64(st8):
1229 tcg_out_ldst(s, args[0], args[1], args[2], STB);
1230 break;
1231 OP_32_64(st16):
1232 tcg_out_ldst(s, args[0], args[1], args[2], STH);
1233 break;
1234 case INDEX_op_st_i32:
1235 #if TCG_TARGET_REG_BITS == 64
1236 case INDEX_op_st32_i64:
1237 #endif
1238 tcg_out_ldst(s, args[0], args[1], args[2], STW);
1239 break;
1240 OP_32_64(add):
1241 c = ARITH_ADD;
1242 goto gen_arith;
1243 OP_32_64(sub):
1244 c = ARITH_SUB;
1245 goto gen_arith;
1246 OP_32_64(and):
1247 c = ARITH_AND;
1248 goto gen_arith;
1249 OP_32_64(andc):
1250 c = ARITH_ANDN;
1251 goto gen_arith;
1252 OP_32_64(or):
1253 c = ARITH_OR;
1254 goto gen_arith;
1255 OP_32_64(orc):
1256 c = ARITH_ORN;
1257 goto gen_arith;
1258 OP_32_64(xor):
1259 c = ARITH_XOR;
1260 goto gen_arith;
1261 case INDEX_op_shl_i32:
1262 c = SHIFT_SLL;
1263 goto gen_arith;
1264 case INDEX_op_shr_i32:
1265 c = SHIFT_SRL;
1266 goto gen_arith;
1267 case INDEX_op_sar_i32:
1268 c = SHIFT_SRA;
1269 goto gen_arith;
1270 case INDEX_op_mul_i32:
1271 c = ARITH_UMUL;
1272 goto gen_arith;
1273
1274 OP_32_64(neg):
1275 c = ARITH_SUB;
1276 goto gen_arith1;
1277 OP_32_64(not):
1278 c = ARITH_ORN;
1279 goto gen_arith1;
1280
1281 case INDEX_op_div_i32:
1282 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
1283 break;
1284 case INDEX_op_divu_i32:
1285 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
1286 break;
1287
1288 case INDEX_op_rem_i32:
1289 case INDEX_op_remu_i32:
1290 tcg_out_div32(s, TCG_REG_I5, args[1], args[2], const_args[2],
1291 opc == INDEX_op_remu_i32);
1292 tcg_out_arithc(s, TCG_REG_I5, TCG_REG_I5, args[2], const_args[2],
1293 ARITH_UMUL);
1294 tcg_out_arith(s, args[0], args[1], TCG_REG_I5, ARITH_SUB);
1295 break;
1296
1297 case INDEX_op_brcond_i32:
1298 tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
1299 args[3]);
1300 break;
1301 case INDEX_op_setcond_i32:
1302 tcg_out_setcond_i32(s, args[3], args[0], args[1],
1303 args[2], const_args[2]);
1304 break;
1305
1306 #if TCG_TARGET_REG_BITS == 32
1307 case INDEX_op_brcond2_i32:
1308 tcg_out_brcond2_i32(s, args[4], args[0], args[1],
1309 args[2], const_args[2],
1310 args[3], const_args[3], args[5]);
1311 break;
1312 case INDEX_op_setcond2_i32:
1313 tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
1314 args[3], const_args[3],
1315 args[4], const_args[4]);
1316 break;
1317 case INDEX_op_add2_i32:
1318 tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
1319 ARITH_ADDCC);
1320 tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
1321 ARITH_ADDX);
1322 break;
1323 case INDEX_op_sub2_i32:
1324 tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
1325 ARITH_SUBCC);
1326 tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
1327 ARITH_SUBX);
1328 break;
1329 case INDEX_op_mulu2_i32:
1330 tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
1331 ARITH_UMUL);
1332 tcg_out_rdy(s, args[1]);
1333 break;
1334 #endif
1335
1336 case INDEX_op_qemu_ld8u:
1337 tcg_out_qemu_ld(s, args, 0);
1338 break;
1339 case INDEX_op_qemu_ld8s:
1340 tcg_out_qemu_ld(s, args, 0 | 4);
1341 break;
1342 case INDEX_op_qemu_ld16u:
1343 tcg_out_qemu_ld(s, args, 1);
1344 break;
1345 case INDEX_op_qemu_ld16s:
1346 tcg_out_qemu_ld(s, args, 1 | 4);
1347 break;
1348 case INDEX_op_qemu_ld32:
1349 #if TCG_TARGET_REG_BITS == 64
1350 case INDEX_op_qemu_ld32u:
1351 #endif
1352 tcg_out_qemu_ld(s, args, 2);
1353 break;
1354 #if TCG_TARGET_REG_BITS == 64
1355 case INDEX_op_qemu_ld32s:
1356 tcg_out_qemu_ld(s, args, 2 | 4);
1357 break;
1358 #endif
1359 case INDEX_op_qemu_st8:
1360 tcg_out_qemu_st(s, args, 0);
1361 break;
1362 case INDEX_op_qemu_st16:
1363 tcg_out_qemu_st(s, args, 1);
1364 break;
1365 case INDEX_op_qemu_st32:
1366 tcg_out_qemu_st(s, args, 2);
1367 break;
1368
1369 #if TCG_TARGET_REG_BITS == 64
1370 case INDEX_op_movi_i64:
1371 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1372 break;
1373 case INDEX_op_ld32s_i64:
1374 tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
1375 break;
1376 case INDEX_op_ld_i64:
1377 tcg_out_ldst(s, args[0], args[1], args[2], LDX);
1378 break;
1379 case INDEX_op_st_i64:
1380 tcg_out_ldst(s, args[0], args[1], args[2], STX);
1381 break;
1382 case INDEX_op_shl_i64:
1383 c = SHIFT_SLLX;
1384 goto gen_arith;
1385 case INDEX_op_shr_i64:
1386 c = SHIFT_SRLX;
1387 goto gen_arith;
1388 case INDEX_op_sar_i64:
1389 c = SHIFT_SRAX;
1390 goto gen_arith;
1391 case INDEX_op_mul_i64:
1392 c = ARITH_MULX;
1393 goto gen_arith;
1394 case INDEX_op_div_i64:
1395 c = ARITH_SDIVX;
1396 goto gen_arith;
1397 case INDEX_op_divu_i64:
1398 c = ARITH_UDIVX;
1399 goto gen_arith;
1400 case INDEX_op_rem_i64:
1401 case INDEX_op_remu_i64:
1402 tcg_out_arithc(s, TCG_REG_I5, args[1], args[2], const_args[2],
1403 opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
1404 tcg_out_arithc(s, TCG_REG_I5, TCG_REG_I5, args[2], const_args[2],
1405 ARITH_MULX);
1406 tcg_out_arith(s, args[0], args[1], TCG_REG_I5, ARITH_SUB);
1407 break;
1408 case INDEX_op_ext32s_i64:
1409 if (const_args[1]) {
1410 tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
1411 } else {
1412 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
1413 }
1414 break;
1415 case INDEX_op_ext32u_i64:
1416 if (const_args[1]) {
1417 tcg_out_movi_imm32(s, args[0], args[1]);
1418 } else {
1419 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
1420 }
1421 break;
1422
1423 case INDEX_op_brcond_i64:
1424 tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
1425 args[3]);
1426 break;
1427 case INDEX_op_setcond_i64:
1428 tcg_out_setcond_i64(s, args[3], args[0], args[1],
1429 args[2], const_args[2]);
1430 break;
1431
1432 case INDEX_op_qemu_ld64:
1433 tcg_out_qemu_ld(s, args, 3);
1434 break;
1435 case INDEX_op_qemu_st64:
1436 tcg_out_qemu_st(s, args, 3);
1437 break;
1438
1439 #endif
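/* gen_arith emits the common three-operand ALU form; gen_arith1
   synthesizes the one-operand ops against %g0: "neg x" is sub %g0, x
   and "not x" is orn %g0, x. */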
1440 gen_arith:
1441 tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
1442 break;
1443
1444 gen_arith1:
1445 tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
1446 break;
1447
1448 default:
1449 fprintf(stderr, "unknown opcode 0x%x\n", opc);
1450 tcg_abort();
1451 }
1452 }
1453
1454 static const TCGTargetOpDef sparc_op_defs[] = {
1455 { INDEX_op_exit_tb, { } },
1456 { INDEX_op_goto_tb, { } },
1457 { INDEX_op_call, { "ri" } },
1458 { INDEX_op_jmp, { "ri" } },
1459 { INDEX_op_br, { } },
1460
1461 { INDEX_op_mov_i32, { "r", "r" } },
1462 { INDEX_op_movi_i32, { "r" } },
1463 { INDEX_op_ld8u_i32, { "r", "r" } },
1464 { INDEX_op_ld8s_i32, { "r", "r" } },
1465 { INDEX_op_ld16u_i32, { "r", "r" } },
1466 { INDEX_op_ld16s_i32, { "r", "r" } },
1467 { INDEX_op_ld_i32, { "r", "r" } },
1468 { INDEX_op_st8_i32, { "r", "r" } },
1469 { INDEX_op_st16_i32, { "r", "r" } },
1470 { INDEX_op_st_i32, { "r", "r" } },
1471
1472 { INDEX_op_add_i32, { "r", "r", "rJ" } },
1473 { INDEX_op_mul_i32, { "r", "r", "rJ" } },
1474 { INDEX_op_div_i32, { "r", "r", "rJ" } },
1475 { INDEX_op_divu_i32, { "r", "r", "rJ" } },
1476 { INDEX_op_rem_i32, { "r", "r", "rJ" } },
1477 { INDEX_op_remu_i32, { "r", "r", "rJ" } },
1478 { INDEX_op_sub_i32, { "r", "r", "rJ" } },
1479 { INDEX_op_and_i32, { "r", "r", "rJ" } },
1480 { INDEX_op_andc_i32, { "r", "r", "rJ" } },
1481 { INDEX_op_or_i32, { "r", "r", "rJ" } },
1482 { INDEX_op_orc_i32, { "r", "r", "rJ" } },
1483 { INDEX_op_xor_i32, { "r", "r", "rJ" } },
1484
1485 { INDEX_op_shl_i32, { "r", "r", "rJ" } },
1486 { INDEX_op_shr_i32, { "r", "r", "rJ" } },
1487 { INDEX_op_sar_i32, { "r", "r", "rJ" } },
1488
1489 { INDEX_op_neg_i32, { "r", "rJ" } },
1490 { INDEX_op_not_i32, { "r", "rJ" } },
1491
1492 { INDEX_op_brcond_i32, { "r", "rJ" } },
1493 { INDEX_op_setcond_i32, { "r", "r", "rJ" } },
1494
1495 #if TCG_TARGET_REG_BITS == 32
1496 { INDEX_op_brcond2_i32, { "r", "r", "rJ", "rJ" } },
1497 { INDEX_op_setcond2_i32, { "r", "r", "r", "rJ", "rJ" } },
1498 { INDEX_op_add2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
1499 { INDEX_op_sub2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
1500 { INDEX_op_mulu2_i32, { "r", "r", "r", "rJ" } },
1501 #endif
1502
1503 { INDEX_op_qemu_ld8u, { "r", "L" } },
1504 { INDEX_op_qemu_ld8s, { "r", "L" } },
1505 { INDEX_op_qemu_ld16u, { "r", "L" } },
1506 { INDEX_op_qemu_ld16s, { "r", "L" } },
1507 { INDEX_op_qemu_ld32, { "r", "L" } },
1508 #if TCG_TARGET_REG_BITS == 64
1509 { INDEX_op_qemu_ld32u, { "r", "L" } },
1510 { INDEX_op_qemu_ld32s, { "r", "L" } },
1511 #endif
1512
1513 { INDEX_op_qemu_st8, { "L", "L" } },
1514 { INDEX_op_qemu_st16, { "L", "L" } },
1515 { INDEX_op_qemu_st32, { "L", "L" } },
1516
1517 #if TCG_TARGET_REG_BITS == 64
1518 { INDEX_op_mov_i64, { "r", "r" } },
1519 { INDEX_op_movi_i64, { "r" } },
1520 { INDEX_op_ld8u_i64, { "r", "r" } },
1521 { INDEX_op_ld8s_i64, { "r", "r" } },
1522 { INDEX_op_ld16u_i64, { "r", "r" } },
1523 { INDEX_op_ld16s_i64, { "r", "r" } },
1524 { INDEX_op_ld32u_i64, { "r", "r" } },
1525 { INDEX_op_ld32s_i64, { "r", "r" } },
1526 { INDEX_op_ld_i64, { "r", "r" } },
1527 { INDEX_op_st8_i64, { "r", "r" } },
1528 { INDEX_op_st16_i64, { "r", "r" } },
1529 { INDEX_op_st32_i64, { "r", "r" } },
1530 { INDEX_op_st_i64, { "r", "r" } },
1531 { INDEX_op_qemu_ld64, { "L", "L" } },
1532 { INDEX_op_qemu_st64, { "L", "L" } },
1533
1534 { INDEX_op_add_i64, { "r", "r", "rJ" } },
1535 { INDEX_op_mul_i64, { "r", "r", "rJ" } },
1536 { INDEX_op_div_i64, { "r", "r", "rJ" } },
1537 { INDEX_op_divu_i64, { "r", "r", "rJ" } },
1538 { INDEX_op_rem_i64, { "r", "r", "rJ" } },
1539 { INDEX_op_remu_i64, { "r", "r", "rJ" } },
1540 { INDEX_op_sub_i64, { "r", "r", "rJ" } },
1541 { INDEX_op_and_i64, { "r", "r", "rJ" } },
1542 { INDEX_op_andc_i64, { "r", "r", "rJ" } },
1543 { INDEX_op_or_i64, { "r", "r", "rJ" } },
1544 { INDEX_op_orc_i64, { "r", "r", "rJ" } },
1545 { INDEX_op_xor_i64, { "r", "r", "rJ" } },
1546
1547 { INDEX_op_shl_i64, { "r", "r", "rJ" } },
1548 { INDEX_op_shr_i64, { "r", "r", "rJ" } },
1549 { INDEX_op_sar_i64, { "r", "r", "rJ" } },
1550
1551 { INDEX_op_neg_i64, { "r", "rJ" } },
1552 { INDEX_op_not_i64, { "r", "rJ" } },
1553
1554 { INDEX_op_ext32s_i64, { "r", "ri" } },
1555 { INDEX_op_ext32u_i64, { "r", "ri" } },
1556
1557 { INDEX_op_brcond_i64, { "r", "rJ" } },
1558 { INDEX_op_setcond_i64, { "r", "r", "rJ" } },
1559 #endif
1560 { -1 },
1561 };
1562
1563 static void tcg_target_init(TCGContext *s)
1564 {
1565 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
1566 #if TCG_TARGET_REG_BITS == 64
1567 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
1568 #endif
1569 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
1570 (1 << TCG_REG_G1) |
1571 (1 << TCG_REG_G2) |
1572 (1 << TCG_REG_G3) |
1573 (1 << TCG_REG_G4) |
1574 (1 << TCG_REG_G5) |
1575 (1 << TCG_REG_G6) |
1576 (1 << TCG_REG_G7) |
1577 (1 << TCG_REG_O0) |
1578 (1 << TCG_REG_O1) |
1579 (1 << TCG_REG_O2) |
1580 (1 << TCG_REG_O3) |
1581 (1 << TCG_REG_O4) |
1582 (1 << TCG_REG_O5) |
1583 (1 << TCG_REG_O7));
1584
1585 tcg_regset_clear(s->reserved_regs);
1586 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0);
1587 #if TCG_TARGET_REG_BITS == 64
1588 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I4); // for internal use
1589 #endif
1590 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I5); // for internal use
1591 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6);
1592 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7);
1593 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6);
1594 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O7);
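/* %g0 is hardwired to zero; %i6/%o6 are the frame and stack pointers and
   %i7/%o7 hold return addresses; %i5 (plus %i4 on 64-bit hosts) stays
   free as scratch for the constant and address synthesis sequences
   above. */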
1595 tcg_add_target_add_op_defs(sparc_op_defs);
1596 }
1597
1598 #if TCG_TARGET_REG_BITS == 64
1599 # define ELF_HOST_MACHINE EM_SPARCV9
1600 #elif defined(__sparc_v8plus__)
1601 # define ELF_HOST_MACHINE EM_SPARC32PLUS
1602 # define ELF_HOST_FLAGS EF_SPARC_32PLUS
1603 #else
1604 # define ELF_HOST_MACHINE EM_SPARC
1605 #endif
1606
1607 typedef struct {
1608 uint32_t len __attribute__((aligned((sizeof(void *)))));
1609 uint32_t id;
1610 uint8_t version;
1611 char augmentation[1];
1612 uint8_t code_align;
1613 uint8_t data_align;
1614 uint8_t return_column;
1615 } DebugFrameCIE;
1616
1617 typedef struct {
1618 uint32_t len __attribute__((aligned((sizeof(void *)))));
1619 uint32_t cie_offset;
1620 tcg_target_long func_start __attribute__((packed));
1621 tcg_target_long func_len __attribute__((packed));
1622 uint8_t def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];
1623 uint8_t win_save;
1624 uint8_t ret_save[3];
1625 } DebugFrameFDE;
1626
1627 typedef struct {
1628 DebugFrameCIE cie;
1629 DebugFrameFDE fde;
1630 } DebugFrame;
1631
1632 static DebugFrame debug_frame = {
1633 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1634 .cie.id = -1,
1635 .cie.version = 1,
1636 .cie.code_align = 1,
1637 .cie.data_align = -sizeof(void *) & 0x7f,
1638 .cie.return_column = 15, /* o7 */
1639
1640 .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
1641 .fde.def_cfa = {
1642 #if TCG_TARGET_REG_BITS == 64
1643 12, 30, /* DW_CFA_def_cfa i6, 2047 */
1644 (2047 & 0x7f) | 0x80, (2047 >> 7)
1645 #else
1646 13, 30 /* DW_CFA_def_cfa_register i6 */
1647 #endif
1648 },
1649 .fde.win_save = 0x2d, /* DW_CFA_GNU_window_save */
1650 .fde.ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
1651 };
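/* The structure above is a minimal hand-rolled .debug_frame image passed
   to GDB via tcg_register_jit_int so that JIT-generated code can be
   unwound: the CFA is the register window save area (%i6, plus the V9
   stack bias of 2047 on 64-bit hosts) and the return address lives in
   %o7/%i7. */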
1652
1653 void tcg_register_jit(void *buf, size_t buf_size)
1654 {
1655 debug_frame.fde.func_start = (tcg_target_long) buf;
1656 debug_frame.fde.func_len = buf_size;
1657
1658 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1659 }