/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

/* Define some temporary registers. T2 is used for constant generation. */
#define TCG_REG_T1 TCG_REG_G1
#define TCG_REG_T2 TCG_REG_O7

#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)
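
/* Illustrative only: one worked example of how the fields above compose
   into a single instruction word.  For "add %g1, 8, %o0", tcg_out_arithi()
   below emits ARITH_ADD | INSN_RD(8) | INSN_RS1(1) | INSN_IMM13(8):

       INSN_OP(2)    = 0x80000000   arithmetic format, bits 31:30
       INSN_RD(8)    = 0x10000000   %o0 is register 8
       INSN_RS1(1)   = 0x00004000   %g1 is register 1
       INSN_IMM13(8) = 0x00002008   the i bit (1 << 13) selects the
                                    immediate form, plus simm13 = 8
                       ----------
                       0x90006008

   The i bit set by INSN_IMM13 is what distinguishes the reg+imm form
   from the reg+reg form selected by INSN_RS2.  */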

#define COND_N 0x0
#define COND_E 0x1
#define COND_LE 0x2
#define COND_L 0x3
#define COND_LEU 0x4
#define COND_CS 0x5
#define COND_NEG 0x6
#define COND_VS 0x7
#define COND_A 0x8
#define COND_NE 0x9
#define COND_G 0xa
#define COND_GE 0xb
#define COND_GU 0xc
#define COND_CC 0xd
#define COND_POS 0xe
#define COND_VC 0xf
#define BA (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z 1
#define RCOND_LEZ 2
#define RCOND_LZ 3
#define RCOND_NZ 5
#define RCOND_GZ 6
#define RCOND_GEZ 7

#define MOVCC_ICC (1 << 18)
#define MOVCC_XCC (1 << 18 | 1 << 12)

#define BPCC_ICC 0
#define BPCC_XCC (2 << 20)
#define BPCC_PT (1 << 19)
#define BPCC_PN 0
#define BPCC_A (1 << 29)

#define BPR_PT BPCC_PT

#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI (INSN_OP(0) | INSN_OP2(0x4))
#define CALL INSN_OP(1)
#define LDUB (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW (INSN_OP(3) | INSN_OP3(0x08))
#define LDX (INSN_OP(3) | INSN_OP3(0x0b))
#define STB (INSN_OP(3) | INSN_OP3(0x05))
#define STH (INSN_OP(3) | INSN_OP3(0x06))
#define STW (INSN_OP(3) | INSN_OP3(0x04))
#define STX (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA (INSN_OP(3) | INSN_OP3(0x15))
#define STHA (INSN_OP(3) | INSN_OP3(0x16))
#define STWA (INSN_OP(3) | INSN_OP3(0x14))
#define STXA (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE (STHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))

static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}

static inline int check_fit_i32(uint32_t val, unsigned int bits)
{
    return ((val << (32 - bits)) >> (32 - bits)) == val;
}
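
/* A sketch of the checks above: check_fit_tl(val, 13) asks whether VAL
   survives a round trip through a signed 13-bit field.  Shifting left by
   (width - 13) and arithmetically back sign-extends bit 12; the result
   equals VAL only if bits 13 and up were already a copy of bit 12.  So
   check_fit_tl(4095, 13) holds (the simm13 range is -4096..4095), while
   check_fit_tl(4096, 13) does not.  */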

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    uint32_t insn;
    value += addend;
    switch (type) {
    case R_SPARC_32:
        if (value != (uint32_t)value) {
            tcg_abort();
        }
        *(uint32_t *)code_ptr = value;
        break;
    case R_SPARC_WDISP16:
        value -= (long)code_ptr;
        if (!check_fit_tl(value >> 2, 16)) {
            tcg_abort();
        }
        insn = *(uint32_t *)code_ptr;
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(value);
        *(uint32_t *)code_ptr = insn;
        break;
    case R_SPARC_WDISP19:
        value -= (long)code_ptr;
        if (!check_fit_tl(value >> 2, 19)) {
            tcg_abort();
        }
        insn = *(uint32_t *)code_ptr;
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(value);
        *(uint32_t *)code_ptr = insn;
        break;
    default:
        tcg_abort();
    }
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        /* Helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S13;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
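
/* For reference, these letters appear in sparc_op_defs at the bottom of
   this file.  E.g. { INDEX_op_add_i32, { "r", "rZ", "rJ" } } constrains
   the output to any register, the first input to a register or the
   constant zero (which then encodes as %g0, register 0), and the second
   input to a register or a signed 13-bit immediate.  */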

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;

    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
                                 int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_RS2(rs2));
}

static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
                                  uint32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
                           int val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
}

static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
{
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
    } else {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
    }
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    /* All 32-bit constants, as well as 64-bit constants with
       no high bits set go through movi_imm32. */
    if (TCG_TARGET_REG_BITS == 32
        || type == TCG_TYPE_I32
        || (arg & ~(tcg_target_long)0xffffffff) == 0) {
        tcg_out_movi_imm32(s, ret, arg);
    } else if (check_fit_tl(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits. */
        tcg_out_movi_imm13(s, ret, arg);
    } else if (check_fit_tl(arg, 32)) {
        /* A 32-bit constant sign-extended to 64-bits. */
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
    } else {
        tcg_out_movi_imm32(s, ret, arg >> (TCG_TARGET_REG_BITS / 2));
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_movi_imm32(s, TCG_REG_T2, arg);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}
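
/* A worked example of the sethi/xor trick above, which builds a negative
   32-bit constant sign-extended to 64 bits in two insns.  For
   arg = 0xffffffff80000000 (INT32_MIN):

       ~arg = 0x7fffffff, so sethi leaves RET = 0x7ffffc00;
       the xor immediate is (arg & 0x3ff) | -0x400 = -0x400, which the
       hardware sign-extends to 0xfffffffffffffc00;
       0x7ffffc00 ^ 0xfffffffffffffc00 = 0xffffffff80000000 = arg.

   The immediate's upper bits flip the complement loaded by sethi back
   into the original value, while its low 10 bits supply the low bits
   that sethi cannot encode.  */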

static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
                                   int a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
                                int offset, int op)
{
    if (check_fit_tl(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    if (!check_fit_tl(arg, 10)) {
        tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    }
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}

static inline void tcg_out_sety(TCGContext *s, int rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static inline void tcg_out_rdy(TCGContext *s, int rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}

static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13)) {
            tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
            tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);
        }
    }
}

static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
                                tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13)) {
            tcg_out_arithi(s, rd, rs, val, ARITH_AND);
        } else {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T1, val);
            tcg_out_arith(s, rd, rs, TCG_REG_T1, ARITH_AND);
        }
    }
}

static void tcg_out_div32(TCGContext *s, int rd, int rs1,
                          int val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits. */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
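
/* A note on the sequence above: the 32-bit SDIV/UDIV insns divide the
   64-bit value Y:RS1 by the second operand, so Y must hold a correct
   high word first.  For unsigned division that is zero (wr %g0, %y);
   for signed division it is RS1 broadcast through bit 31, e.g. dividing
   -6 by 2 first writes -6 >> 31 = -1 into Y, forming the 64-bit
   dividend 0xfffffffffffffffa.  */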

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_sethi(s, TCG_REG_G0, 0);
}

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label)
{
    TCGLabel *l = &s->labels[label];
    int off19;

    if (l->has_value) {
        off19 = INSN_OFF19(l->u.value - (unsigned long)s->code_ptr);
    } else {
        /* Make sure to preserve destinations during retranslation. */
        off19 = *(uint32_t *)s->code_ptr & INSN_OFF19(-1);
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGArg arg1,
                               TCGArg arg2, int const_arg2, int label)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, label);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGArg ret,
                          TCGArg v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const,
                                TCGArg v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGArg arg1,
                               TCGArg arg2, int const_arg2, int label)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        TCGLabel *l = &s->labels[label];
        int off16;

        if (l->has_value) {
            off16 = INSN_OFF16(l->u.value - (unsigned long)s->code_ptr);
        } else {
            /* Make sure to preserve destinations during retranslation. */
            off16 = *(uint32_t *)s->code_ptr & INSN_OFF16(-1);
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGArg ret, TCGArg c1,
                         TCGArg v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const,
                                TCGArg v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well. */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_tl(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}
#else
static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
                                TCGArg al, TCGArg ah,
                                TCGArg bl, int blconst,
                                TCGArg bh, int bhconst, int label_dest)
{
    int scond, label_next = gen_new_label();

    tcg_out_cmp(s, ah, bh, bhconst);

    /* Note that we fill one of the delay slots with the second compare. */
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_bpcc(s, COND_E, BPCC_ICC | BPCC_PT, label_dest);
        break;

    case TCG_COND_NE:
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_dest);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_dest);
        break;

    default:
        scond = tcg_cond_to_bcond[tcg_high_cond(cond)];
        tcg_out_bpcc(s, scond, BPCC_ICC | BPCC_PT, label_dest);
        tcg_out_nop(s);
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        scond = tcg_cond_to_bcond[tcg_unsigned_cond(cond)];
        tcg_out_bpcc(s, scond, BPCC_ICC | BPCC_PT, label_dest);
        break;
    }
    tcg_out_nop(s);

    tcg_out_label(s, label_next, s->code_ptr);
}
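
/* Sketch of what the default case above emits, e.g. for a signed
   64-bit "less than" built from two 32-bit halves:

       cmp   ah, bh
       bl    label_dest        ! high words decide it
        nop
       bne   label_next        ! high words differ the other way
        cmp  al, bl            ! second compare rides the delay slot
       blu   label_dest        ! equal highs: unsigned test on low words
        nop
      label_next:

   tcg_high_cond(LT) is LT and tcg_unsigned_cond(LT) is LTU, matching
   the two branches on label_dest.  */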
#endif

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDX/SUBX. */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit. */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero. */
        if (c2 != 0) {
            tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
        }
        c1 = TCG_REG_G0, c2 = ret, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register. */
        if (!c2const || c2 == 0) {
            TCGArg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
    }
}
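
/* The carry games above, concretely: after "subcc c1, c2, %g0" the icc
   carry flag is set exactly when c1 < c2 unsigned.  Then

       LTU:  addx %g0, 0, ret    =>  ret = 0 + 0 + C     = C
       GEU:  subx %g0, -1, ret   =>  ret = 0 - (-1) - C  = 1 - C

   which materializes the 0/1 result without a branch.  */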

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output. */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}
#else
static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                 TCGArg al, TCGArg ah,
                                 TCGArg bl, int blconst,
                                 TCGArg bh, int bhconst)
{
    int tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (ret != ah && (bhconst || ret != bh)) {
        tmp = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (bl == 0 && bh == 0) {
            if (cond == TCG_COND_EQ) {
                tcg_out_arith(s, TCG_REG_G0, al, ah, ARITH_ORCC);
                tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
            } else {
                tcg_out_arith(s, ret, al, ah, ARITH_ORCC);
            }
        } else {
            tcg_out_setcond_i32(s, cond, tmp, al, bl, blconst);
            tcg_out_cmp(s, ah, bh, bhconst);
            tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
        }
        tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, cond == TCG_COND_NE, 1);
        break;

    default:
        /* <= : ah < bh | (ah == bh && al <= bl) */
        tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), tmp, al, bl, blconst);
        tcg_out_cmp(s, ah, bh, bhconst);
        tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
        tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, 0, 1);
        tcg_out_movcc(s, tcg_high_cond(cond), MOVCC_ICC, ret, 1, 1);
        break;
    }
}

static void tcg_out_addsub2(TCGContext *s, TCGArg rl, TCGArg rh,
                            TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                            TCGArg bh, int bhconst, int opl, int oph)
{
    TCGArg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer. */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees. */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I1) |
              INSN_RS2(TCG_REG_G0));
    /* delay slot */
    tcg_out_nop(s);

    /* No epilogue required. We issue ret + restore directly in the TB. */
}
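
/* The rounding above is the usual align-up idiom,
   (size + ALIGN - 1) & -ALIGN.  Purely as an illustration, with an
   assumed TCG_TARGET_STACK_ALIGN of 16 a raw frame of 210 bytes becomes
   (210 + 15) & ~15 = 224, keeping the stack pointer aligned as the ABI
   requires.  The real constants live in tcg-target.h.  */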

#if defined(CONFIG_SOFTMMU)

#include "exec/softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO_IDX contains the index into ARGS of the low part of the
   address; the high part of the address is at ADDRLO_IDX + 1.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

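/* A rough schematic of the fast path emitted below, with register roles
   as described in the comment above (shift widths and the various
   special cases simplified; see the code for the exact sequence):

       srl    addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, %o1
       and    addr, TARGET_PAGE_MASK | align_mask, %o0
       and    %o1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS, %o1
       add    env, %o1, %o1            ! &env->tlb_table[mem_index][idx]
       ld[x]  [%o1 + which], %o2       ! tlb comparator
       ld[x]  [%o1 + addend_ofs], %o1  ! tlb addend
       subcc  %o0, %o2, %g0            ! condition codes hold the verdict

   The caller branches on %icc/%xcc and, on a hit, uses the addend in
   %o1 as the base register for the actual memory access.  */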
static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
                            int s_bits, const TCGArg *args, int which)
{
    const int addrlo = args[addrlo_idx];
    const int r0 = TCG_REG_O0;
    const int r1 = TCG_REG_O1;
    const int r2 = TCG_REG_O2;
    int addr = addrlo;
    int tlb_ofs;

    if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
        /* Assemble the 64-bit address in R0. */
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        tcg_out_arithi(s, r1, args[addrlo_idx + 1], 32, SHIFT_SLLX);
        tcg_out_arith(s, r0, r0, r1, ARITH_OR);
    }

    /* Shift the page number down to tlb-entry. */
    tcg_out_arithi(s, r1, addrlo,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment. */
    tcg_out_andi(s, r0, addr, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Compute tlb index, modulo tlb size. */
    tcg_out_andi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* Relative to the current ENV. */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend. */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        tcg_out_addi(s, r1, tlb_ofs);
        tlb_ofs = 0;
    }

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now. */
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        return r0;
    }
    return addrlo;
}
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[8] = {
#ifdef TARGET_WORDS_BIGENDIAN
    LDUB, LDUH, LDUW, LDX, LDSB, LDSH, LDSW, LDX
#else
    LDUB, LDUH_LE, LDUW_LE, LDX_LE, LDSB, LDSH_LE, LDSW_LE, LDX_LE
#endif
};
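
/* The index into qemu_ld_opc is the SIZEOP passed to tcg_out_qemu_ld:
   bits 1:0 give log2(size) and bit 2 requests sign extension, so e.g.
   sizeop 1|4 is a signed 16-bit load and selects LDSH (or LDSH_LE).
   Stores carry no signedness, hence only four entries below.  */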

static const int qemu_st_opc[4] = {
#ifdef TARGET_WORDS_BIGENDIAN
    STB, STH, STW, STX
#else
    STB, STH_LE, STW_LE, STX_LE
#endif
};

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, s_bits, n;
    uint32_t *label_ptr[2];
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];
    s_bits = sizeop & 3;

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, s_bits, args,
                                offsetof(CPUTLBEntry, addr_read));

    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64;

        /* bne,pn %[xi]cc, label0 */
        label_ptr[0] = (uint32_t *)s->code_ptr;
        tcg_out_bpcc0(s, COND_NE, BPCC_PN
                      | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);

        /* TLB Hit. */
        /* Load all 64-bits into an O/G register. */
        reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
        tcg_out_ldst_rr(s, reg64, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);

        /* Move the two 32-bit pieces into the destination registers. */
        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }

        /* b,a,pt label1 */
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
    } else {
        /* The fast path is exactly one insn.  Thus we can perform the
           entire TLB Hit in the (annulled) delay slot of the branch
           over the TLB Miss case.  */

        /* beq,a,pt %[xi]cc, label0 */
        label_ptr[0] = NULL;
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                      | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
        /* delay slot */
        tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
    }

    /* TLB Miss. */

    if (label_ptr[0]) {
        *label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
                                    (unsigned long)label_ptr[0]);
    }
    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);

    /* qemu_ld_helper[s_bits](arg0, arg1) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[n], memi);

    n = tcg_target_call_oarg_regs[0];
    /* datalo = sign_extend(arg0) */
    switch (sizeop) {
    case 0 | 4:
        /* Recall that SRA sign extends from bit 31 through bit 63. */
        tcg_out_arithi(s, datalo, n, 24, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 24, SHIFT_SRA);
        break;
    case 1 | 4:
        tcg_out_arithi(s, datalo, n, 16, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 16, SHIFT_SRA);
        break;
    case 2 | 4:
        tcg_out_arithi(s, datalo, n, 0, SHIFT_SRA);
        break;
    case 3:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_mov(s, TCG_TYPE_REG, datahi, n);
            tcg_out_mov(s, TCG_TYPE_REG, datalo, n + 1);
            break;
        }
        /* FALLTHRU */
    case 0:
    case 1:
    case 2:
    default:
        /* mov */
        tcg_out_mov(s, TCG_TYPE_REG, datalo, n);
        break;
    }

    *label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
                                (unsigned long)label_ptr[1]);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);

        tcg_out_ldst_rr(s, reg64, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);

        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }
    } else {
        tcg_out_ldst_rr(s, datalo, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);
    }
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, n, datafull;
    uint32_t *label_ptr;
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, sizeop, args,
                                offsetof(CPUTLBEntry, addr_write));

    datafull = datalo;
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        /* Reconstruct the full 64-bit value. */
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datafull = TCG_REG_O2;
    }

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = (uint32_t *)s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, datafull, addr_reg, TCG_REG_O1, qemu_st_opc[sizeop]);

    /* TLB Miss. */

    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datahi);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datalo);

    /* qemu_st_helper[s_bits](arg0, arg1, arg2) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[sizeop]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n], memi);

    *label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
                             (unsigned long)label_ptr);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datalo = TCG_REG_O2;
    }
    tcg_out_ldst_rr(s, datalo, addr_reg,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[sizeop]);
#endif /* CONFIG_SOFTMMU */
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
        tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
                  INSN_IMM13(8));
        tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
                  INSN_RS2(TCG_REG_G0));
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            uint32_t old_insn = *(uint32_t *)s->code_ptr;
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            /* Make sure to preserve links during retranslation. */
            tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_T1,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
                      INSN_RS2(TCG_REG_G0));
        }
        tcg_out_nop(s);
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
                                   - (tcg_target_ulong)s->code_ptr) >> 2)
                                 & 0x3fffffff));
        } else {
            tcg_out_ld_ptr(s, TCG_REG_T1,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_T1) |
                      INSN_RS2(TCG_REG_G0));
        }
        /* delay slot */
        tcg_out_nop(s);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, args[0]);
        tcg_out_nop(s);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

#if TCG_TARGET_REG_BITS == 64
#define OP_32_64(x) \
        glue(glue(case INDEX_op_, x), _i32): \
        glue(glue(case INDEX_op_, x), _i64)
#else
#define OP_32_64(x) \
        glue(glue(case INDEX_op_, x), _i32)
#endif
    OP_32_64(ld8u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
        break;
    case INDEX_op_ld_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, args[0], args[1], args[2], STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, args[0], args[1], args[2], STH);
        break;
    case INDEX_op_st_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
        break;

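    /* Purely descriptive: SPARC has no 32-bit remainder insn, so the case
       below computes rem = x - (x / y) * y, e.g. 7 % 3 is udiv -> 2,
       umul -> 6, sub -> 1.  */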
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        tcg_out_div32(s, TCG_REG_T1, args[1], args[2], const_args[2],
                      opc == INDEX_op_remu_i32);
        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                       ARITH_UMUL);
        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], args[0], args[1],
                            args[2], const_args[2], args[3], const_args[3]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2_i32(s, args[4], args[0], args[1],
                            args[2], const_args[2],
                            args[3], const_args[3], args[5]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
                             args[3], const_args[3],
                             args[4], const_args[4]);
        break;
    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
                        args[4], const_args[4], args[5], const_args[5],
                        ARITH_ADDCC, ARITH_ADDX);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
                        args[4], const_args[4], args[5], const_args[5],
                        ARITH_SUBCC, ARITH_SUBX);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
                       ARITH_UMUL);
        tcg_out_rdy(s, args[1]);
        break;
#endif

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32u:
#endif
        tcg_out_qemu_ld(s, args, 2);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, 2 | 4);
        break;
#endif
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;
    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        tcg_out_arithc(s, TCG_REG_T1, args[1], args[2], const_args[2],
                       opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                       ARITH_MULX);
        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
        break;
    case INDEX_op_ext32s_i64:
        if (const_args[1]) {
            tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
        }
        break;
    case INDEX_op_ext32u_i64:
        if (const_args[1]) {
            tcg_out_movi_imm32(s, args[0], args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
        }
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], args[0], args[1],
                            args[2], const_args[2], args[3], const_args[3]);
        break;
#endif
    gen_arith:
        tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
        break;

    gen_arith1:
        tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}

static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_rem_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_remu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "rZ", "r" } },
    { INDEX_op_st16_i64, { "rZ", "r" } },
    { INDEX_op_st32_i64, { "rZ", "r" } },
    { INDEX_op_st_i64, { "rZ", "r" } },

    { INDEX_op_add_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_rem_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_remu_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i64, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i64, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i64, { "r", "rJ" } },
    { INDEX_op_not_i64, { "r", "rJ" } },

    { INDEX_op_ext32s_i64, { "r", "ri" } },
    { INDEX_op_ext32u_i64, { "r", "ri" } },

    { INDEX_op_brcond_i64, { "rZ", "rJ" } },
    { INDEX_op_setcond_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i64, { "r", "rZ", "rJ", "rI", "0" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },

    { INDEX_op_qemu_st8, { "L", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif

    { -1 },
};

static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
#if TCG_TARGET_REG_BITS == 64
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
#endif
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_G1) |
                     (1 << TCG_REG_G2) |
                     (1 << TCG_REG_G3) |
                     (1 << TCG_REG_G4) |
                     (1 << TCG_REG_G5) |
                     (1 << TCG_REG_G6) |
                     (1 << TCG_REG_G7) |
                     (1 << TCG_REG_O0) |
                     (1 << TCG_REG_O1) |
                     (1 << TCG_REG_O2) |
                     (1 << TCG_REG_O3) |
                     (1 << TCG_REG_O4) |
                     (1 << TCG_REG_O5) |
                     (1 << TCG_REG_O7));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */

    tcg_add_target_add_op_defs(sparc_op_defs);
}

#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE EM_SPARCV9
#else
# define ELF_HOST_MACHINE EM_SPARC32PLUS
# define ELF_HOST_FLAGS EF_SPARC_32PLUS
#endif

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    tcg_target_long func_start __attribute__((packed));
    tcg_target_long func_len __attribute__((packed));
    uint8_t def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];
    uint8_t win_save;
    uint8_t ret_save[3];
} DebugFrameFDE;

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDE fde;
} DebugFrame;

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = -sizeof(void *) & 0x7f,
    .cie.return_column = 15, /* o7 */

    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
    .fde.def_cfa = {
#if TCG_TARGET_REG_BITS == 64
        12, 30, /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30 /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde.win_save = 0x2d, /* DW_CFA_GNU_window_save */
    .fde.ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    debug_frame.fde.func_start = (tcg_target_long) buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}

void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    uint32_t *ptr = (uint32_t *)jmp_addr;
    tcg_target_long disp = (tcg_target_long)(addr - jmp_addr) >> 2;

    /* We can reach the entire address space for 32-bit.  For 64-bit
       the code_gen_buffer can't be larger than 2GB. */
    if (TCG_TARGET_REG_BITS == 64 && !check_fit_tl(disp, 30)) {
        tcg_abort();
    }

    *ptr = CALL | (disp & 0x3fffffff);
    flush_icache_range(jmp_addr, jmp_addr + 4);
}
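
/* An example of the patching above: a CALL insn is "01" in the top two
   bits followed by a 30-bit word displacement, so redirecting a jump to
   a target 64 bytes ahead stores 0x40000000 | (64 >> 2) = 0x40000010
   and then flushes the four patched bytes from the icache.  */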