]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/sparc/tcg-target.c
tcg: Add is_unsigned_cond
[mirror_qemu.git] / tcg / sparc / tcg-target.c
CommitLineData
8289b279
BS
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
d4a9eb1f 25#ifndef NDEBUG
8289b279
BS
26static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
27 "%g0",
28 "%g1",
29 "%g2",
30 "%g3",
31 "%g4",
32 "%g5",
33 "%g6",
34 "%g7",
35 "%o0",
36 "%o1",
37 "%o2",
38 "%o3",
39 "%o4",
40 "%o5",
41 "%o6",
42 "%o7",
43 "%l0",
44 "%l1",
45 "%l2",
46 "%l3",
47 "%l4",
48 "%l5",
49 "%l6",
50 "%l7",
51 "%i0",
52 "%i1",
53 "%i2",
54 "%i3",
55 "%i4",
56 "%i5",
57 "%i6",
58 "%i7",
59};
d4a9eb1f 60#endif
8289b279 61
/* Define some temporary registers.  T2 is used for constant generation. */
#define TCG_REG_T1 TCG_REG_G1
#define TCG_REG_T2 TCG_REG_O7

/* Register holding GUEST_BASE in user-mode emulation; %g0 (always zero)
   when guest-base support is compiled out. */
#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif
e141ab52 71
/* Register allocation preference order: call-saved %l and %i registers
   first, then spare globals, then the call-clobbered %o registers. */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};
101
/* Integer argument registers for outgoing calls (SPARC ABI: %o0-%o5). */
static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};
110
/* Registers in which call results are returned to us. */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};
117
57e49b40 118static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
f5ef6aac 119{
57e49b40
BS
120 return (val << ((sizeof(tcg_target_long) * 8 - bits))
121 >> (sizeof(tcg_target_long) * 8 - bits)) == val;
122}
123
/* Return true if the 32-bit value VAL fits in the low BITS bits,
   i.e. all bits above position BITS-1 are zero (1 <= bits <= 32). */
static inline int check_fit_i32(uint32_t val, unsigned int bits)
{
    /* Shifting up and back down discards the top (32 - bits) bits;
       VAL fits exactly when nothing is lost in the round trip. */
    unsigned int drop = 32 - bits;

    return ((val << drop) >> drop) == val;
}
128
/* Apply relocation TYPE at CODE_PTR so it refers to VALUE+ADDEND.
   R_SPARC_32 stores an absolute 32-bit value; the WDISP22/WDISP19
   types patch the pc-relative word displacement field of an already
   emitted branch instruction.  Aborts if the target is out of range. */
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch (type) {
    case R_SPARC_32:
        if (value != (uint32_t)value)
            tcg_abort();
        *(uint32_t *)code_ptr = value;
        break;
    case R_SPARC_WDISP22:
        /* Branch displacements are in words, relative to the branch. */
        value -= (long)code_ptr;
        value >>= 2;
        if (!check_fit_tl(value, 22))
            tcg_abort();
        *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x3fffff) | value;
        break;
    case R_SPARC_WDISP19:
        value -= (long)code_ptr;
        value >>= 2;
        if (!check_fit_tl(value, 19))
            tcg_abort();
        *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x7ffff) | value;
        break;
    default:
        tcg_abort();
    }
}
157
8289b279
BS
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':                   /* any register */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        /* Helper args: exclude %o0-%o2, which are clobbered when the
           slow path calls the softmmu helpers. */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
        break;
    case 'I':                   /* 11-bit signed immediate (movcc) */
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':                   /* 13-bit signed immediate */
        ct->ct |= TCG_CT_CONST_S13;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
190
8289b279
BS
191/* test if a constant matches the constraint */
192static inline int tcg_target_const_match(tcg_target_long val,
193 const TCGArgConstraint *arg_ct)
194{
195 int ct;
196
197 ct = arg_ct->ct;
198 if (ct & TCG_CT_CONST)
199 return 1;
57e49b40 200 else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11))
8289b279 201 return 1;
57e49b40 202 else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13))
8289b279
BS
203 return 1;
204 else
205 return 0;
206}
207
/* Instruction field encodings (see the SPARC V9 architecture manual). */
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

/* Immediate/displacement fields; bit 13 is the "i" (immediate) bit. */
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_OFF22(x) (((x) >> 2) & 0x3fffff)

/* Branch condition field; A is the annul bit. */
#define INSN_COND(x, a) (((x) << 25) | ((a) << 29))
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2))

/* Condition-code selector for MOVcc: 32-bit (%icc) or 64-bit (%xcc). */
#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

/* Arithmetic/logic opcodes. */
#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))

/* Shift opcodes; the X forms (bit 12) are 64-bit shifts. */
#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

/* Control, save/restore, load/store opcodes. */
#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
/* Alternate-space variants, used for little-endian access via ASI. */
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

/* Little-endian memory accesses via the primary-little ASI. */
#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))
315
26cc915c
BS
316static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
317 int op)
318{
319 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
320 INSN_RS2(rs2));
321}
322
6f41b777
BS
323static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
324 uint32_t offset, int op)
26cc915c
BS
325{
326 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
327 INSN_IMM13(offset));
328}
329
ba225198
RH
330static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
331 int val2, int val2const, int op)
332{
333 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
334 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
335}
336
2a534aff
RH
/* Register-to-register move, synthesized as OR with %g0 (always zero). */
static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
}
342
/* Emit SETHI: load the high 22 bits of ARG into RET, zeroing the rest. */
static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}
347
b101234a
BS
/* Load a 13-bit signed immediate into RET via OR with %g0. */
static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}
352
353static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
8289b279 354{
4a09aa89 355 if (check_fit_tl(arg, 13))
b101234a 356 tcg_out_movi_imm13(s, ret, arg);
8289b279 357 else {
26cc915c 358 tcg_out_sethi(s, ret, arg);
8289b279 359 if (arg & 0x3ff)
b101234a 360 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
8289b279
BS
361 }
362}
363
b101234a 364static inline void tcg_out_movi(TCGContext *s, TCGType type,
2a534aff 365 TCGReg ret, tcg_target_long arg)
b101234a 366{
43172207
RH
367 /* All 32-bit constants, as well as 64-bit constants with
368 no high bits set go through movi_imm32. */
369 if (TCG_TARGET_REG_BITS == 32
370 || type == TCG_TYPE_I32
371 || (arg & ~(tcg_target_long)0xffffffff) == 0) {
372 tcg_out_movi_imm32(s, ret, arg);
373 } else if (check_fit_tl(arg, 13)) {
374 /* A 13-bit constant sign-extended to 64-bits. */
375 tcg_out_movi_imm13(s, ret, arg);
376 } else if (check_fit_tl(arg, 32)) {
377 /* A 32-bit constant sign-extended to 64-bits. */
378 tcg_out_sethi(s, ret, ~arg);
379 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
380 } else {
375816f8
RH
381 tcg_out_movi_imm32(s, ret, arg >> (TCG_TARGET_REG_BITS / 2));
382 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
383 tcg_out_movi_imm32(s, TCG_REG_T2, arg);
384 tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
6f41b777 385 }
b101234a
BS
386}
387
a0ce341a
RH
/* Emit a load/store with register + register addressing: [A1 + A2]. */
static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
                                   int a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}
393
a0ce341a
RH
394static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
395 int offset, int op)
8289b279 396{
a0ce341a 397 if (check_fit_tl(offset, 13)) {
8289b279
BS
398 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
399 INSN_IMM13(offset));
a0ce341a 400 } else {
375816f8
RH
401 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
402 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
cf7c2ca5 403 }
8289b279
BS
404}
405
2a534aff
RH
/* Load a TCG value: 32-bit zero-extending LDUW for I32, LDX otherwise. */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}
411
2a534aff
RH
/* Store a TCG value: 32-bit STW for I32, 64-bit STX otherwise. */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}
417
/* Load a pointer-sized value from absolute address ARG into RET:
   materialize the address rounded down to 1KB in RET, then load with
   the low 10 bits as offset.
   NOTE(review): if ARG itself fits in 10 signed bits the movi is
   skipped and the load uses RET's previous contents as base —
   presumably ARG is never that small at the call sites; confirm
   before reusing this helper. */
static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    if (!check_fit_tl(arg, 10)) {
        tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    }
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}
426
583d1215 427static inline void tcg_out_sety(TCGContext *s, int rs)
8289b279 428{
583d1215 429 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
8289b279
BS
430}
431
7a3766f3
RH
/* Read the Y register into RD (used to fetch the high multiply result). */
static inline void tcg_out_rdy(TCGContext *s, int rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}
436
8289b279
BS
437static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
438{
439 if (val != 0) {
57e49b40 440 if (check_fit_tl(val, 13))
8289b279 441 tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
f5ef6aac 442 else {
375816f8
RH
443 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
444 tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);
f5ef6aac 445 }
8289b279
BS
446 }
447}
448
a0ce341a
RH
/* Emit RD = RS & VAL.
   NOTE(review): VAL == 0 emits nothing and leaves RD unchanged, so
   callers must never rely on "and with zero clears RD" — the visible
   callers (the TLB-load masks below) appear to pass non-zero masks,
   but confirm for any new use. */
static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
                                tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13))
            tcg_out_arithi(s, rd, rs, val, ARITH_AND);
        else {
            /* Mask too wide for the immediate field: build it in T1. */
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T1, val);
            tcg_out_arith(s, rd, rs, TCG_REG_T1, ARITH_AND);
        }
    }
}
461
583d1215
RH
/* Emit a 32-bit division RD = RS1 / VAL2 (signed or unsigned per UNS).
   The V8 divide instructions take the upper 32 bits of the dividend
   from the Y register, which must be set up first. */
static void tcg_out_div32(TCGContext *s, int rd, int rs1,
                          int val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits. */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        /* Broadcast the sign bit of RS1 into T1, then into Y. */
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
476
8289b279
BS
/* Emit a nop: canonical SPARC encoding is "sethi 0, %g0". */
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_sethi(s, TCG_REG_G0, 0);
}
481
1da92db2 482static void tcg_out_branch_i32(TCGContext *s, int opc, int label_index)
cf7c2ca5 483{
cf7c2ca5 484 TCGLabel *l = &s->labels[label_index];
f4bf0b91 485 uint32_t off22;
cf7c2ca5
BS
486
487 if (l->has_value) {
f4bf0b91 488 off22 = INSN_OFF22(l->u.value - (unsigned long)s->code_ptr);
f5ef6aac 489 } else {
f4bf0b91
RH
490 /* Make sure to preserve destinations during retranslation. */
491 off22 = *(uint32_t *)s->code_ptr & INSN_OFF22(-1);
f5ef6aac 492 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP22, label_index, 0);
f5ef6aac 493 }
f4bf0b91 494 tcg_out32(s, INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x2) | off22);
cf7c2ca5
BS
495}
496
a212ea75 497#if TCG_TARGET_REG_BITS == 64
1da92db2
BS
498static void tcg_out_branch_i64(TCGContext *s, int opc, int label_index)
499{
1da92db2 500 TCGLabel *l = &s->labels[label_index];
f4bf0b91 501 uint32_t off19;
1da92db2
BS
502
503 if (l->has_value) {
f4bf0b91 504 off19 = INSN_OFF19(l->u.value - (unsigned long)s->code_ptr);
1da92db2 505 } else {
f4bf0b91
RH
506 /* Make sure to preserve destinations during retranslation. */
507 off19 = *(uint32_t *)s->code_ptr & INSN_OFF19(-1);
1da92db2 508 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label_index, 0);
1da92db2 509 }
f4bf0b91
RH
510 tcg_out32(s, (INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x1) |
511 (0x5 << 19) | off19));
1da92db2
BS
512}
513#endif
514
cf7c2ca5
BS
/* Map TCG comparison conditions to SPARC branch condition codes. */
static const uint8_t tcg_cond_to_bcond[10] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};
527
56f4927e
RH
/* Set the condition codes from C1 - C2, discarding the result in %g0. */
static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}
532
/* Compare ARG1 against ARG2 and branch on COND to LABEL_INDEX, with a
   nop filling the branch delay slot. */
static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond,
                               TCGArg arg1, TCGArg arg2, int const_arg2,
                               int label_index)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_branch_i32(s, tcg_cond_to_bcond[cond], label_index);
    tcg_out_nop(s);
}
541
#if TCG_TARGET_REG_BITS == 64
/* 64-bit compare-and-branch: compare, branch on %xcc, nop delay slot. */
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond,
                               TCGArg arg1, TCGArg arg2, int const_arg2,
                               int label_index)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_branch_i64(s, tcg_cond_to_bcond[cond], label_index);
    tcg_out_nop(s);
}
#else
/* Double-word compare-and-branch for 32-bit hosts: compare the high
   words first, then (where needed) the low words.
   NOTE(review): cc here is INSN_COND(...) pre-shifted, yet
   tcg_out_branch_i32 applies INSN_COND to its opc argument again,
   while tcg_out_brcond_i32 passes a raw condition code — one of the
   two conventions looks wrong; verify the intended encoding. */
static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
                                TCGArg al, TCGArg ah,
                                TCGArg bl, int blconst,
                                TCGArg bh, int bhconst, int label_dest)
{
    int cc, label_next = gen_new_label();

    tcg_out_cmp(s, ah, bh, bhconst);

    /* Note that we fill one of the delay slots with the second compare. */
    switch (cond) {
    case TCG_COND_EQ:
        /* High words differ -> not equal; otherwise test low words. */
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
        tcg_out_branch_i32(s, cc, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_EQ], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        break;

    case TCG_COND_NE:
        /* Either half differing means not-equal. */
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_branch_i32(s, cc, label_dest);
        break;

    default:
        /* ??? One could fairly easily special-case 64-bit unsigned
           compares against 32-bit zero-extended constants. For instance,
           we know that (unsigned)AH < 0 is false and need not emit it.
           Similarly, (unsigned)AH > 0 being true implies AH != 0, so the
           second branch will never be taken. */
        cc = INSN_COND(tcg_cond_to_bcond[cond], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        tcg_out_nop(s);
        cc = INSN_COND(tcg_cond_to_bcond[TCG_COND_NE], 0);
        tcg_out_branch_i32(s, cc, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        /* Low words compare unsigned regardless of the signedness of
           the overall comparison. */
        cc = INSN_COND(tcg_cond_to_bcond[tcg_unsigned_cond(cond)], 0);
        tcg_out_branch_i32(s, cc, label_dest);
        break;
    }
    tcg_out_nop(s);

    tcg_out_label(s, label_next, s->code_ptr);
}
#endif
599
/* RET = (C1 cond C2) as 0/1 for 32-bit operands. */
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    TCGArg t;

    /* For 32-bit comparisons, we can play games with ADDX/SUBX. */
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* Reduce to a compare against zero: x==y iff (x^y)==0, then
           use an unsigned compare of %g0 against the xor result. */
        if (c2 != 0) {
            tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
        }
        c1 = TCG_REG_G0, c2 = ret, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_LEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_GEU:
        /* Swap operands so only LTU/LEU remain; a constant operand
           must be moved into a register first. */
        if (c2const && c2 != 0) {
            tcg_out_movi_imm13(s, TCG_REG_T1, c2);
            c2 = TCG_REG_T1;
        }
        t = c1, c1 = c2, c2 = t, c2const = 0;
        cond = tcg_swap_cond(cond);
        break;

    case TCG_COND_LTU:
    case TCG_COND_LEU:
        break;

    default:
        /* Signed conditions: compare, zero RET, then conditional-move 1. */
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out32(s, ARITH_MOVCC | INSN_RD(ret)
                  | INSN_RS1(tcg_cond_to_bcond[cond])
                  | MOVCC_ICC | INSN_IMM11(1));
        return;
    }

    /* LTU: RET = 0 + carry;  LEU: RET = 0 - (-1) - carry = !carry... 
       i.e. materialize the carry flag produced by the compare. */
    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
    }
}
646
647#if TCG_TARGET_REG_BITS == 64
8a56e840 648static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
dbfe80e1
RH
649 TCGArg c1, TCGArg c2, int c2const)
650{
651 tcg_out_cmp(s, c1, c2, c2const);
652 tcg_out_movi_imm13(s, ret, 0);
653 tcg_out32 (s, ARITH_MOVCC | INSN_RD(ret)
654 | INSN_RS1(tcg_cond_to_bcond[cond])
655 | MOVCC_XCC | INSN_IMM11(1));
656}
657#else
8a56e840 658static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
dbfe80e1
RH
659 TCGArg al, TCGArg ah,
660 TCGArg bl, int blconst,
661 TCGArg bh, int bhconst)
662{
663 int lab;
664
665 switch (cond) {
666 case TCG_COND_EQ:
375816f8 667 tcg_out_setcond_i32(s, TCG_COND_EQ, TCG_REG_T1, al, bl, blconst);
dbfe80e1 668 tcg_out_setcond_i32(s, TCG_COND_EQ, ret, ah, bh, bhconst);
375816f8 669 tcg_out_arith(s, ret, ret, TCG_REG_T1, ARITH_AND);
dbfe80e1
RH
670 break;
671
672 case TCG_COND_NE:
375816f8 673 tcg_out_setcond_i32(s, TCG_COND_NE, TCG_REG_T1, al, al, blconst);
dbfe80e1 674 tcg_out_setcond_i32(s, TCG_COND_NE, ret, ah, bh, bhconst);
375816f8 675 tcg_out_arith(s, ret, ret, TCG_REG_T1, ARITH_OR);
dbfe80e1
RH
676 break;
677
678 default:
679 lab = gen_new_label();
680
681 tcg_out_cmp(s, ah, bh, bhconst);
682 tcg_out_branch_i32(s, INSN_COND(tcg_cond_to_bcond[cond], 1), lab);
683 tcg_out_movi_imm13(s, ret, 1);
684 tcg_out_branch_i32(s, INSN_COND(COND_NE, 1), lab);
685 tcg_out_movi_imm13(s, ret, 0);
686
687 tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), ret, al, bl, blconst);
688
9d6fca70 689 tcg_out_label(s, lab, s->code_ptr);
dbfe80e1
RH
690 break;
691 }
692}
693#endif
694
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer. */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees. */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    /* save %sp, -frame_size, %sp: open a new register window + frame. */
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        /* Pin GUEST_BASE in its dedicated register for the whole run. */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* Tail-jump to the TB code pointer passed in %i1 (second argument
       of the call into generated code). */
    tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I1) |
              INSN_RS2(TCG_REG_G0));
    /* delay slot */
    tcg_out_nop(s);

    /* No epilogue required. We issue ret + restore directly in the TB. */
}
729
#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
/* Indexed by log2 of the access size (byte/halfword/word/doubleword). */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
f5ef6aac 751
/* Perform the TLB load and compare.

   Inputs:
   ADDRLO_IDX contains the index into ARGS of the low part of the
   address; the high part of the address is at ADDR_LOW_IDX+1.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
                            int s_bits, const TCGArg *args, int which)
{
    const int addrlo = args[addrlo_idx];
    const int r0 = TCG_REG_O0;
    const int r1 = TCG_REG_O1;
    const int r2 = TCG_REG_O2;
    int addr = addrlo;
    int tlb_ofs;

    if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
        /* Assemble the 64-bit address in R0.  */
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        tcg_out_arithi(s, r1, args[addrlo_idx + 1], 32, SHIFT_SLLX);
        tcg_out_arith(s, r0, r0, r1, ARITH_OR);
    }

    /* Shift the page number down to tlb-entry.  */
    tcg_out_arithi(s, r1, addrlo,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.  */
    tcg_out_andi(s, r0, addr, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Compute tlb index, modulo tlb size.  */
    tcg_out_andi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        /* Offset won't fit in the 13-bit load immediate: fold it into
           the base register and address the entry at offset zero. */
        tcg_out_addi(s, r1, tlb_ofs);
        tlb_ofs = 0;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        return r0;
    }
    return addrlo;
}
#endif /* CONFIG_SOFTMMU */
818
/* Load opcodes indexed by sizeop: bits 0-1 are log2 of the access size,
   bit 2 selects sign extension.  Little-endian targets use the
   ASI_PRIMARY_LITTLE variants. */
static const int qemu_ld_opc[8] = {
#ifdef TARGET_WORDS_BIGENDIAN
    LDUB, LDUH, LDUW, LDX, LDSB, LDSH, LDSW, LDX
#else
    LDUB, LDUH_LE, LDUW_LE, LDX_LE, LDSB, LDSH_LE, LDSW_LE, LDX_LE
#endif
};

/* Store opcodes indexed by log2 of the access size. */
static const int qemu_st_opc[4] = {
#ifdef TARGET_WORDS_BIGENDIAN
    STB, STH, STW, STX
#else
    STB, STH_LE, STW_LE, STX_LE
#endif
};
bffe1431 834
/* Emit a guest memory load.  ARGS holds the data register(s), the
   address register(s), and (softmmu) the mem_index; SIZEOP encodes the
   access size and signedness as for qemu_ld_opc.  With softmmu this
   emits the inline TLB fast path plus a helper-call slow path, and
   patches the two forward branches by hand. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, s_bits, n;
    uint32_t *label_ptr[2];
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        /* 64-bit data on a 32-bit host occupies two registers. */
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];
    s_bits = sizeop & 3;

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, s_bits, args,
                                offsetof(CPUTLBEntry, addr_read));

    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64;

        /* bne,pn %[xi]cc, label0 */
        label_ptr[0] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_NE, 0) | INSN_OP2(0x1)
                      | ((TARGET_LONG_BITS == 64) << 21)));

        /* TLB Hit.  */
        /* Load all 64-bits into an O/G register.  */
        reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
        tcg_out_ldst_rr(s, reg64, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);

        /* Move the two 32-bit pieces into the destination registers.  */
        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }

        /* b,a,pt label1 */
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x1)
                      | (1 << 29) | (1 << 19)));
    } else {
        /* The fast path is exactly one insn.  Thus we can perform the
           entire TLB Hit in the (annulled) delay slot of the branch
           over the TLB Miss case.  */

        /* beq,a,pt %[xi]cc, label0 */
        label_ptr[0] = NULL;
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
                      | ((TARGET_LONG_BITS == 64) << 21)
                      | (1 << 29) | (1 << 19)));
        /* delay slot */
        tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
    }

    /* TLB Miss.  */

    if (label_ptr[0]) {
        /* Patch the bne above to branch here. */
        *label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
                                    (unsigned long)label_ptr[0]);
    }
    /* Marshal the helper arguments: env, (high) address, low address. */
    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);

    /* qemu_ld_helper[s_bits](arg0, arg1) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[n], memi);

    n = tcg_target_call_oarg_regs[0];
    /* datalo = sign_extend(arg0) */
    switch (sizeop) {
    case 0 | 4:
        /* Recall that SRA sign extends from bit 31 through bit 63.  */
        tcg_out_arithi(s, datalo, n, 24, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 24, SHIFT_SRA);
        break;
    case 1 | 4:
        tcg_out_arithi(s, datalo, n, 16, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 16, SHIFT_SRA);
        break;
    case 2 | 4:
        tcg_out_arithi(s, datalo, n, 0, SHIFT_SRA);
        break;
    case 3:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_mov(s, TCG_TYPE_REG, datahi, n);
            tcg_out_mov(s, TCG_TYPE_REG, datalo, n + 1);
            break;
        }
        /* FALLTHRU */
    case 0:
    case 1:
    case 2:
    default:
        /* mov */
        tcg_out_mov(s, TCG_TYPE_REG, datalo, n);
        break;
    }

    /* Patch the fast-path branch to land here, after the slow path. */
    *label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
                                (unsigned long)label_ptr[1]);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        /* Zero-extend the 32-bit guest address. */
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);

        tcg_out_ldst_rr(s, reg64, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);

        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }
    } else {
        tcg_out_ldst_rr(s, datalo, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);
    }
#endif /* CONFIG_SOFTMMU */
}
974
a0ce341a 975static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
f5ef6aac 976{
a0ce341a 977 int addrlo_idx = 1, datalo, datahi, addr_reg;
f5ef6aac 978#if defined(CONFIG_SOFTMMU)
a0ce341a
RH
979 int memi_idx, memi, n;
980 uint32_t *label_ptr;
f5ef6aac
BS
981#endif
982
a0ce341a
RH
983 datahi = datalo = args[0];
984 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
985 datahi = args[1];
986 addrlo_idx = 2;
987 }
f5ef6aac 988
f5ef6aac 989#if defined(CONFIG_SOFTMMU)
a0ce341a
RH
990 memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
991 memi = args[memi_idx];
992
993 addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, sizeop, args,
994 offsetof(CPUTLBEntry, addr_write));
995
996 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
375816f8
RH
997 /* Reconstruct the full 64-bit value. */
998 tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
a0ce341a 999 tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
375816f8
RH
1000 tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
1001 datalo = TCG_REG_O2;
a0ce341a 1002 }
f5ef6aac 1003
a0ce341a
RH
1004 /* The fast path is exactly one insn. Thus we can perform the entire
1005 TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
1006 /* beq,a,pt %[xi]cc, label0 */
1007 label_ptr = (uint32_t *)s->code_ptr;
1008 tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
1009 | ((TARGET_LONG_BITS == 64) << 21)
1010 | (1 << 29) | (1 << 19)));
1011 /* delay slot */
1012 tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_st_opc[sizeop]);
1013
1014 /* TLB Miss. */
1015
1016 n = 0;
1017 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
1018 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1019 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
1020 args[addrlo_idx + 1]);
1021 }
1022 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
1023 args[addrlo_idx]);
1024 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
1025 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datahi);
1026 }
1027 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datalo);
53c37487 1028
53c37487 1029 /* qemu_st_helper[s_bits](arg0, arg1, arg2) */
a0ce341a 1030 tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[sizeop]
f5ef6aac
BS
1031 - (tcg_target_ulong)s->code_ptr) >> 2)
1032 & 0x3fffffff));
a0ce341a
RH
1033 /* delay slot */
1034 tcg_out_movi(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n], memi);
f5ef6aac 1035
a0ce341a
RH
1036 *label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
1037 (unsigned long)label_ptr);
8384dd67 1038#else
a0ce341a
RH
1039 addr_reg = args[addrlo_idx];
1040 if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
375816f8
RH
1041 tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
1042 addr_reg = TCG_REG_T1;
f5ef6aac 1043 }
a0ce341a 1044 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
375816f8 1045 tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
a0ce341a 1046 tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
375816f8
RH
1047 tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
1048 datalo = TCG_REG_O2;
a0ce341a 1049 }
c6f7e4fb
RH
1050 tcg_out_ldst_rr(s, datalo, addr_reg,
1051 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
1052 qemu_st_opc[sizeop]);
a0ce341a 1053#endif /* CONFIG_SOFTMMU */
f5ef6aac
BS
1054}
1055
a9751609 1056static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
8289b279
BS
1057 const int *const_args)
1058{
1059 int c;
1060
1061 switch (opc) {
1062 case INDEX_op_exit_tb:
b3db8758
BS
1063 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
1064 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
8289b279 1065 INSN_IMM13(8));
b3db8758
BS
1066 tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
1067 INSN_RS2(TCG_REG_G0));
8289b279
BS
1068 break;
1069 case INDEX_op_goto_tb:
1070 if (s->tb_jmp_offset) {
1071 /* direct jump method */
5bbd2cae 1072 uint32_t old_insn = *(uint32_t *)s->code_ptr;
8289b279 1073 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
5bbd2cae
RH
1074 /* Make sure to preserve links during retranslation. */
1075 tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
8289b279
BS
1076 } else {
1077 /* indirect jump method */
375816f8
RH
1078 tcg_out_ld_ptr(s, TCG_REG_T1,
1079 (tcg_target_long)(s->tb_next + args[0]));
1080 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
b3db8758 1081 INSN_RS2(TCG_REG_G0));
8289b279 1082 }
53cd9273 1083 tcg_out_nop(s);
8289b279
BS
1084 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1085 break;
1086 case INDEX_op_call:
375816f8 1087 if (const_args[0]) {
bffe1431
BS
1088 tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
1089 - (tcg_target_ulong)s->code_ptr) >> 2)
1090 & 0x3fffffff));
375816f8
RH
1091 } else {
1092 tcg_out_ld_ptr(s, TCG_REG_T1,
bffe1431 1093 (tcg_target_long)(s->tb_next + args[0]));
375816f8 1094 tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_T1) |
bffe1431 1095 INSN_RS2(TCG_REG_G0));
8289b279 1096 }
4c3204cb
RH
1097 /* delay slot */
1098 tcg_out_nop(s);
8289b279 1099 break;
8289b279 1100 case INDEX_op_br:
1da92db2 1101 tcg_out_branch_i32(s, COND_A, args[0]);
f5ef6aac 1102 tcg_out_nop(s);
8289b279
BS
1103 break;
1104 case INDEX_op_movi_i32:
1105 tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
1106 break;
1107
a212ea75 1108#if TCG_TARGET_REG_BITS == 64
8289b279 1109#define OP_32_64(x) \
ba225198
RH
1110 glue(glue(case INDEX_op_, x), _i32): \
1111 glue(glue(case INDEX_op_, x), _i64)
8289b279
BS
1112#else
1113#define OP_32_64(x) \
ba225198 1114 glue(glue(case INDEX_op_, x), _i32)
8289b279 1115#endif
ba225198 1116 OP_32_64(ld8u):
8289b279
BS
1117 tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
1118 break;
ba225198 1119 OP_32_64(ld8s):
8289b279
BS
1120 tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
1121 break;
ba225198 1122 OP_32_64(ld16u):
8289b279
BS
1123 tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
1124 break;
ba225198 1125 OP_32_64(ld16s):
8289b279
BS
1126 tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
1127 break;
1128 case INDEX_op_ld_i32:
a212ea75 1129#if TCG_TARGET_REG_BITS == 64
53cd9273 1130 case INDEX_op_ld32u_i64:
8289b279
BS
1131#endif
1132 tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
1133 break;
ba225198 1134 OP_32_64(st8):
8289b279
BS
1135 tcg_out_ldst(s, args[0], args[1], args[2], STB);
1136 break;
ba225198 1137 OP_32_64(st16):
8289b279
BS
1138 tcg_out_ldst(s, args[0], args[1], args[2], STH);
1139 break;
1140 case INDEX_op_st_i32:
a212ea75 1141#if TCG_TARGET_REG_BITS == 64
53cd9273 1142 case INDEX_op_st32_i64:
8289b279
BS
1143#endif
1144 tcg_out_ldst(s, args[0], args[1], args[2], STW);
1145 break;
ba225198 1146 OP_32_64(add):
53cd9273 1147 c = ARITH_ADD;
ba225198
RH
1148 goto gen_arith;
1149 OP_32_64(sub):
8289b279 1150 c = ARITH_SUB;
ba225198
RH
1151 goto gen_arith;
1152 OP_32_64(and):
8289b279 1153 c = ARITH_AND;
ba225198 1154 goto gen_arith;
dc69960d
RH
1155 OP_32_64(andc):
1156 c = ARITH_ANDN;
1157 goto gen_arith;
ba225198 1158 OP_32_64(or):
8289b279 1159 c = ARITH_OR;
ba225198 1160 goto gen_arith;
18c8f7a3
RH
1161 OP_32_64(orc):
1162 c = ARITH_ORN;
1163 goto gen_arith;
ba225198 1164 OP_32_64(xor):
8289b279 1165 c = ARITH_XOR;
ba225198 1166 goto gen_arith;
8289b279
BS
1167 case INDEX_op_shl_i32:
1168 c = SHIFT_SLL;
1fd95946
RH
1169 do_shift32:
1170 /* Limit immediate shift count lest we create an illegal insn. */
1171 tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c);
1172 break;
8289b279
BS
1173 case INDEX_op_shr_i32:
1174 c = SHIFT_SRL;
1fd95946 1175 goto do_shift32;
8289b279
BS
1176 case INDEX_op_sar_i32:
1177 c = SHIFT_SRA;
1fd95946 1178 goto do_shift32;
8289b279
BS
1179 case INDEX_op_mul_i32:
1180 c = ARITH_UMUL;
ba225198 1181 goto gen_arith;
583d1215 1182
4b5a85c1
RH
1183 OP_32_64(neg):
1184 c = ARITH_SUB;
1185 goto gen_arith1;
be6551b1
RH
1186 OP_32_64(not):
1187 c = ARITH_ORN;
1188 goto gen_arith1;
4b5a85c1 1189
583d1215
RH
1190 case INDEX_op_div_i32:
1191 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
1192 break;
1193 case INDEX_op_divu_i32:
1194 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
1195 break;
1196
1197 case INDEX_op_rem_i32:
1198 case INDEX_op_remu_i32:
375816f8 1199 tcg_out_div32(s, TCG_REG_T1, args[1], args[2], const_args[2],
583d1215 1200 opc == INDEX_op_remu_i32);
375816f8 1201 tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
583d1215 1202 ARITH_UMUL);
375816f8 1203 tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
583d1215 1204 break;
8289b279
BS
1205
1206 case INDEX_op_brcond_i32:
1da92db2
BS
1207 tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
1208 args[3]);
8289b279 1209 break;
dbfe80e1
RH
1210 case INDEX_op_setcond_i32:
1211 tcg_out_setcond_i32(s, args[3], args[0], args[1],
1212 args[2], const_args[2]);
1213 break;
1214
56f4927e
RH
1215#if TCG_TARGET_REG_BITS == 32
1216 case INDEX_op_brcond2_i32:
1217 tcg_out_brcond2_i32(s, args[4], args[0], args[1],
1218 args[2], const_args[2],
1219 args[3], const_args[3], args[5]);
1220 break;
dbfe80e1
RH
1221 case INDEX_op_setcond2_i32:
1222 tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
1223 args[3], const_args[3],
1224 args[4], const_args[4]);
1225 break;
7a3766f3
RH
1226 case INDEX_op_add2_i32:
1227 tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
1228 ARITH_ADDCC);
1229 tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
1230 ARITH_ADDX);
1231 break;
1232 case INDEX_op_sub2_i32:
1233 tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
1234 ARITH_SUBCC);
1235 tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
1236 ARITH_SUBX);
1237 break;
1238 case INDEX_op_mulu2_i32:
1239 tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
1240 ARITH_UMUL);
1241 tcg_out_rdy(s, args[1]);
1242 break;
56f4927e 1243#endif
8289b279
BS
1244
1245 case INDEX_op_qemu_ld8u:
f5ef6aac 1246 tcg_out_qemu_ld(s, args, 0);
8289b279
BS
1247 break;
1248 case INDEX_op_qemu_ld8s:
f5ef6aac 1249 tcg_out_qemu_ld(s, args, 0 | 4);
8289b279
BS
1250 break;
1251 case INDEX_op_qemu_ld16u:
f5ef6aac 1252 tcg_out_qemu_ld(s, args, 1);
8289b279
BS
1253 break;
1254 case INDEX_op_qemu_ld16s:
f5ef6aac 1255 tcg_out_qemu_ld(s, args, 1 | 4);
8289b279 1256 break;
86feb1c8
RH
1257 case INDEX_op_qemu_ld32:
1258#if TCG_TARGET_REG_BITS == 64
8289b279 1259 case INDEX_op_qemu_ld32u:
86feb1c8 1260#endif
f5ef6aac 1261 tcg_out_qemu_ld(s, args, 2);
8289b279 1262 break;
30c0c76c 1263#if TCG_TARGET_REG_BITS == 64
8289b279 1264 case INDEX_op_qemu_ld32s:
f5ef6aac 1265 tcg_out_qemu_ld(s, args, 2 | 4);
8289b279 1266 break;
30c0c76c 1267#endif
a0ce341a
RH
1268 case INDEX_op_qemu_ld64:
1269 tcg_out_qemu_ld(s, args, 3);
1270 break;
8289b279 1271 case INDEX_op_qemu_st8:
f5ef6aac 1272 tcg_out_qemu_st(s, args, 0);
8289b279
BS
1273 break;
1274 case INDEX_op_qemu_st16:
f5ef6aac 1275 tcg_out_qemu_st(s, args, 1);
8289b279
BS
1276 break;
1277 case INDEX_op_qemu_st32:
f5ef6aac 1278 tcg_out_qemu_st(s, args, 2);
8289b279 1279 break;
a0ce341a
RH
1280 case INDEX_op_qemu_st64:
1281 tcg_out_qemu_st(s, args, 3);
1282 break;
8289b279 1283
a212ea75 1284#if TCG_TARGET_REG_BITS == 64
8289b279
BS
1285 case INDEX_op_movi_i64:
1286 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1287 break;
53cd9273
BS
1288 case INDEX_op_ld32s_i64:
1289 tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
1290 break;
8289b279
BS
1291 case INDEX_op_ld_i64:
1292 tcg_out_ldst(s, args[0], args[1], args[2], LDX);
1293 break;
1294 case INDEX_op_st_i64:
1295 tcg_out_ldst(s, args[0], args[1], args[2], STX);
1296 break;
1297 case INDEX_op_shl_i64:
1298 c = SHIFT_SLLX;
1fd95946
RH
1299 do_shift64:
1300 /* Limit immediate shift count lest we create an illegal insn. */
1301 tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c);
1302 break;
8289b279
BS
1303 case INDEX_op_shr_i64:
1304 c = SHIFT_SRLX;
1fd95946 1305 goto do_shift64;
8289b279
BS
1306 case INDEX_op_sar_i64:
1307 c = SHIFT_SRAX;
1fd95946 1308 goto do_shift64;
8289b279
BS
1309 case INDEX_op_mul_i64:
1310 c = ARITH_MULX;
ba225198 1311 goto gen_arith;
583d1215 1312 case INDEX_op_div_i64:
53cd9273 1313 c = ARITH_SDIVX;
ba225198 1314 goto gen_arith;
583d1215 1315 case INDEX_op_divu_i64:
8289b279 1316 c = ARITH_UDIVX;
ba225198 1317 goto gen_arith;
583d1215
RH
1318 case INDEX_op_rem_i64:
1319 case INDEX_op_remu_i64:
375816f8 1320 tcg_out_arithc(s, TCG_REG_T1, args[1], args[2], const_args[2],
583d1215 1321 opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
375816f8 1322 tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
583d1215 1323 ARITH_MULX);
375816f8 1324 tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
583d1215 1325 break;
cc6dfecf
RH
1326 case INDEX_op_ext32s_i64:
1327 if (const_args[1]) {
1328 tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
1329 } else {
1330 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
1331 }
1332 break;
1333 case INDEX_op_ext32u_i64:
1334 if (const_args[1]) {
1335 tcg_out_movi_imm32(s, args[0], args[1]);
1336 } else {
1337 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
1338 }
1339 break;
8289b279
BS
1340
1341 case INDEX_op_brcond_i64:
1da92db2
BS
1342 tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
1343 args[3]);
8289b279 1344 break;
dbfe80e1
RH
1345 case INDEX_op_setcond_i64:
1346 tcg_out_setcond_i64(s, args[3], args[0], args[1],
1347 args[2], const_args[2]);
1348 break;
1349
8289b279 1350#endif
ba225198
RH
1351 gen_arith:
1352 tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
53cd9273
BS
1353 break;
1354
4b5a85c1
RH
1355 gen_arith1:
1356 tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
1357 break;
1358
8289b279
BS
1359 default:
1360 fprintf(stderr, "unknown opcode 0x%x\n", opc);
1361 tcg_abort();
1362 }
1363}
1364
/* Operand constraint table for the SPARC backend, terminated by { -1 }.
 * Constraint letters are presumably defined by this backend's
 * target_parse_constraint: "r" a general register, "ri"/"rJ" register or
 * immediate (J likely the simm13 range), "L" a register suitable for
 * qemu_ld/st slow-path calls — TODO confirm against the constraint parser
 * earlier in this file.  */
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "rJ" } },
    { INDEX_op_mul_i32, { "r", "r", "rJ" } },
    { INDEX_op_div_i32, { "r", "r", "rJ" } },
    { INDEX_op_divu_i32, { "r", "r", "rJ" } },
    { INDEX_op_rem_i32, { "r", "r", "rJ" } },
    { INDEX_op_remu_i32, { "r", "r", "rJ" } },
    { INDEX_op_sub_i32, { "r", "r", "rJ" } },
    { INDEX_op_and_i32, { "r", "r", "rJ" } },
    { INDEX_op_andc_i32, { "r", "r", "rJ" } },
    { INDEX_op_or_i32, { "r", "r", "rJ" } },
    { INDEX_op_orc_i32, { "r", "r", "rJ" } },
    { INDEX_op_xor_i32, { "r", "r", "rJ" } },

    { INDEX_op_shl_i32, { "r", "r", "rJ" } },
    { INDEX_op_shr_i32, { "r", "r", "rJ" } },
    { INDEX_op_sar_i32, { "r", "r", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "r", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "r", "rJ" } },

#if TCG_TARGET_REG_BITS == 32
    /* Double-word helpers only exist on 32-bit hosts.  */
    { INDEX_op_brcond2_i32, { "r", "r", "rJ", "rJ" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "rJ" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "rJ" } },
    { INDEX_op_mul_i64, { "r", "r", "rJ" } },
    { INDEX_op_div_i64, { "r", "r", "rJ" } },
    { INDEX_op_divu_i64, { "r", "r", "rJ" } },
    { INDEX_op_rem_i64, { "r", "r", "rJ" } },
    { INDEX_op_remu_i64, { "r", "r", "rJ" } },
    { INDEX_op_sub_i64, { "r", "r", "rJ" } },
    { INDEX_op_and_i64, { "r", "r", "rJ" } },
    { INDEX_op_andc_i64, { "r", "r", "rJ" } },
    { INDEX_op_or_i64, { "r", "r", "rJ" } },
    { INDEX_op_orc_i64, { "r", "r", "rJ" } },
    { INDEX_op_xor_i64, { "r", "r", "rJ" } },

    { INDEX_op_shl_i64, { "r", "r", "rJ" } },
    { INDEX_op_shr_i64, { "r", "r", "rJ" } },
    { INDEX_op_sar_i64, { "r", "r", "rJ" } },

    { INDEX_op_neg_i64, { "r", "rJ" } },
    { INDEX_op_not_i64, { "r", "rJ" } },

    { INDEX_op_ext32s_i64, { "r", "ri" } },
    { INDEX_op_ext32u_i64, { "r", "ri" } },

    { INDEX_op_brcond_i64, { "r", "rJ" } },
    { INDEX_op_setcond_i64, { "r", "r", "rJ" } },
#endif

    /* qemu_ld/st operand counts vary with host width vs guest address
       width: extra operands carry address/data register pairs.  */
#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },

    { INDEX_op_qemu_st8, { "L", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif

    { -1 },
};
1497
e4d58b41 1498static void tcg_target_init(TCGContext *s)
8289b279
BS
1499{
1500 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
a212ea75 1501#if TCG_TARGET_REG_BITS == 64
8289b279
BS
1502 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
1503#endif
1504 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
b3db8758
BS
1505 (1 << TCG_REG_G1) |
1506 (1 << TCG_REG_G2) |
1507 (1 << TCG_REG_G3) |
1508 (1 << TCG_REG_G4) |
1509 (1 << TCG_REG_G5) |
1510 (1 << TCG_REG_G6) |
1511 (1 << TCG_REG_G7) |
8289b279
BS
1512 (1 << TCG_REG_O0) |
1513 (1 << TCG_REG_O1) |
1514 (1 << TCG_REG_O2) |
1515 (1 << TCG_REG_O3) |
1516 (1 << TCG_REG_O4) |
1517 (1 << TCG_REG_O5) |
8289b279
BS
1518 (1 << TCG_REG_O7));
1519
1520 tcg_regset_clear(s->reserved_regs);
375816f8
RH
1521 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1522 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1523 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1524 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1525 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1526 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1527 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1528 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1529
8289b279
BS
1530 tcg_add_target_add_op_defs(sparc_op_defs);
1531}
cb1977d3
RH
1532
1533#if TCG_TARGET_REG_BITS == 64
1534# define ELF_HOST_MACHINE EM_SPARCV9
9b9c37c3 1535#else
cb1977d3
RH
1536# define ELF_HOST_MACHINE EM_SPARC32PLUS
1537# define ELF_HOST_FLAGS EF_SPARC_32PLUS
cb1977d3
RH
1538#endif
1539
/* DWARF Common Information Entry header.  Field layout mirrors the
   on-disk .debug_frame format, so it must not be changed; the leading
   'len' is aligned so the following FDE starts on a pointer boundary.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

/* DWARF Frame Description Entry covering the generated code buffer.
   func_start/func_len are packed because the preceding uint32 fields
   leave them misaligned relative to their natural alignment.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    tcg_target_long func_start __attribute__((packed));
    tcg_target_long func_len __attribute__((packed));
    uint8_t def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];
    uint8_t win_save;
    uint8_t ret_save[3];
} DebugFrameFDE;

/* One CIE followed by one FDE — the complete unwind blob handed to
   tcg_register_jit_int below.  */
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDE fde;
} DebugFrame;
1564
/* Statically built unwind info for the code buffer.  Not const: the
   FDE's func_start/func_len are filled in at runtime by
   tcg_register_jit.  The bytes in def_cfa/win_save/ret_save are raw
   DWARF call-frame instructions (annotated per entry below).  */
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = -sizeof(void *) & 0x7f,
    .cie.return_column = 15,            /* o7 */

    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
    .fde.def_cfa = {
#if TCG_TARGET_REG_BITS == 64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde.win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde.ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};
1585
1586void tcg_register_jit(void *buf, size_t buf_size)
1587{
1588 debug_frame.fde.func_start = (tcg_target_long) buf;
1589 debug_frame.fde.func_len = buf_size;
1590
1591 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1592}
5bbd2cae
RH
1593
1594void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
1595{
1596 uint32_t *ptr = (uint32_t *)jmp_addr;
1597 tcg_target_long disp = (tcg_target_long)(addr - jmp_addr) >> 2;
1598
1599 /* We can reach the entire address space for 32-bit. For 64-bit
1600 the code_gen_buffer can't be larger than 2GB. */
1601 if (TCG_TARGET_REG_BITS == 64 && !check_fit_tl(disp, 30)) {
1602 tcg_abort();
1603 }
1604
1605 *ptr = CALL | (disp & 0x3fffffff);
1606 flush_icache_range(jmp_addr, jmp_addr + 4);
1607}