/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

/* Define some temporary registers.  T2 is used for constant generation. */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};

static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << (sizeof(tcg_target_long) * 8 - bits)
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}

static inline int check_fit_i32(uint32_t val, unsigned int bits)
{
    return ((val << (32 - bits)) >> (32 - bits)) == val;
}

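/*
 * Note: check_fit_tl(val, bits) tests whether VAL survives a round trip
 * through a BITS-wide signed field, i.e. whether it can be encoded as a
 * sign-extended immediate.  For instance check_fit_tl(4095, 13) and
 * check_fit_tl(-4096, 13) hold, while check_fit_tl(4096, 13) does not.
 * This is the test used throughout this file for the 13-bit simm13 field
 * of SPARC arithmetic and load/store instructions.
 */
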
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch (type) {
    case R_SPARC_32:
        if (value != (uint32_t)value)
            tcg_abort();
        *(uint32_t *)code_ptr = value;
        break;
    case R_SPARC_WDISP22:
        value -= (long)code_ptr;
        value >>= 2;
        if (!check_fit_tl(value, 22))
            tcg_abort();
        *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x3fffff) | value;
        break;
    case R_SPARC_WDISP19:
        value -= (long)code_ptr;
        value >>= 2;
        if (!check_fit_tl(value, 19))
            tcg_abort();
        *(uint32_t *)code_ptr = ((*(uint32_t *)code_ptr) & ~0x7ffff) | value;
        break;
    default:
        tcg_abort();
    }
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        /* Helper args */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S13;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;

    ct = arg_ct->ct;
    if (ct & TCG_CT_CONST)
        return 1;
    else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11))
        return 1;
    else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13))
        return 1;
    else
        return 0;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_OFF22(x) (((x) >> 2) & 0x3fffff)

#define INSN_COND(x, a) (((x) << 25) | ((a) << 29))
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x2))

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define ARITH_ADD   (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND   (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN  (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR    (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC  (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN   (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR   (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB   (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX  (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX  (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL  (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV  (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV  (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX  (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

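/*
 * Note on the encodings above: a SPARC instruction word is assembled by
 * OR-ing the INSN_* field macros together.  A format-3 arithmetic insn,
 * for instance, is roughly
 *
 *     INSN_OP(2) | INSN_OP3(op3) | INSN_RD(rd) | INSN_RS1(rs1)
 *         | (INSN_IMM13(simm13) or INSN_RS2(rs2))
 *
 * which is exactly the pattern emitted by tcg_out_arith, tcg_out_arithi
 * and tcg_out_arithc below.  The *_LE variants are the alternate-space
 * forms using ASI_PRIMARY_LITTLE, giving byte-swapped loads and stores.
 */
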
static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
                                 int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_RS2(rs2));
}

static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
                                  uint32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
              INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
                           int val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
}

static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
{
    if (check_fit_tl(arg, 13))
        tcg_out_movi_imm13(s, ret, arg);
    else {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff)
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
    }
}

static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    /* All 32-bit constants, as well as 64-bit constants with
       no high bits set go through movi_imm32. */
    if (TCG_TARGET_REG_BITS == 32
        || type == TCG_TYPE_I32
        || (arg & ~(tcg_target_long)0xffffffff) == 0) {
        tcg_out_movi_imm32(s, ret, arg);
    } else if (check_fit_tl(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits. */
        tcg_out_movi_imm13(s, ret, arg);
    } else if (check_fit_tl(arg, 32)) {
        /* A 32-bit constant sign-extended to 64-bits. */
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
    } else {
        tcg_out_movi_imm32(s, ret, arg >> (TCG_TARGET_REG_BITS / 2));
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_movi_imm32(s, TCG_REG_T2, arg);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}

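/*
 * Note: for a full 64-bit constant the final else branch above builds the
 * value in two halves; a sketch of the emitted sequence is
 *
 *     sethi/or  %ret, hi32(arg)      ; upper 32 bits via movi_imm32
 *     sllx      %ret, 32, %ret
 *     sethi/or  %t2,  lo32(arg)      ; lower 32 bits in the T2 scratch reg
 *     or        %ret, %t2, %ret
 */
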
static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
                                   int a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
                                int offset, int op)
{
    if (check_fit_tl(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    if (!check_fit_tl(arg, 10)) {
        tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    }
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}

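/*
 * Note: tcg_out_ld_ptr splits the absolute address so that the low 10 bits
 * ride in the load's immediate offset while tcg_out_movi materializes the
 * remaining bits; the 10-bit split presumably matches sethi, whose
 * immediate covers bits 31..10 of a 32-bit value.
 */
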
static inline void tcg_out_sety(TCGContext *s, int rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static inline void tcg_out_rdy(TCGContext *s, int rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}

static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13))
            tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
        else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
            tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);
        }
    }
}

static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
                                tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13))
            tcg_out_arithi(s, rd, rs, val, ARITH_AND);
        else {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T1, val);
            tcg_out_arith(s, rd, rs, TCG_REG_T1, ARITH_AND);
        }
    }
}

static void tcg_out_div32(TCGContext *s, int rd, int rs1,
                          int val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits. */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_sethi(s, TCG_REG_G0, 0);
}

static void tcg_out_branch_i32(TCGContext *s, int opc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t off22;

    if (l->has_value) {
        off22 = INSN_OFF22(l->u.value - (unsigned long)s->code_ptr);
    } else {
        /* Make sure to preserve destinations during retranslation. */
        off22 = *(uint32_t *)s->code_ptr & INSN_OFF22(-1);
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP22, label_index, 0);
    }
    tcg_out32(s, INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x2) | off22);
}

#if TCG_TARGET_REG_BITS == 64
static void tcg_out_branch_i64(TCGContext *s, int opc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t off19;

    if (l->has_value) {
        off19 = INSN_OFF19(l->u.value - (unsigned long)s->code_ptr);
    } else {
        /* Make sure to preserve destinations during retranslation. */
        off19 = *(uint32_t *)s->code_ptr & INSN_OFF19(-1);
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label_index, 0);
    }
    tcg_out32(s, (INSN_OP(0) | INSN_COND(opc, 0) | INSN_OP2(0x1) |
                  (0x5 << 19) | off19));
}
#endif

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond,
                               TCGArg arg1, TCGArg arg2, int const_arg2,
                               int label_index)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_branch_i32(s, tcg_cond_to_bcond[cond], label_index);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGArg ret,
                          TCGArg v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const,
                                TCGArg v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

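/*
 * Note: the movcc (ARITH_MOVCC) encoding takes an 11-bit signed immediate
 * rather than the usual 13-bit one, which is why the 'I' constraint
 * (TCG_CT_CONST_S11) exists and is used for the movcond value operands in
 * the op-defs table below.
 */
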
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond,
                               TCGArg arg1, TCGArg arg2, int const_arg2,
                               int label_index)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_branch_i64(s, tcg_cond_to_bcond[cond], label_index);
    tcg_out_nop(s);
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const,
                                TCGArg v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
}
#else
static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
                                TCGArg al, TCGArg ah,
                                TCGArg bl, int blconst,
                                TCGArg bh, int bhconst, int label_dest)
{
    int cc, label_next = gen_new_label();

    tcg_out_cmp(s, ah, bh, bhconst);

    /* Note that we fill one of the delay slots with the second compare. */
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_branch_i32(s, COND_NE, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_branch_i32(s, COND_E, label_dest);
        break;

    case TCG_COND_NE:
        tcg_out_branch_i32(s, COND_NE, label_dest);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_branch_i32(s, COND_NE, label_dest);
        break;

    default:
        cc = tcg_cond_to_bcond[tcg_high_cond(cond)];
        tcg_out_branch_i32(s, cc, label_dest);
        tcg_out_nop(s);
        tcg_out_branch_i32(s, COND_NE, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        cc = tcg_cond_to_bcond[tcg_unsigned_cond(cond)];
        tcg_out_branch_i32(s, cc, label_dest);
        break;
    }
    tcg_out_nop(s);

    tcg_out_label(s, label_next, s->code_ptr);
}
#endif

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    TCGArg t;

    /* For 32-bit comparisons, we can play games with ADDX/SUBX. */
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (c2 != 0) {
            tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
        }
        c1 = TCG_REG_G0, c2 = ret, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_LEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_GEU:
        if (c2const && c2 != 0) {
            tcg_out_movi_imm13(s, TCG_REG_T1, c2);
            c2 = TCG_REG_T1;
        }
        t = c1, c1 = c2, c2 = t, c2const = 0;
        cond = tcg_swap_cond(cond);
        break;

    case TCG_COND_LTU:
    case TCG_COND_LEU:
        break;

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
    }
}

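/*
 * Note on the ADDX/SUBX trick above: "subcc %c1, %c2, %g0" leaves the
 * unsigned less-than result of the comparison in the icc carry bit, so
 * "addx %g0, 0, %ret" copies the carry into RET and "subx %g0, -1, %ret"
 * (computing 0 - (-1) - C) produces its complement, which lets setcond
 * materialize these conditions without a branch.
 */
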
#if TCG_TARGET_REG_BITS == 64
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movi_imm13(s, ret, 0);
    tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
}
#else
static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                 TCGArg al, TCGArg ah,
                                 TCGArg bl, int blconst,
                                 TCGArg bh, int bhconst)
{
    int tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (ret != ah && (bhconst || ret != bh)) {
        tmp = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_setcond_i32(s, cond, tmp, al, bl, blconst);
        tcg_out_cmp(s, ah, bh, bhconst);
        tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
        tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, cond == TCG_COND_NE, 1);
        break;

    default:
        /* <= : ah < bh | (ah == bh && al <= bl) */
        tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), tmp, al, bl, blconst);
        tcg_out_cmp(s, ah, bh, bhconst);
        tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
        tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, 0, 1);
        tcg_out_movcc(s, tcg_high_cond(cond), MOVCC_ICC, ret, 1, 1);
        break;
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer. */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees. */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I1) |
              INSN_RS2(TCG_REG_G0));
    /* delay slot */
    tcg_out_nop(s);

    /* No epilogue required.  We issue ret + restore directly in the TB. */
}

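/*
 * Rough shape of the generated prologue (a sketch, not literal output):
 *
 *     save    %sp, -frame_size, %sp
 *     [movi   %i5, GUEST_BASE]        ; only with CONFIG_USE_GUEST_BASE
 *     jmpl    %i1, %g0                ; jump to the TB, address in %i1
 *     nop                             ; delay slot
 */
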
#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO_IDX contains the index into ARGS of the low part of the
   address; the high part of the address is at ADDR_LOW_IDX+1.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
                            int s_bits, const TCGArg *args, int which)
{
    const int addrlo = args[addrlo_idx];
    const int r0 = TCG_REG_O0;
    const int r1 = TCG_REG_O1;
    const int r2 = TCG_REG_O2;
    int addr = addrlo;
    int tlb_ofs;

    if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
        /* Assemble the 64-bit address in R0. */
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        tcg_out_arithi(s, r1, args[addrlo_idx + 1], 32, SHIFT_SLLX);
        tcg_out_arith(s, r0, r0, r1, ARITH_OR);
    }

    /* Shift the page number down to tlb-entry. */
    tcg_out_arithi(s, r1, addrlo,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment. */
    tcg_out_andi(s, r0, addr, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Compute tlb index, modulo tlb size. */
    tcg_out_andi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* Relative to the current ENV. */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend. */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        tcg_out_addi(s, r1, tlb_ofs);
        tlb_ofs = 0;
    }

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs + offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now. */
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        return r0;
    }
    return addrlo;
}
#endif /* CONFIG_SOFTMMU */

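/*
 * The "sizeop" index used below packs the access description into one
 * small integer: bits 1:0 are the log2 of the access size (0 = 8-bit,
 * 1 = 16-bit, 2 = 32-bit, 3 = 64-bit) and bit 2 requests sign extension,
 * so e.g. "1 | 4" in tcg_out_op below is a sign-extending 16-bit load.
 */
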
static const int qemu_ld_opc[8] = {
#ifdef TARGET_WORDS_BIGENDIAN
    LDUB, LDUH, LDUW, LDX, LDSB, LDSH, LDSW, LDX
#else
    LDUB, LDUH_LE, LDUW_LE, LDX_LE, LDSB, LDSH_LE, LDSW_LE, LDX_LE
#endif
};

static const int qemu_st_opc[4] = {
#ifdef TARGET_WORDS_BIGENDIAN
    STB, STH, STW, STX
#else
    STB, STH_LE, STW_LE, STX_LE
#endif
};

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, s_bits, n;
    uint32_t *label_ptr[2];
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];
    s_bits = sizeop & 3;

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, s_bits, args,
                                offsetof(CPUTLBEntry, addr_read));

    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64;

        /* bne,pn %[xi]cc, label0 */
        label_ptr[0] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_NE, 0) | INSN_OP2(0x1)
                      | ((TARGET_LONG_BITS == 64) << 21)));

        /* TLB Hit. */
        /* Load all 64-bits into an O/G register. */
        reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
        tcg_out_ldst_rr(s, reg64, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);

        /* Move the two 32-bit pieces into the destination registers. */
        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }

        /* b,a,pt label1 */
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_A, 0) | INSN_OP2(0x1)
                      | (1 << 29) | (1 << 19)));
    } else {
        /* The fast path is exactly one insn.  Thus we can perform the
           entire TLB Hit in the (annulled) delay slot of the branch
           over the TLB Miss case. */

        /* beq,a,pt %[xi]cc, label0 */
        label_ptr[0] = NULL;
        label_ptr[1] = (uint32_t *)s->code_ptr;
        tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
                      | ((TARGET_LONG_BITS == 64) << 21)
                      | (1 << 29) | (1 << 19)));
        /* delay slot */
        tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
    }

    /* TLB Miss. */

    if (label_ptr[0]) {
        *label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
                                    (unsigned long)label_ptr[0]);
    }
    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);

    /* qemu_ld_helper[s_bits](arg0, arg1) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[n], memi);

    n = tcg_target_call_oarg_regs[0];
    /* datalo = sign_extend(arg0) */
    switch (sizeop) {
    case 0 | 4:
        /* Recall that SRA sign extends from bit 31 through bit 63. */
        tcg_out_arithi(s, datalo, n, 24, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 24, SHIFT_SRA);
        break;
    case 1 | 4:
        tcg_out_arithi(s, datalo, n, 16, SHIFT_SLL);
        tcg_out_arithi(s, datalo, datalo, 16, SHIFT_SRA);
        break;
    case 2 | 4:
        tcg_out_arithi(s, datalo, n, 0, SHIFT_SRA);
        break;
    case 3:
        if (TCG_TARGET_REG_BITS == 32) {
            tcg_out_mov(s, TCG_TYPE_REG, datahi, n);
            tcg_out_mov(s, TCG_TYPE_REG, datalo, n + 1);
            break;
        }
        /* FALLTHRU */
    case 0:
    case 1:
    case 2:
    default:
        /* mov */
        tcg_out_mov(s, TCG_TYPE_REG, datalo, n);
        break;
    }

    *label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
                                (unsigned long)label_ptr[1]);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);

        tcg_out_ldst_rr(s, reg64, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);

        tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
        if (reg64 != datalo) {
            tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
        }
    } else {
        tcg_out_ldst_rr(s, datalo, addr_reg,
                        (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                        qemu_ld_opc[sizeop]);
    }
#endif /* CONFIG_SOFTMMU */
}

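/*
 * The store path below mirrors tcg_out_qemu_ld: the same TLB load/compare
 * and annulled-delay-slot fast path, but the data value must also be
 * marshalled into the helper's argument registers on a miss, and on 32-bit
 * hosts a 64-bit store first reassembles the value from its two halves
 * in %o2.
 */
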
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, n, datafull;
    uint32_t *label_ptr;
#endif

    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];

    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, sizeop, args,
                                offsetof(CPUTLBEntry, addr_write));

    datafull = datalo;
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        /* Reconstruct the full 64-bit value. */
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datafull = TCG_REG_O2;
    }

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = (uint32_t *)s->code_ptr;
    tcg_out32(s, (INSN_OP(0) | INSN_COND(COND_E, 0) | INSN_OP2(0x1)
                  | ((TARGET_LONG_BITS == 64) << 21)
                  | (1 << 29) | (1 << 19)));
    /* delay slot */
    tcg_out_ldst_rr(s, datafull, addr_reg, TCG_REG_O1, qemu_st_opc[sizeop]);

    /* TLB Miss. */

    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datahi);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datalo);

    /* qemu_st_helper[s_bits](arg0, arg1, arg2) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[sizeop]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n], memi);

    *label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
                             (unsigned long)label_ptr);
#else
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datalo = TCG_REG_O2;
    }
    tcg_out_ldst_rr(s, datalo, addr_reg,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[sizeop]);
#endif /* CONFIG_SOFTMMU */
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
        tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
                  INSN_IMM13(8));
        tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
                  INSN_RS2(TCG_REG_G0));
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            uint32_t old_insn = *(uint32_t *)s->code_ptr;
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            /* Make sure to preserve links during retranslation. */
            tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_T1,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
                      INSN_RS2(TCG_REG_G0));
        }
        tcg_out_nop(s);
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
                                   - (tcg_target_ulong)s->code_ptr) >> 2)
                                 & 0x3fffffff));
        } else {
            tcg_out_ld_ptr(s, TCG_REG_T1,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_T1) |
                      INSN_RS2(TCG_REG_G0));
        }
        /* delay slot */
        tcg_out_nop(s);
        break;
    case INDEX_op_br:
        tcg_out_branch_i32(s, COND_A, args[0]);
        tcg_out_nop(s);
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

#if TCG_TARGET_REG_BITS == 64
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
#else
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32)
#endif
    OP_32_64(ld8u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
        break;
    case INDEX_op_ld_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_ld32u_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, args[0], args[1], args[2], STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, args[0], args[1], args[2], STH);
        break;
    case INDEX_op_st_i32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_st32_i64:
#endif
        tcg_out_ldst(s, args[0], args[1], args[2], STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
        break;

    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
        tcg_out_div32(s, TCG_REG_T1, args[1], args[2], const_args[2],
                      opc == INDEX_op_remu_i32);
        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                       ARITH_UMUL);
        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], args[0], args[1],
                            args[2], const_args[2], args[3], const_args[3]);
        break;

#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2_i32(s, args[4], args[0], args[1],
                            args[2], const_args[2],
                            args[3], const_args[3], args[5]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
                             args[3], const_args[3],
                             args[4], const_args[4]);
        break;
    case INDEX_op_add2_i32:
        tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
                       ARITH_ADDCC);
        tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
                       ARITH_ADDX);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_arithc(s, args[0], args[2], args[4], const_args[4],
                       ARITH_SUBCC);
        tcg_out_arithc(s, args[1], args[3], args[5], const_args[5],
                       ARITH_SUBX);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
                       ARITH_UMUL);
        tcg_out_rdy(s, args[1]);
        break;
#endif

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32u:
#endif
        tcg_out_qemu_ld(s, args, 2);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32s:
        tcg_out_qemu_ld(s, args, 2 | 4);
        break;
#endif
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;
    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_movi_i64:
        tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, args[0], args[1], args[2], STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
        tcg_out_arithc(s, TCG_REG_T1, args[1], args[2], const_args[2],
                       opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
        tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
                       ARITH_MULX);
        tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
        break;
    case INDEX_op_ext32s_i64:
        if (const_args[1]) {
            tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
        }
        break;
    case INDEX_op_ext32u_i64:
        if (const_args[1]) {
            tcg_out_movi_imm32(s, args[0], args[1]);
        } else {
            tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
        }
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
                           args[3]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], args[0], args[1],
                            args[2], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], args[0], args[1],
                            args[2], const_args[2], args[3], const_args[3]);
        break;
#endif
    gen_arith:
        tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
        break;

    gen_arith1:
        tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}

static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "rJ" } },
    { INDEX_op_mul_i32, { "r", "r", "rJ" } },
    { INDEX_op_div_i32, { "r", "r", "rJ" } },
    { INDEX_op_divu_i32, { "r", "r", "rJ" } },
    { INDEX_op_rem_i32, { "r", "r", "rJ" } },
    { INDEX_op_remu_i32, { "r", "r", "rJ" } },
    { INDEX_op_sub_i32, { "r", "r", "rJ" } },
    { INDEX_op_and_i32, { "r", "r", "rJ" } },
    { INDEX_op_andc_i32, { "r", "r", "rJ" } },
    { INDEX_op_or_i32, { "r", "r", "rJ" } },
    { INDEX_op_orc_i32, { "r", "r", "rJ" } },
    { INDEX_op_xor_i32, { "r", "r", "rJ" } },

    { INDEX_op_shl_i32, { "r", "r", "rJ" } },
    { INDEX_op_shr_i32, { "r", "r", "rJ" } },
    { INDEX_op_sar_i32, { "r", "r", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "r", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "r", "rJ" } },
    { INDEX_op_movcond_i32, { "r", "r", "rJ", "rI", "0" } },

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_brcond2_i32, { "r", "r", "rJ", "rJ" } },
    { INDEX_op_setcond2_i32, { "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_add2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "r", "r", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "rJ" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },

    { INDEX_op_add_i64, { "r", "r", "rJ" } },
    { INDEX_op_mul_i64, { "r", "r", "rJ" } },
    { INDEX_op_div_i64, { "r", "r", "rJ" } },
    { INDEX_op_divu_i64, { "r", "r", "rJ" } },
    { INDEX_op_rem_i64, { "r", "r", "rJ" } },
    { INDEX_op_remu_i64, { "r", "r", "rJ" } },
    { INDEX_op_sub_i64, { "r", "r", "rJ" } },
    { INDEX_op_and_i64, { "r", "r", "rJ" } },
    { INDEX_op_andc_i64, { "r", "r", "rJ" } },
    { INDEX_op_or_i64, { "r", "r", "rJ" } },
    { INDEX_op_orc_i64, { "r", "r", "rJ" } },
    { INDEX_op_xor_i64, { "r", "r", "rJ" } },

    { INDEX_op_shl_i64, { "r", "r", "rJ" } },
    { INDEX_op_shr_i64, { "r", "r", "rJ" } },
    { INDEX_op_sar_i64, { "r", "r", "rJ" } },

    { INDEX_op_neg_i64, { "r", "rJ" } },
    { INDEX_op_not_i64, { "r", "rJ" } },

    { INDEX_op_ext32s_i64, { "r", "ri" } },
    { INDEX_op_ext32u_i64, { "r", "ri" } },

    { INDEX_op_brcond_i64, { "r", "rJ" } },
    { INDEX_op_setcond_i64, { "r", "r", "rJ" } },
    { INDEX_op_movcond_i64, { "r", "r", "rJ", "rI", "0" } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },

    { INDEX_op_qemu_st8, { "L", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif

    { -1 },
};

static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
#if TCG_TARGET_REG_BITS == 64
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
#endif
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_G1) |
                     (1 << TCG_REG_G2) |
                     (1 << TCG_REG_G3) |
                     (1 << TCG_REG_G4) |
                     (1 << TCG_REG_G5) |
                     (1 << TCG_REG_G6) |
                     (1 << TCG_REG_G7) |
                     (1 << TCG_REG_O0) |
                     (1 << TCG_REG_O1) |
                     (1 << TCG_REG_O2) |
                     (1 << TCG_REG_O3) |
                     (1 << TCG_REG_O4) |
                     (1 << TCG_REG_O5) |
                     (1 << TCG_REG_O7));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */

    tcg_add_target_add_op_defs(sparc_op_defs);
}

#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    tcg_target_long func_start __attribute__((packed));
    tcg_target_long func_len __attribute__((packed));
    uint8_t def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];
    uint8_t win_save;
    uint8_t ret_save[3];
} DebugFrameFDE;

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDE fde;
} DebugFrame;

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = -sizeof(void *) & 0x7f,
    .cie.return_column = 15,            /* o7 */

    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
    .fde.def_cfa = {
#if TCG_TARGET_REG_BITS == 64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde.win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde.ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};

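/*
 * The DebugFrame structures above encode a minimal DWARF call-frame
 * description (CIE + FDE) for the generated code; tcg_register_jit below
 * fills in the code buffer bounds and hands the blob to
 * tcg_register_jit_int so debuggers using the GDB JIT interface can
 * unwind through translated code.
 */
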
void tcg_register_jit(void *buf, size_t buf_size)
{
    debug_frame.fde.func_start = (tcg_target_long) buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}

void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    uint32_t *ptr = (uint32_t *)jmp_addr;
    tcg_target_long disp = (tcg_target_long)(addr - jmp_addr) >> 2;

    /* We can reach the entire address space for 32-bit.  For 64-bit
       the code_gen_buffer can't be larger than 2GB. */
    if (TCG_TARGET_REG_BITS == 64 && !check_fit_tl(disp, 30)) {
        tcg_abort();
    }

    *ptr = CALL | (disp & 0x3fffffff);
    flush_icache_range(jmp_addr, jmp_addr + 4);
}