/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode. */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

/*
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif
#define ALL_GENERAL_REGS     MAKE_64BIT_MASK(0, 32)
#define ALL_QLDST_REGS       (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x)  ((x) << 25)
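
/*
 * Editorial note (illustrative, not part of the emitted code): with the
 * field macros above, "add %o1, %o2, %o0" assembles as
 *     ARITH_ADD | INSN_RD(TCG_REG_O0) | INSN_RS1(TCG_REG_O1)
 *               | INSN_RS2(TCG_REG_O2)
 * while the immediate form "add %o1, 5, %o0" swaps INSN_RS2 for
 * INSN_IMM13(5), whose bit 13 selects the immediate encoding.
 */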

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD   (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND   (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN  (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR    (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC  (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN   (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR   (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB   (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC  (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC  (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL  (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL  (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV  (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV  (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX  (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR  (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC   (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64
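
/*
 * Editorial note with a worked example: check_fit_i64(4095, 13) is true,
 * since 4095 is its own 13-bit sign extension, while check_fit_i64(8191, 13)
 * is false -- bit 12 is set, so sextract64() yields -1, not 8191.
 */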

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (check_fit_i32(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits.  */
        tcg_out_movi_imm13(s, ret, arg);
    } else {
        /* A 32-bit constant zero-extended to 64 bits.  */
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
    }
}

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_imm32(s, ret, arg);
        return;
    }

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB.  */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 32-bit constant, shifted.  */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible.  */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_movi_imm32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}
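
/*
 * Editorial note (illustrative): assuming the TB-relative and constant
 * pool paths above do not apply, a value such as 0x123450000000 takes
 * the "32-bit constant, shifted" path: ctz64() finds 28 trailing zeros
 * and 0x12345 fits in 21 bits, so two insns suffice:
 *     sethi %hi(0x12345 << 10), %ret
 *     sllx  %ret, 18, %ret
 */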

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T2);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
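
/*
 * Editorial note: the v8-style UDIV/SDIV used above divide the 64-bit
 * quantity (Y:rs1) by the second operand, so seeding Y with the zero or
 * sign extension of rs1 is what reduces them to plain 32-bit division.
 */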

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}
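
/*
 * Editorial note with a worked example: SUBCC sets the carry flag iff
 * c1 < c2 unsigned, so for LTU "addc %g0, 0, ret" computes 0 + 0 + C,
 * materializing the flag directly, while for GEU "subc %g0, -1, ret"
 * computes 0 - (-1) - C = 1 - C, inverting it -- no branch or
 * conditional move needed.
 */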

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates.  */
        if (bhconst && bh != 0) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place.  */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand.  */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}
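
/*
 * Editorial note (illustrative trace of the bh == %g0 case above, for
 * rh != ah and !is_sub): "rh = ah + 1" is computed speculatively, then
 * MOVCC on GEU (carry clear) rewrites rh back to ah, so the high part
 * absorbs the carry from ADDCC even without the VIS3 ADDXC insn.
 */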

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    /* Be careful not to clobber %o7 for a tail call.  */
    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue,
                     tail_call ? TCG_REG_G2 : TCG_REG_O7);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}
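
/*
 * Editorial note (assumes the TCG_MO_* definitions in tcg.h): e.g.
 * TCG_MO_LD_LD | TCG_MO_LD_ST maps bit-for-bit onto the SPARC
 * "membar #LoadLoad | #LoadStore" mmask, which is why no translation
 * table is needed above.
 */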

#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];

static void emit_extend(TCGContext *s, TCGReg r, int op)
{
    /* Emit zero extend of 8, 16 or 32 bit data as
     * required by the MO_* value op; do nothing for 64 bit.
     */
    switch (op & MO_SIZE) {
    case MO_8:
        tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
        break;
    case MO_16:
        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
        break;
    case MO_32:
        tcg_out_arith(s, r, r, 0, SHIFT_SRL);
        break;
    case MO_64:
        break;
    }
}
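
/*
 * Editorial note (illustrative): emit_extend(s, r, MO_16) emits
 *     sll r, 16, r
 *     srl r, 16, r
 * which clears bits 63..16, matching the zero-extension the store
 * helpers expect for 16-bit data.
 */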

static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEUQ] = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEUQ] = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEUQ] = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEUQ] = helper_be_stq_mmu,
    };

    int i;

    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }

    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        emit_extend(s, TCG_REG_O2, i);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);

        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#else
static const tcg_insn_unit *qemu_unalign_ld_trampoline;
static const tcg_insn_unit *qemu_unalign_st_trampoline;

static void build_trampolines(TCGContext *s)
{
    for (int ld = 0; ld < 2; ++ld) {
        void *helper;

        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }

        if (ld) {
            helper = helper_unaligned_ld;
            qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        } else {
            helper = helper_unaligned_st;
            qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        }

        /* Tail call.  */
        tcg_out_jmpl_const(s, helper, true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required.  */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr.  */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

    build_trampolines(s);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2.  */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
    [MO_UB] = LDUB,
    [MO_SB] = LDSB,
    [MO_UB | MO_LE] = LDUB,
    [MO_SB | MO_LE] = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEUQ] = LDX,
    [MO_BESQ] = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEUQ] = LDX_LE,
    [MO_LESQ] = LDX_LE,
};

static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB] = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEUQ] = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEUQ] = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, bool is_64)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss. */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);

    /* We let the helper sign-extend SB and SW, but leave SL for here.  */
    if (is_64 && (memop & MO_SSIZE) == MO_SL) {
        tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight load in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: load by pieces of minimum alignment. */
        int ld_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            ld_opc = qemu_ld_opc[a_bits | MO_BE];
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, data, data, a_size * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else if (a_bits == 0) {
            ld_opc = LDUB;
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = LDSB;
                }
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else {
            ld_opc = qemu_ld_opc[a_bits | MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
                }
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss. */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight store in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: store by pieces of minimum alignment. */
        int st_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            st_opc = qemu_st_opc[a_bits | MO_BE];
            for (i = 0; i < s_size; i += a_size) {
                TCGReg d = data;
                int shift = (s_size - a_size - i) * 8;
                if (shift) {
                    d = TCG_REG_T2;
                    tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
                }
                tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
            }
        } else if (a_bits == 0) {
            tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
            for (i = 1; i < s_size; i++) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
            }
        } else {
            /* Note that ST*A with immediate asi must use indexed address. */
            st_opc = qemu_st_opc[a_bits + MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    if (check_fit_ptr(a0, 13)) {
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_movi_imm13(s, TCG_REG_O0, a0);
        return;
    } else {
        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
        if (check_fit_ptr(tb_diff, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            /* Note that TCG_REG_TB has been unwound to O1.  */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
            return;
        }
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));

    /* Direct branch will be patched by tb_target_set_jmp_target. */
    set_jmp_insn_offset(s, which);
    tcg_out32(s, CALL);
    /* delay slot */
    tcg_debug_assert(check_fit_ptr(off, 13));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
    set_jmp_reset_offset(s, which);

    /*
     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
     * to the beginning of this TB.
     */
    off = -tcg_current_code_size(s);
    if (check_fit_i32(off, 13)) {
        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
    }
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t addr = tb->jmp_target_addr[n];
    /* Branch fields hold word displacements: convert the byte
       difference once, here.  */
    intptr_t br_disp = (intptr_t)(addr - jmp_rx) >> 2;
    tcg_insn_unit insn;

    if (check_fit_ptr(br_disp, 19)) {
        /* ba,pt %icc, addr */
        insn = deposit32(INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
                         | BPCC_ICC | BPCC_PT, 0, 19, br_disp);
    } else if (check_fit_ptr(br_disp, 22)) {
        /* ba addr */
        insn = deposit32(INSN_OP(0) | INSN_OP2(2) | INSN_COND(COND_A),
                         0, 22, br_disp);
    } else {
        /* The code_gen_buffer can't be larger than 2GB.  */
        tcg_debug_assert(check_fit_ptr(br_disp, 30));
        /* call addr */
        insn = deposit32(CALL, 0, 30, br_disp);
    }

    qatomic_set((uint32_t *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}
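
/*
 * Editorial note with a worked example: for a branch target 64 bytes
 * ahead, br_disp is 16 words; that fits the 19-bit field, so the insn
 * patched in is "ba,pt %icc, +64".  In every case only one aligned
 * 4-byte store (the qatomic_set above) rewrites the jump.
 */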
1495
b357f902
RH
1496static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1497 const TCGArg args[TCG_MAX_OP_ARGS],
1498 const int const_args[TCG_MAX_OP_ARGS])
8289b279 1499{
b357f902
RH
1500 TCGArg a0, a1, a2;
1501 int c, c2;
1502
1503 /* Hoist the loads of the most common arguments. */
1504 a0 = args[0];
1505 a1 = args[1];
1506 a2 = args[2];
1507 c2 = const_args[2];
8289b279
BS
1508
1509 switch (opc) {
38f81dc5
RH
1510 case INDEX_op_goto_ptr:
1511 tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
1e42b4f8 1512 tcg_out_mov_delay(s, TCG_REG_TB, a0);
38f81dc5 1513 break;
8289b279 1514 case INDEX_op_br:
bec16311 1515 tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
f5ef6aac 1516 tcg_out_nop(s);
8289b279 1517 break;
8289b279 1518
8289b279 1519#define OP_32_64(x) \
ba225198
RH
1520 glue(glue(case INDEX_op_, x), _i32): \
1521 glue(glue(case INDEX_op_, x), _i64)
34b1a49c 1522
ba225198 1523 OP_32_64(ld8u):
b357f902 1524 tcg_out_ldst(s, a0, a1, a2, LDUB);
8289b279 1525 break;
ba225198 1526 OP_32_64(ld8s):
b357f902 1527 tcg_out_ldst(s, a0, a1, a2, LDSB);
8289b279 1528 break;
ba225198 1529 OP_32_64(ld16u):
b357f902 1530 tcg_out_ldst(s, a0, a1, a2, LDUH);
8289b279 1531 break;
ba225198 1532 OP_32_64(ld16s):
b357f902 1533 tcg_out_ldst(s, a0, a1, a2, LDSH);
8289b279
BS
1534 break;
1535 case INDEX_op_ld_i32:
53cd9273 1536 case INDEX_op_ld32u_i64:
b357f902 1537 tcg_out_ldst(s, a0, a1, a2, LDUW);
8289b279 1538 break;
ba225198 1539 OP_32_64(st8):
b357f902 1540 tcg_out_ldst(s, a0, a1, a2, STB);
8289b279 1541 break;
ba225198 1542 OP_32_64(st16):
b357f902 1543 tcg_out_ldst(s, a0, a1, a2, STH);
8289b279
BS
1544 break;
1545 case INDEX_op_st_i32:
53cd9273 1546 case INDEX_op_st32_i64:
b357f902 1547 tcg_out_ldst(s, a0, a1, a2, STW);
8289b279 1548 break;
ba225198 1549 OP_32_64(add):
53cd9273 1550 c = ARITH_ADD;
ba225198
RH
1551 goto gen_arith;
1552 OP_32_64(sub):
8289b279 1553 c = ARITH_SUB;
ba225198
RH
1554 goto gen_arith;
1555 OP_32_64(and):
8289b279 1556 c = ARITH_AND;
ba225198 1557 goto gen_arith;
dc69960d
RH
1558 OP_32_64(andc):
1559 c = ARITH_ANDN;
1560 goto gen_arith;
ba225198 1561 OP_32_64(or):
8289b279 1562 c = ARITH_OR;
ba225198 1563 goto gen_arith;
18c8f7a3
RH
1564 OP_32_64(orc):
1565 c = ARITH_ORN;
1566 goto gen_arith;
ba225198 1567 OP_32_64(xor):
8289b279 1568 c = ARITH_XOR;
ba225198 1569 goto gen_arith;
8289b279
BS
1570 case INDEX_op_shl_i32:
1571 c = SHIFT_SLL;
1fd95946
RH
1572 do_shift32:
1573 /* Limit immediate shift count lest we create an illegal insn. */
b357f902 1574 tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
1fd95946 1575 break;
8289b279
BS
1576 case INDEX_op_shr_i32:
1577 c = SHIFT_SRL;
1fd95946 1578 goto do_shift32;
8289b279
BS
1579 case INDEX_op_sar_i32:
1580 c = SHIFT_SRA;
1fd95946 1581 goto do_shift32;
8289b279
BS
1582 case INDEX_op_mul_i32:
1583 c = ARITH_UMUL;
ba225198 1584 goto gen_arith;
583d1215 1585
4b5a85c1
RH
1586 OP_32_64(neg):
1587 c = ARITH_SUB;
1588 goto gen_arith1;
be6551b1
RH
1589 OP_32_64(not):
1590 c = ARITH_ORN;
1591 goto gen_arith1;
4b5a85c1 1592
583d1215 1593 case INDEX_op_div_i32:
b357f902 1594 tcg_out_div32(s, a0, a1, a2, c2, 0);
583d1215
RH
1595 break;
1596 case INDEX_op_divu_i32:
b357f902 1597 tcg_out_div32(s, a0, a1, a2, c2, 1);
583d1215
RH
1598 break;
1599
8289b279 1600 case INDEX_op_brcond_i32:
bec16311 1601 tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
8289b279 1602 break;
dbfe80e1 1603 case INDEX_op_setcond_i32:
b357f902 1604 tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
dbfe80e1 1605 break;
ded37f0d 1606 case INDEX_op_movcond_i32:
b357f902 1607 tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
ded37f0d 1608 break;
dbfe80e1 1609
7a3766f3 1610 case INDEX_op_add2_i32:
609ac1e1
RH
1611 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1612 args[4], const_args[4], args[5], const_args[5],
c470b663 1613 ARITH_ADDCC, ARITH_ADDC);
7a3766f3
RH
1614 break;
1615 case INDEX_op_sub2_i32:
609ac1e1
RH
1616 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1617 args[4], const_args[4], args[5], const_args[5],
c470b663 1618 ARITH_SUBCC, ARITH_SUBC);
7a3766f3
RH
1619 break;
1620 case INDEX_op_mulu2_i32:
f4c16661
RH
1621 c = ARITH_UMUL;
1622 goto do_mul2;
1623 case INDEX_op_muls2_i32:
1624 c = ARITH_SMUL;
1625 do_mul2:
3a5f6805 1626 /* The 32-bit multiply insns produce a full 64-bit result. */
b357f902 1627 tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
3a5f6805 1628 tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
7a3766f3 1629 break;
8289b279 1630
cab0a7ea 1631 case INDEX_op_qemu_ld_i32:
59227d5d 1632 tcg_out_qemu_ld(s, a0, a1, a2, false);
8289b279 1633 break;
cab0a7ea 1634 case INDEX_op_qemu_ld_i64:
59227d5d 1635 tcg_out_qemu_ld(s, a0, a1, a2, true);
8289b279 1636 break;
cab0a7ea 1637 case INDEX_op_qemu_st_i32:
cab0a7ea 1638 case INDEX_op_qemu_st_i64:
59227d5d 1639 tcg_out_qemu_st(s, a0, a1, a2);
a0ce341a 1640 break;
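/*
 * Hedged note (editor's addition): for the qemu_ld/st opcodes, a0 is
 * the data register, a1 the guest address and a2 the MemOpIdx; the
 * trailing bool of tcg_out_qemu_ld() only selects 32- vs 64-bit data,
 * which is why the two store cases above share a single call:
 *
 *     tcg_out_qemu_ld(s, data, addr, oi, false);    32-bit load
 */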
8289b279 1641
53cd9273 1642 case INDEX_op_ld32s_i64:
b357f902 1643 tcg_out_ldst(s, a0, a1, a2, LDSW);
53cd9273 1644 break;
8289b279 1645 case INDEX_op_ld_i64:
b357f902 1646 tcg_out_ldst(s, a0, a1, a2, LDX);
8289b279 BS 1647 break;
1648 case INDEX_op_st_i64:
b357f902 1649 tcg_out_ldst(s, a0, a1, a2, STX);
8289b279 BS 1650 break;
1651 case INDEX_op_shl_i64:
1652 c = SHIFT_SLLX;
1fd95946 RH 1653 do_shift64:
1654 /* Limit immediate shift count lest we create an illegal insn. */
b357f902 1655 tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
1fd95946 1656 break;
8289b279 BS 1657 case INDEX_op_shr_i64:
1658 c = SHIFT_SRLX;
1fd95946 1659 goto do_shift64;
8289b279 BS 1660 case INDEX_op_sar_i64:
1661 c = SHIFT_SRAX;
1fd95946 1662 goto do_shift64;
8289b279 BS 1663 case INDEX_op_mul_i64:
1664 c = ARITH_MULX;
ba225198 1665 goto gen_arith;
583d1215 1666 case INDEX_op_div_i64:
53cd9273 1667 c = ARITH_SDIVX;
ba225198 1668 goto gen_arith;
583d1215 1669 case INDEX_op_divu_i64:
8289b279 1670 c = ARITH_UDIVX;
ba225198 1671 goto gen_arith;
4f2331e5 1672 case INDEX_op_ext_i32_i64:
cc6dfecf 1673 case INDEX_op_ext32s_i64:
b357f902 1674 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
cc6dfecf 1675 break;
4f2331e5 1676 case INDEX_op_extu_i32_i64:
cc6dfecf 1677 case INDEX_op_ext32u_i64:
b357f902 1678 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
cc6dfecf 1679 break;
609ad705 RH 1680 case INDEX_op_extrl_i64_i32:
1681 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1682 break;
1683 case INDEX_op_extrh_i64_i32:
1684 tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
a24fba93 1685 break;
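/*
 * Editor's note (not in the original source): the v9 32-bit shifts
 * operate on the low word and then sign- (SRA) or zero- (SRL) extend
 * into the full 64-bit register, so a zero shift count is a free
 * extension, and a 64-bit logical shift right by 32 extracts the
 * high word:
 *
 *     sra  %a1, 0, %a0         ! ext32s_i64 / ext_i32_i64
 *     srl  %a1, 0, %a0         ! ext32u_i64 / extu_i32_i64
 *     srlx %a1, 32, %a0        ! extrh_i64_i32
 */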
8289b279 BS 1686
1687 case INDEX_op_brcond_i64:
bec16311 1688 tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
8289b279 1689 break;
dbfe80e1 1690 case INDEX_op_setcond_i64:
b357f902 1691 tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
dbfe80e1 1692 break;
ded37f0d 1693 case INDEX_op_movcond_i64:
b357f902 1694 tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
ded37f0d 1695 break;
609ac1e1 RH 1696 case INDEX_op_add2_i64:
1697 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1698 const_args[4], args[5], const_args[5], false);
1699 break;
1700 case INDEX_op_sub2_i64:
1701 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1702 const_args[4], args[5], const_args[5], true);
1703 break;
de8301e5 RH 1704 case INDEX_op_muluh_i64:
1705 tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
1706 break;
34b1a49c 1707
ba225198 1708 gen_arith:
b357f902 1709 tcg_out_arithc(s, a0, a1, a2, c2, c);
53cd9273 BS 1710 break;
1711
4b5a85c1 1712 gen_arith1:
b357f902 1713 tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
4b5a85c1 RH 1714 break;
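/*
 * Editor's note (not in the original source): gen_arith1 builds the
 * one-operand ops from two-operand insns by supplying %g0, which
 * always reads as zero, as the first source:
 *
 *     sub %g0, %a1, %a0        ! neg: 0 - a1
 *     orn %g0, %a1, %a0        ! not: 0 | ~a1
 */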
1715
f8f03b37 PK 1716 case INDEX_op_mb:
1717 tcg_out_mb(s, a0);
1718 break;
1719
96d0ee7f 1720 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
98b90bab 1721 case INDEX_op_mov_i64:
96d0ee7f 1722 case INDEX_op_call: /* Always emitted via tcg_out_call. */
b55a8d9d 1723 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
cf7d6b8e 1724 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
8289b279 1725 default:
8289b279 BS 1726 tcg_abort();
1727 }
1728}
1729
0d11dc7c 1730static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
f69d277e 1731{
9be44a16 RH 1732 switch (op) {
1733 case INDEX_op_goto_ptr:
0d11dc7c 1734 return C_O0_I1(r);
f69d277e 1735
9be44a16 1736 case INDEX_op_ld8u_i32:
a59a2931 1737 case INDEX_op_ld8u_i64:
9be44a16 1738 case INDEX_op_ld8s_i32:
a59a2931 1739 case INDEX_op_ld8s_i64:
9be44a16 1740 case INDEX_op_ld16u_i32:
a59a2931 1741 case INDEX_op_ld16u_i64:
9be44a16 1742 case INDEX_op_ld16s_i32:
a59a2931 1743 case INDEX_op_ld16s_i64:
9be44a16 1744 case INDEX_op_ld_i32:
a59a2931 RH 1745 case INDEX_op_ld32u_i64:
1746 case INDEX_op_ld32s_i64:
1747 case INDEX_op_ld_i64:
9be44a16 1748 case INDEX_op_neg_i32:
a59a2931 1749 case INDEX_op_neg_i64:
9be44a16 1750 case INDEX_op_not_i32:
a59a2931 RH 1751 case INDEX_op_not_i64:
1752 case INDEX_op_ext32s_i64:
1753 case INDEX_op_ext32u_i64:
1754 case INDEX_op_ext_i32_i64:
1755 case INDEX_op_extu_i32_i64:
1756 case INDEX_op_extrl_i64_i32:
1757 case INDEX_op_extrh_i64_i32:
0d11dc7c 1758 return C_O1_I1(r, r);
9be44a16 RH 1759
1760 case INDEX_op_st8_i32:
a59a2931 1761 case INDEX_op_st8_i64:
9be44a16 1762 case INDEX_op_st16_i32:
a59a2931 1763 case INDEX_op_st16_i64:
9be44a16 1764 case INDEX_op_st_i32:
a59a2931 RH 1765 case INDEX_op_st32_i64:
1766 case INDEX_op_st_i64:
0d11dc7c 1767 return C_O0_I2(rZ, r);
9be44a16 RH 1768
1769 case INDEX_op_add_i32:
a59a2931 1770 case INDEX_op_add_i64:
9be44a16 1771 case INDEX_op_mul_i32:
a59a2931 1772 case INDEX_op_mul_i64:
9be44a16 1773 case INDEX_op_div_i32:
a59a2931 1774 case INDEX_op_div_i64:
9be44a16 1775 case INDEX_op_divu_i32:
a59a2931 1776 case INDEX_op_divu_i64:
9be44a16 1777 case INDEX_op_sub_i32:
a59a2931 1778 case INDEX_op_sub_i64:
9be44a16 1779 case INDEX_op_and_i32:
a59a2931 1780 case INDEX_op_and_i64:
9be44a16 1781 case INDEX_op_andc_i32:
a59a2931 1782 case INDEX_op_andc_i64:
9be44a16 1783 case INDEX_op_or_i32:
a59a2931 1784 case INDEX_op_or_i64:
9be44a16 1785 case INDEX_op_orc_i32:
a59a2931 1786 case INDEX_op_orc_i64:
9be44a16 1787 case INDEX_op_xor_i32:
a59a2931 1788 case INDEX_op_xor_i64:
9be44a16 1789 case INDEX_op_shl_i32:
a59a2931 1790 case INDEX_op_shl_i64:
9be44a16 1791 case INDEX_op_shr_i32:
a59a2931 1792 case INDEX_op_shr_i64:
9be44a16 1793 case INDEX_op_sar_i32:
a59a2931 1794 case INDEX_op_sar_i64:
9be44a16 1795 case INDEX_op_setcond_i32:
a59a2931 1796 case INDEX_op_setcond_i64:
0d11dc7c 1797 return C_O1_I2(r, rZ, rJ);
9be44a16 RH 1798
1799 case INDEX_op_brcond_i32:
a59a2931 1800 case INDEX_op_brcond_i64:
0d11dc7c 1801 return C_O0_I2(rZ, rJ);
9be44a16 1802 case INDEX_op_movcond_i32:
a59a2931 1803 case INDEX_op_movcond_i64:
0d11dc7c 1804 return C_O1_I4(r, rZ, rJ, rI, 0);
9be44a16 1805 case INDEX_op_add2_i32:
a59a2931 1806 case INDEX_op_add2_i64:
9be44a16 1807 case INDEX_op_sub2_i32:
a59a2931 1808 case INDEX_op_sub2_i64:
0d11dc7c 1809 return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
9be44a16 RH 1810 case INDEX_op_mulu2_i32:
1811 case INDEX_op_muls2_i32:
0d11dc7c 1812 return C_O2_I2(r, r, rZ, rJ);
9be44a16 1813 case INDEX_op_muluh_i64:
a59a2931 1814 return C_O1_I2(r, r, r);
9be44a16 RH 1815
1816 case INDEX_op_qemu_ld_i32:
9be44a16 1817 case INDEX_op_qemu_ld_i64:
a59a2931 1818 return C_O1_I1(r, s);
9be44a16 1819 case INDEX_op_qemu_st_i32:
9be44a16 1820 case INDEX_op_qemu_st_i64:
a59a2931 1821 return C_O0_I2(sZ, s);
9be44a16 RH 1822
1823 default:
0d11dc7c 1824 g_assert_not_reached();
f69d277e 1825 }
f69d277e RH 1826}
1827
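/*
 * Hedged summary (editor's addition, inferred from the constraint
 * definitions near the top of this file): in the C_Ox_Iy() sets
 * above, 'r' accepts any general register; 'Z' additionally accepts
 * constant zero (materialized as %g0); 'J' a signed 13-bit immediate
 * (TCG_CT_CONST_S13, the range of most arithmetic immediates); 'I' a
 * signed 11-bit immediate (TCG_CT_CONST_S11, the range of the MOVcc
 * insns behind movcond); and 's' the qemu_ld/st register subset
 * (ALL_QLDST_REGS) that keeps the softmmu call's argument registers
 * free.
 */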
e4d58b41 1828static void tcg_target_init(TCGContext *s)
8289b279 1829{
a4761232 PMD 1830 /*
1831 * Only probe for the platform and capabilities if we haven't already
1832 * determined maximum values at compile time.
1833 */
90379ca8 RH 1834#ifndef use_vis3_instructions
1835 {
1836 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
1837 use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
1838 }
1839#endif
1840
77f268e8 1841 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
a59a2931 1842 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
f46934df RH 1843
1844 tcg_target_call_clobber_regs = 0;
1845 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
1846 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
1847 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
1848 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
1849 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
1850 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
1851 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
1852 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
1853 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
1854 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
1855 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
1856 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
1857 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
1858 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
1859 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);
8289b279 1860
ccb1bb66 1861 s->reserved_regs = 0;
375816f8 RH 1862 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1863 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1864 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1865 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1866 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1867 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1868 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1869 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
8289b279 1870}
cb1977d3 1871
3a5f6805 1872#define ELF_HOST_MACHINE EM_SPARCV9
cb1977d3 1873
cb1977d3 1874typedef struct {
ae18b28d 1875 DebugFrameHeader h;
3a5f6805 1876 uint8_t fde_def_cfa[4];
497a22eb RH 1877 uint8_t fde_win_save;
1878 uint8_t fde_ret_save[3];
cb1977d3 RH 1879} DebugFrame;
1880
ae18b28d RH 1881 static const DebugFrame debug_frame = {
1882 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1883 .h.cie.id = -1,
1884 .h.cie.version = 1,
1885 .h.cie.code_align = 1,
1886 .h.cie.data_align = -sizeof(void *) & 0x7f,
1887 .h.cie.return_column = 15, /* o7 */
cb1977d3 1888
497a22eb 1889 /* Total FDE size does not include the "len" member. */
ae18b28d 1890 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
497a22eb RH 1891
1892 .fde_def_cfa = {
cb1977d3 RH 1893 12, 30, /* DW_CFA_def_cfa i6, 2047 */
1894 (2047 & 0x7f) | 0x80, (2047 >> 7)
cb1977d3 1895 },
497a22eb RH 1896 .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */
1897 .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
cb1977d3 RH 1898};
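/*
 * Worked decode (editor's addition): the .fde_def_cfa bytes are
 * DW_CFA_def_cfa (0x0c), register 30 (%i6), then the 2047-byte stack
 * bias as a ULEB128 -- low 7 bits first with the continuation bit set:
 *
 *     (2047 & 0x7f) | 0x80 = 0xff
 *      2047 >> 7           = 0x0f
 *
 * and .fde_ret_save is DW_CFA_register (0x09): the return address
 * (column 15, %o7) is found in register 31 (%i7) after the SAVE.
 */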
1899
755bf9e5 1900void tcg_register_jit(const void *buf, size_t buf_size)
cb1977d3 1901{
cb1977d3 RH 1902 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1903}