/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode. */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

/*
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif
#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
#define ALL_QLDST_REGS    (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)

/* Define some temporary registers.  T3 is used for constant generation. */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_G2
#define TCG_REG_T3  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_O0 + slot;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x)  ((x) << 25)

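/*
 * Worked example: "add %o1, %o2, %o0" assembles as
 * ARITH_ADD | INSN_RD(8) | INSN_RS1(9) | INSN_RS2(10), where ARITH_ADD
 * (defined below) is INSN_OP(2) | INSN_OP3(0x00).  The immediate form
 * replaces INSN_RS2(x) with INSN_IMM13(x), whose bit 13 selects the
 * immediate encoding.
 */
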
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD   (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND   (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN  (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR    (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC  (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN   (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR   (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB   (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC  (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC  (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL  (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL  (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV  (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV  (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX  (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR  (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC   (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)
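/* "sethi 0, %g0" is the canonical SPARC nop encoding. */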

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64

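/*
 * For example, check_fit_tl(-0x1000, 13) is true while
 * check_fit_tl(0x1000, 13) is false: a signed 13-bit immediate
 * spans [-0x1000, 0xfff].
 */
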
static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

/* A 13-bit constant sign-extended to 64 bits. */
static void tcg_out_movi_s13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

/* A 32-bit constant zero-extended to 64 bits. */
static void tcg_out_movi_u32(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out_sethi(s, ret, arg);
    if (arg & 0x3ff) {
        tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
    }
}

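/*
 * Worked example: for arg = 0x12345678, the SETHI above writes the high
 * 22 bits (ret = 0x12345400) and the OR merges in the low 10 bits
 * (0x278); when those low bits are zero the OR is skipped entirely.
 */
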
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 13-bit constant sign-extended to 64 bits. */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_s13(s, ret, arg);
        return;
    }

    /* A 32-bit constant, or 32-bit zero-extended to 64 bits. */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_u32(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB. */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64 bits. */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }
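    /*
     * The SETHI/XOR pair above works because SETHI zeros the low 10 bits
     * and bits 63..32: it loads the complement of arg's bits 31..10, and
     * the XOR with the sign-extended 13-bit immediate flips bits 63..10
     * back while installing the low 10 bits, yielding arg sign-extended.
     */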

    /* A 32-bit constant, shifted. */
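    /* E.g. arg = 1ULL << 40: sethi loads 1 << 10, then sllx by 30. */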
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces. */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_u32(s, ret, hi);
        tcg_out_movi_u32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T3);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T3);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
    tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits. */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

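/*
 * Note that MOVcc encodes its condition in the low four bits of the
 * rs1 field, so the INSN_RS1() above carries a condition code rather
 * than a register operand.
 */
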
static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well. */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC. */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit. */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero. */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register. */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

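    /*
     * Here the carry flag set by SUBCC is the unsigned comparison
     * result: ADDC computes %g0 + 0 + C, materializing LTU directly,
     * while SUBC computes 0 - (-1) - C = 1 - C for GEU.
     */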
    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output. */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_s13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates. */
        if (bhconst && bh != 0) {
            tcg_out_movi_s13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place. */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_s13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand. */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue, TCG_REG_T2);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}

#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[MO_SSIZE + 1];
static const tcg_insn_unit *qemu_st_trampoline[MO_SIZE + 1];

static void build_trampolines(TCGContext *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline. */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand. */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
        /* Tail call. */
        tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }

    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline. */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand. */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);

        /* Tail call. */
        tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#else
static const tcg_insn_unit *qemu_unalign_ld_trampoline;
static const tcg_insn_unit *qemu_unalign_st_trampoline;

static void build_trampolines(TCGContext *s)
{
    for (int ld = 0; ld < 2; ++ld) {
        void *helper;

        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }

        if (ld) {
            helper = helper_unaligned_ld;
            qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        } else {
            helper = helper_unaligned_st;
            qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        }

        /* Tail call. */
        tcg_out_jmpl_const(s, helper, true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required. */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr. */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_s13(s, TCG_REG_O0, 0);

    build_trampolines(s);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDR contains the guest virtual address to translate.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1. */
static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index. */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
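    /*
     * E.g. with 4 KiB pages and a 4-byte aligned access, compare_mask is
     * ~0xfff | 3: the AND below keeps the page number plus any misaligned
     * low bits, so an unaligned address cannot match the page-aligned tlb
     * comparator and takes the slow path.
     */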
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now. */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, r0, addr);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
    [MO_UB] = LDUB,
    [MO_SB] = LDSB,
    [MO_UB | MO_LE] = LDUB,
    [MO_SB | MO_LE] = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEUQ] = LDX,
    [MO_BESQ] = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEUQ] = LDX_LE,
    [MO_LESQ] = LDX_LE,
};

static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB] = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEUQ] = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEUQ] = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case. */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss. */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below. */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[MO_UL];
    } else {
        func = qemu_ld_trampoline[memop & MO_SSIZE];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);

    /* We let the helper sign-extend SB and SW, but leave SL for here. */
    if ((memop & MO_SSIZE) == MO_SL) {
        tcg_out_ext32s(s, data, TCG_REG_O0);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_T1, addr);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight load in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: load by pieces of minimum alignment. */
        int ld_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            ld_opc = qemu_ld_opc[a_bits | MO_BE];
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                /* Shift the accumulated value up by one piece, in bits. */
                tcg_out_arithi(s, data, data, a_size * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else if (a_bits == 0) {
            ld_opc = LDUB;
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = LDSB;
                }
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else {
            ld_opc = qemu_ld_opc[a_bits | MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
                }
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss. */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
    tcg_out_movext(s, (memop & MO_SIZE) == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32,
                   TCG_REG_O2, data_type, memop & MO_SIZE, data);

    func = qemu_st_trampoline[memop & MO_SIZE];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_T1, addr);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight store in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: store by pieces of minimum alignment. */
        int st_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            st_opc = qemu_st_opc[a_bits | MO_BE];
            for (i = 0; i < s_size; i += a_size) {
                TCGReg d = data;
                int shift = (s_size - a_size - i) * 8;
                if (shift) {
                    d = TCG_REG_T2;
                    tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
                }
                tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
            }
        } else if (a_bits == 0) {
            tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
            for (i = 1; i < s_size; i++) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
            }
        } else {
            /* Note that ST*A with immediate asi must use indexed address. */
            st_opc = qemu_st_opc[a_bits + MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    if (check_fit_ptr(a0, 13)) {
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_movi_s13(s, TCG_REG_O0, a0);
        return;
    } else {
        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
        if (check_fit_ptr(tb_diff, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            /* Note that TCG_REG_TB has been unwound to O1. */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
            return;
        }
    }
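    /*
     * Build the return value in two halves so the low-part OR can sit in
     * the RETURN delay slot; RETURN restores the register window before
     * the delay slot executes, which is why %i0 here and %o0 below name
     * the same physical register.
     */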
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));

    /* Load link and indirect branch. */
    set_jmp_insn_offset(s, which);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);
    set_jmp_reset_offset(s, which);

    /*
     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
     * to the beginning of this TB.
     */
    off = -tcg_current_code_size(s);
    if (check_fit_i32(off, 13)) {
        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
    }
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
}

b357f902
RH
1475static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1476 const TCGArg args[TCG_MAX_OP_ARGS],
1477 const int const_args[TCG_MAX_OP_ARGS])
8289b279 1478{
b357f902
RH
1479 TCGArg a0, a1, a2;
1480 int c, c2;
1481
1482 /* Hoist the loads of the most common arguments. */
1483 a0 = args[0];
1484 a1 = args[1];
1485 a2 = args[2];
1486 c2 = const_args[2];
8289b279
BS
1487
1488 switch (opc) {
38f81dc5
RH
1489 case INDEX_op_goto_ptr:
1490 tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
1e42b4f8 1491 tcg_out_mov_delay(s, TCG_REG_TB, a0);
38f81dc5 1492 break;
8289b279 1493 case INDEX_op_br:
bec16311 1494 tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
f5ef6aac 1495 tcg_out_nop(s);
8289b279 1496 break;
8289b279 1497
8289b279 1498#define OP_32_64(x) \
ba225198
RH
1499 glue(glue(case INDEX_op_, x), _i32): \
1500 glue(glue(case INDEX_op_, x), _i64)
34b1a49c 1501
ba225198 1502 OP_32_64(ld8u):
b357f902 1503 tcg_out_ldst(s, a0, a1, a2, LDUB);
8289b279 1504 break;
ba225198 1505 OP_32_64(ld8s):
b357f902 1506 tcg_out_ldst(s, a0, a1, a2, LDSB);
8289b279 1507 break;
ba225198 1508 OP_32_64(ld16u):
b357f902 1509 tcg_out_ldst(s, a0, a1, a2, LDUH);
8289b279 1510 break;
ba225198 1511 OP_32_64(ld16s):
b357f902 1512 tcg_out_ldst(s, a0, a1, a2, LDSH);
8289b279
BS
1513 break;
1514 case INDEX_op_ld_i32:
53cd9273 1515 case INDEX_op_ld32u_i64:
b357f902 1516 tcg_out_ldst(s, a0, a1, a2, LDUW);
8289b279 1517 break;
ba225198 1518 OP_32_64(st8):
b357f902 1519 tcg_out_ldst(s, a0, a1, a2, STB);
8289b279 1520 break;
ba225198 1521 OP_32_64(st16):
b357f902 1522 tcg_out_ldst(s, a0, a1, a2, STH);
8289b279
BS
1523 break;
1524 case INDEX_op_st_i32:
53cd9273 1525 case INDEX_op_st32_i64:
b357f902 1526 tcg_out_ldst(s, a0, a1, a2, STW);
8289b279 1527 break;
ba225198 1528 OP_32_64(add):
53cd9273 1529 c = ARITH_ADD;
ba225198
RH
1530 goto gen_arith;
1531 OP_32_64(sub):
8289b279 1532 c = ARITH_SUB;
ba225198
RH
1533 goto gen_arith;
1534 OP_32_64(and):
8289b279 1535 c = ARITH_AND;
ba225198 1536 goto gen_arith;
dc69960d
RH
1537 OP_32_64(andc):
1538 c = ARITH_ANDN;
1539 goto gen_arith;
ba225198 1540 OP_32_64(or):
8289b279 1541 c = ARITH_OR;
ba225198 1542 goto gen_arith;
18c8f7a3
RH
1543 OP_32_64(orc):
1544 c = ARITH_ORN;
1545 goto gen_arith;
ba225198 1546 OP_32_64(xor):
8289b279 1547 c = ARITH_XOR;
ba225198 1548 goto gen_arith;
8289b279
BS
1549 case INDEX_op_shl_i32:
1550 c = SHIFT_SLL;
1fd95946
RH
1551 do_shift32:
1552 /* Limit immediate shift count lest we create an illegal insn. */
b357f902 1553 tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
1fd95946 1554 break;
8289b279
BS
1555 case INDEX_op_shr_i32:
1556 c = SHIFT_SRL;
1fd95946 1557 goto do_shift32;
8289b279
BS
1558 case INDEX_op_sar_i32:
1559 c = SHIFT_SRA;
1fd95946 1560 goto do_shift32;
8289b279
BS
1561 case INDEX_op_mul_i32:
1562 c = ARITH_UMUL;
ba225198 1563 goto gen_arith;
583d1215 1564
4b5a85c1
RH
1565 OP_32_64(neg):
1566 c = ARITH_SUB;
1567 goto gen_arith1;
be6551b1
RH
1568 OP_32_64(not):
1569 c = ARITH_ORN;
1570 goto gen_arith1;
4b5a85c1 1571
583d1215 1572 case INDEX_op_div_i32:
b357f902 1573 tcg_out_div32(s, a0, a1, a2, c2, 0);
583d1215
RH
1574 break;
1575 case INDEX_op_divu_i32:
b357f902 1576 tcg_out_div32(s, a0, a1, a2, c2, 1);
583d1215
RH
1577 break;
1578
8289b279 1579 case INDEX_op_brcond_i32:
bec16311 1580 tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
8289b279 1581 break;
dbfe80e1 1582 case INDEX_op_setcond_i32:
b357f902 1583 tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
dbfe80e1 1584 break;
ded37f0d 1585 case INDEX_op_movcond_i32:
b357f902 1586 tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
ded37f0d 1587 break;
dbfe80e1 1588
7a3766f3 1589 case INDEX_op_add2_i32:
609ac1e1
RH
1590 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1591 args[4], const_args[4], args[5], const_args[5],
c470b663 1592 ARITH_ADDCC, ARITH_ADDC);
7a3766f3
RH
1593 break;
1594 case INDEX_op_sub2_i32:
609ac1e1
RH
1595 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1596 args[4], const_args[4], args[5], const_args[5],
c470b663 1597 ARITH_SUBCC, ARITH_SUBC);
7a3766f3
RH
1598 break;
1599 case INDEX_op_mulu2_i32:
f4c16661
RH
1600 c = ARITH_UMUL;
1601 goto do_mul2;
1602 case INDEX_op_muls2_i32:
1603 c = ARITH_SMUL;
1604 do_mul2:
3a5f6805 1605 /* The 32-bit multiply insns produce a full 64-bit result. */
b357f902 1606 tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
3a5f6805 1607 tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
7a3766f3 1608 break;
8289b279 1609
cab0a7ea 1610 case INDEX_op_qemu_ld_i32:
e2adae3f 1611 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
8289b279 1612 break;
cab0a7ea 1613 case INDEX_op_qemu_ld_i64:
e2adae3f 1614 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
8289b279 1615 break;
cab0a7ea 1616 case INDEX_op_qemu_st_i32:
b3dfd5fc
RH
1617 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
1618 break;
cab0a7ea 1619 case INDEX_op_qemu_st_i64:
b3dfd5fc 1620 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
a0ce341a 1621 break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

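    /*
     * Shared tails: gen_arith emits "a0 = a1 op a2", and gen_arith1
     * reuses the same helper for unary ops by substituting %g0 (the
     * hardwired zero) as first source, so neg becomes "0 - a1" and
     * not becomes "0 orn a1", i.e. ~a1.
     */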
    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

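    /*
     * A note on the barrier below: tcg_out_mb (defined earlier) relies
     * on the TCG_MO_* constants lining up with the SPARC MEMBAR mmask
     * bits, so the flags in a0 translate into a single MEMBAR insn;
     * that correspondence is assumed here rather than re-derived.
     */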
    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb.  */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb.  */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op.  */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
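    /*
     * Reminder of the constraint letters used below, as believed to be
     * defined for this backend: 'r' is any general register, 'Z' also
     * accepts the constant zero (materialized as %g0), 'J' a signed
     * 13-bit immediate, 'I' a signed 11-bit immediate, and 's' a
     * register outside the softmmu-reserved %o0-%o2 set.
     */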
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rJ);
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rI, 0);
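    /*
     * The trailing "0" above ties the last input to output operand 0,
     * so the false value shares the destination register; 'I' permits a
     * simm11 constant there, matching the MOVcc immediate field.
     */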
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rZ, rJ);
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, s);
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(sZ, s);
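    /*
     * 's' and 'sZ' restrict address and store-value registers to
     * ALL_QLDST_REGS, keeping %o0-%o2 free for the softmmu TLB lookup
     * and helper call described at the top of the file.
     */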

    default:
        g_assert_not_reached();
    }
}

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif
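    /*
     * The probe above is what later gates the VIS3-only instructions
     * this backend may emit, e.g. UMULXHI backing muluh_i64; without
     * the hwcap bit those opcodes are presumed unavailable.
     */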

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T3); /* for internal use */
}

#define ELF_HOST_MACHINE EM_SPARCV9

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 30,                           /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
    },
    .fde_win_save = 0x2d,                 /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },        /* DW_CFA_register o7, i7 */
};
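/*
 * Decoding the hand-assembled FDE above under the standard DWARF byte
 * values: fde_def_cfa is DW_CFA_def_cfa (0x0c) with register 30 (%i6)
 * and ULEB128 offset 2047, the SPARC64 stack bias, encoded as 0xff 0x0f;
 * fde_win_save is DW_CFA_GNU_window_save (0x2d); and fde_ret_save is
 * DW_CFA_register (0x09) mapping register 15 (%o7) to 31 (%i7), where
 * the return address lives after the register-window SAVE.
 */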

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}