/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode. */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

/*
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif
#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
#define ALL_QLDST_REGS    (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)

/* Define some temporary registers.  T2 is used for constant generation. */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_O0 + slot;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)
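
/*
 * A brief orientation note: every SPARC insn emitted below is a single
 * 32-bit word formed by OR-ing the field macros above.  For example,
 * the word for "add %o1, %o2, %o0" would be INSN_OP(2) | INSN_OP3(0x00)
 * | INSN_RD(TCG_REG_O0) | INSN_RS1(TCG_REG_O1) | INSN_RS2(TCG_REG_O2);
 * setting bit 13 (as the INSN_IMM* macros do) selects a sign-extended
 * immediate in place of the RS2 register field.
 */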

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD   (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND   (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN  (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR    (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC  (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN   (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR   (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB   (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC  (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC  (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL  (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL  (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV  (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV  (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX  (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR  (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC   (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}
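
/*
 * E.g. check_fit_i64(val, 13) accepts exactly the simm13 range
 * [-4096, 4095] used by the immediate forms of most arithmetic
 * and memory insns above.
 */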

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (check_fit_i32(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits. */
        tcg_out_movi_imm13(s, ret, arg);
    } else {
        /* A 32-bit constant zero-extended to 64 bits. */
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
    }
}
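
/*
 * Illustration: 0x12345678 would be emitted as "sethi %hi(0x12345678)"
 * (which sets bits 31..10 and clears the low ten bits) followed by
 * "or ret, 0x278, ret" supplying the low ten bits.
 */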

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_imm32(s, ret, arg);
        return;
    }

    /* A 13-bit constant sign-extended to 64-bits. */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB. */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits. */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 32-bit constant, shifted. */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces. */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_movi_imm32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}
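
/*
 * The fallback decomposition above costs up to six insns (sethi+or for
 * each 32-bit half, plus sllx+or to combine them); the earlier special
 * cases exist to beat that for common constants: small immediates,
 * TB-relative addresses, shifted 32-bit values, and pool loads.
 */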

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T2);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits. */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};
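
/*
 * Note the rcond table is deliberately sparse: the register-vs-zero
 * branch (BPr) and move (MOVR) forms only exist for signed comparisons
 * against zero, so only EQ/NE/LT/GT/LE/GE map to an rcond value and
 * callers below always check is_unsigned_cond() first.
 */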

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well. */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC. */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit. */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero. */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register. */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}
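
/*
 * How the carry games above work: "subcc c1, c2, %g0" sets the carry
 * bit iff c1 < c2 unsigned, so "addx %g0, 0, ret" (ADDC) yields LTU
 * directly, while "subx %g0, -1, ret" (SUBC) computes 1 - C, i.e. GEU.
 */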

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output. */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates. */
        if (bhconst && bh != 0) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place. */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand. */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    /* Be careful not to clobber %o7 for a tail call. */
    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue,
                     tail_call ? TCG_REG_G2 : TCG_REG_O7);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}
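
/*
 * The direct CALL above carries a 30-bit word displacement, i.e. a
 * +/- 2GB pc-relative range; only destinations outside that range fall
 * back to building the address in T1 and jumping through JMPL.
 */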

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}

#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];

static void emit_extend(TCGContext *s, TCGReg r, int op)
{
    /* Emit zero extend of 8, 16 or 32 bit data as
     * required by the MO_* value op; do nothing for 64 bit.
     */
    switch (op & MO_SIZE) {
    case MO_8:
        tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
        break;
    case MO_16:
        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
        break;
    case MO_32:
        tcg_out_arith(s, r, r, 0, SHIFT_SRL);
        break;
    case MO_64:
        break;
    }
}
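
/*
 * Worth noting for the MO_32 case above: the register form shifts by
 * %g0 (register 0), i.e. by a count of zero, and a 32-bit SRL with any
 * count still clears bits 63..32 -- a one-insn zero-extension.
 */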

static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEUQ] = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEUQ] = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEUQ] = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEUQ] = helper_be_stq_mmu,
    };

    int i;

    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline. */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand. */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
        /* Tail call. */
        tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }

    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline. */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        emit_extend(s, TCG_REG_O2, i);

        /* Set the retaddr operand. */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);

        /* Tail call. */
        tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#else
static const tcg_insn_unit *qemu_unalign_ld_trampoline;
static const tcg_insn_unit *qemu_unalign_st_trampoline;

static void build_trampolines(TCGContext *s)
{
    for (int ld = 0; ld < 2; ++ld) {
        void *helper;

        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }

        if (ld) {
            helper = helper_unaligned_ld;
            qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        } else {
            helper = helper_unaligned_st;
            qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        }

        /* Tail call. */
        tcg_out_jmpl_const(s, helper, true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required. */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr. */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

    build_trampolines(s);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDR contains the guest address.

   MEM_INDEX and OPC are the memory context and memop of the access.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1. */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index. */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now. */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
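
/*
 * In short, the sequence above computes
 *   entry = tlb_table[mmu_idx] + ((addr >> (TARGET_PAGE_BITS -
 *           CPU_TLB_ENTRY_BITS)) & tlb_mask[mmu_idx])
 * and leaves the flags set from comparing entry->addr_{read,write}
 * against the alignment-masked guest address, with the addend in %o1.
 */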
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = LDUB,
    [MO_SB]   = LDSB,
    [MO_UB | MO_LE] = LDUB,
    [MO_SB | MO_LE] = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEUQ] = LDX,
    [MO_BESQ] = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEUQ] = LDX_LE,
    [MO_LESQ] = LDX_LE,
};

static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEUQ] = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEUQ] = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, bool is_64)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case. */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss. */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below. */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);

    /* We let the helper sign-extend SB and SW, but leave SL for here. */
    if (is_64 && (memop & MO_SSIZE) == MO_SL) {
        tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight load in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: load by pieces of minimum alignment. */
        int ld_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            ld_opc = qemu_ld_opc[a_bits | MO_BE];
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else if (a_bits == 0) {
            ld_opc = LDUB;
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = LDSB;
                }
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else {
            ld_opc = qemu_ld_opc[a_bits | MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
                }
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}
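
/*
 * A note on the back-patching above: label_ptr records the branch word,
 * and once the slow path has been emitted its 19-bit displacement is
 * OR-ed in.  The ",a" (annul) bit on the taken-predicted branch means
 * the delay-slot memory access executes only when the branch is taken,
 * i.e. only on the aligned / TLB-hit fast path.
 */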

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss. */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight store in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: store by pieces of minimum alignment. */
        int st_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            st_opc = qemu_st_opc[a_bits | MO_BE];
            for (i = 0; i < s_size; i += a_size) {
                TCGReg d = data;
                int shift = (s_size - a_size - i) * 8;
                if (shift) {
                    d = TCG_REG_T2;
                    tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
                }
                tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
            }
        } else if (a_bits == 0) {
            tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
            for (i = 1; i < s_size; i++) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
            }
        } else {
            /* Note that ST*A with immediate asi must use indexed address. */
            st_opc = qemu_st_opc[a_bits + MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    if (check_fit_ptr(a0, 13)) {
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_movi_imm13(s, TCG_REG_O0, a0);
        return;
    } else {
        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
        if (check_fit_ptr(tb_diff, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            /* Note that TCG_REG_TB has been unwound to O1. */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
            return;
        }
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}
1446 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
1447}
1448
cf7d6b8e
RH
1449static void tcg_out_goto_tb(TCGContext *s, int which)
1450{
a228ae3e 1451 ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));
1e42b4f8 1452
1ffbe5d6 1453 /* Load link and indirect branch. */
1e42b4f8 1454 set_jmp_insn_offset(s, which);
a228ae3e 1455 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
1ffbe5d6
RH
1456 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
1457 /* delay slot */
1458 tcg_out_nop(s);
cf7d6b8e
RH
1459 set_jmp_reset_offset(s, which);
1460
1461 /*
1462 * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
1463 * to the beginning of this TB.
1464 */
a228ae3e
RH
1465 off = -tcg_current_code_size(s);
1466 if (check_fit_i32(off, 13)) {
1467 tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
1e42b4f8 1468 } else {
a228ae3e 1469 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
1e42b4f8 1470 tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
cf7d6b8e
RH
1471 }
1472}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
}
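
/*
 * Empty by design: tcg_out_goto_tb above branches through a pointer
 * loaded from the TB's jump-target slot, so retargeting a chained jump
 * only rewrites that data word -- no code word needs patching and no
 * icache flush is required here.
 */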
1478
b357f902
RH
1479static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1480 const TCGArg args[TCG_MAX_OP_ARGS],
1481 const int const_args[TCG_MAX_OP_ARGS])
8289b279 1482{
b357f902
RH
1483 TCGArg a0, a1, a2;
1484 int c, c2;
1485
1486 /* Hoist the loads of the most common arguments. */
1487 a0 = args[0];
1488 a1 = args[1];
1489 a2 = args[2];
1490 c2 = const_args[2];
8289b279
BS
1491
1492 switch (opc) {
38f81dc5
RH
1493 case INDEX_op_goto_ptr:
1494 tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
1e42b4f8 1495 tcg_out_mov_delay(s, TCG_REG_TB, a0);
38f81dc5 1496 break;
8289b279 1497 case INDEX_op_br:
bec16311 1498 tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
f5ef6aac 1499 tcg_out_nop(s);
8289b279 1500 break;
8289b279 1501
8289b279 1502#define OP_32_64(x) \
ba225198
RH
1503 glue(glue(case INDEX_op_, x), _i32): \
1504 glue(glue(case INDEX_op_, x), _i64)
34b1a49c 1505
ba225198 1506 OP_32_64(ld8u):
b357f902 1507 tcg_out_ldst(s, a0, a1, a2, LDUB);
8289b279 1508 break;
ba225198 1509 OP_32_64(ld8s):
b357f902 1510 tcg_out_ldst(s, a0, a1, a2, LDSB);
8289b279 1511 break;
ba225198 1512 OP_32_64(ld16u):
b357f902 1513 tcg_out_ldst(s, a0, a1, a2, LDUH);
8289b279 1514 break;
ba225198 1515 OP_32_64(ld16s):
b357f902 1516 tcg_out_ldst(s, a0, a1, a2, LDSH);
8289b279
BS
1517 break;
1518 case INDEX_op_ld_i32:
53cd9273 1519 case INDEX_op_ld32u_i64:
b357f902 1520 tcg_out_ldst(s, a0, a1, a2, LDUW);
8289b279 1521 break;
ba225198 1522 OP_32_64(st8):
b357f902 1523 tcg_out_ldst(s, a0, a1, a2, STB);
8289b279 1524 break;
ba225198 1525 OP_32_64(st16):
b357f902 1526 tcg_out_ldst(s, a0, a1, a2, STH);
8289b279
BS
1527 break;
1528 case INDEX_op_st_i32:
53cd9273 1529 case INDEX_op_st32_i64:
b357f902 1530 tcg_out_ldst(s, a0, a1, a2, STW);
8289b279 1531 break;
ba225198 1532 OP_32_64(add):
53cd9273 1533 c = ARITH_ADD;
ba225198
RH
1534 goto gen_arith;
1535 OP_32_64(sub):
8289b279 1536 c = ARITH_SUB;
ba225198
RH
1537 goto gen_arith;
1538 OP_32_64(and):
8289b279 1539 c = ARITH_AND;
ba225198 1540 goto gen_arith;
dc69960d
RH
1541 OP_32_64(andc):
1542 c = ARITH_ANDN;
1543 goto gen_arith;
ba225198 1544 OP_32_64(or):
8289b279 1545 c = ARITH_OR;
ba225198 1546 goto gen_arith;
18c8f7a3
RH
1547 OP_32_64(orc):
1548 c = ARITH_ORN;
1549 goto gen_arith;
ba225198 1550 OP_32_64(xor):
8289b279 1551 c = ARITH_XOR;
ba225198 1552 goto gen_arith;
8289b279
BS
1553 case INDEX_op_shl_i32:
1554 c = SHIFT_SLL;
1fd95946
RH
1555 do_shift32:
1556 /* Limit immediate shift count lest we create an illegal insn. */
b357f902 1557 tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
1fd95946 1558 break;
8289b279
BS
1559 case INDEX_op_shr_i32:
1560 c = SHIFT_SRL;
1fd95946 1561 goto do_shift32;
8289b279
BS
1562 case INDEX_op_sar_i32:
1563 c = SHIFT_SRA;
1fd95946 1564 goto do_shift32;
8289b279
BS
1565 case INDEX_op_mul_i32:
1566 c = ARITH_UMUL;
ba225198 1567 goto gen_arith;
583d1215 1568
4b5a85c1
RH
1569 OP_32_64(neg):
1570 c = ARITH_SUB;
1571 goto gen_arith1;
be6551b1
RH
1572 OP_32_64(not):
1573 c = ARITH_ORN;
1574 goto gen_arith1;
4b5a85c1 1575
583d1215 1576 case INDEX_op_div_i32:
b357f902 1577 tcg_out_div32(s, a0, a1, a2, c2, 0);
583d1215
RH
1578 break;
1579 case INDEX_op_divu_i32:
b357f902 1580 tcg_out_div32(s, a0, a1, a2, c2, 1);
583d1215
RH
1581 break;
1582
8289b279 1583 case INDEX_op_brcond_i32:
bec16311 1584 tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
8289b279 1585 break;
dbfe80e1 1586 case INDEX_op_setcond_i32:
b357f902 1587 tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
dbfe80e1 1588 break;
ded37f0d 1589 case INDEX_op_movcond_i32:
b357f902 1590 tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
ded37f0d 1591 break;
dbfe80e1 1592
7a3766f3 1593 case INDEX_op_add2_i32:
609ac1e1
RH
1594 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1595 args[4], const_args[4], args[5], const_args[5],
c470b663 1596 ARITH_ADDCC, ARITH_ADDC);
7a3766f3
RH
1597 break;
1598 case INDEX_op_sub2_i32:
609ac1e1
RH
1599 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1600 args[4], const_args[4], args[5], const_args[5],
c470b663 1601 ARITH_SUBCC, ARITH_SUBC);
7a3766f3
RH
1602 break;
1603 case INDEX_op_mulu2_i32:
f4c16661
RH
1604 c = ARITH_UMUL;
1605 goto do_mul2;
1606 case INDEX_op_muls2_i32:
1607 c = ARITH_SMUL;
1608 do_mul2:
3a5f6805 1609 /* The 32-bit multiply insns produce a full 64-bit result. */
b357f902 1610 tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
3a5f6805 1611 tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
7a3766f3 1612 break;
8289b279 1613
cab0a7ea 1614 case INDEX_op_qemu_ld_i32:
59227d5d 1615 tcg_out_qemu_ld(s, a0, a1, a2, false);
8289b279 1616 break;
cab0a7ea 1617 case INDEX_op_qemu_ld_i64:
59227d5d 1618 tcg_out_qemu_ld(s, a0, a1, a2, true);
8289b279 1619 break;
cab0a7ea 1620 case INDEX_op_qemu_st_i32:
cab0a7ea 1621 case INDEX_op_qemu_st_i64:
59227d5d 1622 tcg_out_qemu_st(s, a0, a1, a2);
a0ce341a 1623 break;
8289b279 1624
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
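    /*
     * A 32-bit shift by zero places the low 32 bits of the source in
     * the destination, sign- (SRA) or zero- (SRL) extended to 64 bits.
     */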
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

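    /*
     * Shared tails for the cases above: gen_arith emits a three-operand
     * insn, with c2 flagging a constant a2; gen_arith1 is the one-operand
     * form, with %g0 as the first source.
     */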
    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8s_i64:
    default:
        g_assert_not_reached();
    }
}

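/*
 * Constraint letters (from tcg-target-con-str.h):
 *   r - any general register
 *   s - a general register also usable by qemu_ld/st; with softmmu this
 *       excludes %o0-%o2, which the tlb lookup and helper call need
 *   I - signed 11-bit immediate
 *   J - signed 13-bit immediate
 *   Z - the constant zero, supplied as %g0
 */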
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rJ);
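    /*
     * The trailing '0' ties the last input (the false-value operand) to
     * the output register, so only the true case needs a conditional move.
     */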
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rZ, rJ);
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, s);
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(sZ, s);

    default:
        g_assert_not_reached();
    }
}

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

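    /* VIS3 adds UMULXHI (used for muluh_i64) and ADDXC/ADDXCCC (add2_i64). */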
    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

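    /*
     * The %g and %o registers are call-clobbered; %l and %i survive
     * calls via the register window.
     */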
    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
}

#define ELF_HOST_MACHINE EM_SPARCV9

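/*
 * A minimal DWARF CIE/FDE pair, handed to tcg_register_jit below so
 * that unwinders (e.g. the GDB JIT interface) can walk frames in
 * generated code.
 */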
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,              /* o7 */

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

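    /*
     * DW_CFA_def_cfa takes ULEB128 operands: register 30 is %i6, and
     * the 2047-byte v9 stack bias encodes as the two bytes 0xff, 0x0f.
     */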
    .fde_def_cfa = {
        12, 30,                             /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
    },
    .fde_win_save = 0x2d,                   /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },          /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}