/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode. */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

/*
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif
#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
#define ALL_QLDST_REGS    (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)

/* Define some temporary registers. T2 is used for constant generation. */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_O0 + slot;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x)  ((x) << 25)

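/*
 * Worked example of the field encodings above: "add %o1, %o2, %o0"
 * assembles to ARITH_ADD | INSN_RD(TCG_REG_O0) | INSN_RS1(TCG_REG_O1)
 * | INSN_RS2(TCG_REG_O2), i.e. op=2, op3=0x00, rd=8, rs1=9, rs2=10.
 * INSN_IMM13 sets bit 13, which selects the register-plus-immediate
 * form of the same instruction in place of rs2.
 */
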
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD   (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND   (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN  (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR    (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC  (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN   (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR   (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB   (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC  (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC  (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL  (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL  (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV  (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV  (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX  (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR  (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC   (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64

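/*
 * Worked example: check_fit_i64(4095, 13) is true, since the signed
 * 13-bit range is [-4096, 4095]; check_fit_i64(4096, 13) is false.
 * This matches the reach of the simm13 instruction field above.
 */
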
static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

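/*
 * Emit a three-operand insn whose second source operand is either a
 * register or a 13-bit signed immediate, selected by val2const; most
 * of the integer ops below funnel through this helper.
 */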
static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (check_fit_i32(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits. */
        tcg_out_movi_imm13(s, ret, arg);
    } else {
        /* A 32-bit constant zero-extended to 64 bits. */
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
    }
}

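/*
 * Example of the sethi+or path above: 0x12345678 does not fit in 13
 * bits, so it becomes "sethi %hi(0x12345678)" (setting bits 31..10,
 * i.e. 0x12345400) followed by "or ret, 0x278, ret".
 */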
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_imm32(s, ret, arg);
        return;
    }

    /* A 13-bit constant sign-extended to 64-bits. */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB. */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits. */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 32-bit constant, shifted. */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible. */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces. */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_movi_imm32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}
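
/*
 * Worked example of the final fallback, reachable only from the
 * prologue where the TB-relative and constant-pool forms are
 * unavailable: arg = 0x123456789ab has lo = 0x456789ab, which does
 * not fit in 13 bits, so we build 0x123 in ret, build 0x456789ab in
 * scratch, shift ret left by 32, and or in scratch.
 */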

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T2);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits. */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

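/*
 * SPARC has no dedicated compare instruction; subtracting into %g0
 * (subcc) discards the result while updating the icc/xcc condition
 * codes, which the conditional branches and moves then consume.
 */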
static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well. */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC. */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit. */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero. */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register. */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

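    /*
     * After the subcc, the carry flag is set exactly when c1 < c2
     * unsigned.  For LTU, "addc %g0, 0, ret" therefore yields the carry
     * itself; for GEU, "subc %g0, -1, ret" computes 0 - (-1) - carry,
     * i.e. the complement of the carry.
     */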
    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output. */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates. */
        if (bhconst && bh != 0) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place. */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand. */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    /* Be careful not to clobber %o7 for a tail call. */
    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue,
                     tail_call ? TCG_REG_G2 : TCG_REG_O7);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

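/*
 * The CALL instruction carries a 30-bit signed word displacement,
 * enough to span the 32-bit address space; when the target is farther
 * than that from the current code pointer, fall back to building the
 * address in T1 and jumping through it.
 */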
static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}

#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];

static void emit_extend(TCGContext *s, TCGReg r, int op)
{
    /* Emit zero extend of 8, 16 or 32 bit data as
     * required by the MO_* value op; do nothing for 64 bit.
     */
    switch (op & MO_SIZE) {
    case MO_8:
        tcg_out_ext8u(s, r, r);
        break;
    case MO_16:
        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
        break;
    case MO_32:
        tcg_out_arith(s, r, r, 0, SHIFT_SRL);
        break;
    case MO_64:
        break;
    }
}

static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEUQ] = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEUQ] = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEUQ] = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEUQ] = helper_be_stq_mmu,
    };

    int i;

    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline. */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand. */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
        /* Tail call. */
        tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }

    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline. */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        emit_extend(s, TCG_REG_O2, i);

        /* Set the retaddr operand. */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);

        /* Tail call. */
        tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#else
static const tcg_insn_unit *qemu_unalign_ld_trampoline;
static const tcg_insn_unit *qemu_unalign_st_trampoline;

static void build_trampolines(TCGContext *s)
{
    for (int ld = 0; ld < 2; ++ld) {
        void *helper;

        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }

        if (ld) {
            helper = helper_unaligned_ld;
            qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        } else {
            helper = helper_unaligned_st;
            qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        }

        /* Tail call. */
        tcg_out_jmpl_const(s, helper, true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer. Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required. */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

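    /*
     * Jump to the TB code pointer: tcg_qemu_tb_exec(env, tb_ptr) passes
     * env in %o0 and tb_ptr in %o1, which the SAVE above renamed to
     * %i0 (TCG_AREG0) and %i1 (TCG_REG_TB) respectively.
     */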
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr. */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

    build_trampolines(s);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1. */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index. */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2. */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now. */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
    [MO_UB] = LDUB,
    [MO_SB] = LDSB,
    [MO_UB | MO_LE] = LDUB,
    [MO_SB | MO_LE] = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEUQ] = LDX,
    [MO_BESQ] = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEUQ] = LDX_LE,
    [MO_LESQ] = LDX_LE,
};

static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB] = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEUQ] = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEUQ] = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, bool is_64)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case. */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss. */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below. */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);

    /* We let the helper sign-extend SB and SW, but leave SL for here. */
    if (is_64 && (memop & MO_SSIZE) == MO_SL) {
        tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight load in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: load by pieces of minimum alignment. */
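        /*
         * For example, an 8-byte big-endian load with 4-byte alignment
         * becomes two 4-byte loads at offsets 0 and 4, merged with
         * SLLX and OR.
         */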
        int ld_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            ld_opc = qemu_ld_opc[a_bits | MO_BE];
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, data, data, a_size * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else if (a_bits == 0) {
            ld_opc = LDUB;
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = LDSB;
                }
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else {
            ld_opc = qemu_ld_opc[a_bits | MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
                }
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss. */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight store in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: store by pieces of minimum alignment. */
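        /*
         * For example, an 8-byte big-endian store with 4-byte alignment
         * stores the high word (data >> 32) at offset 0 and the low
         * word at offset 4.
         */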
        int st_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            st_opc = qemu_st_opc[a_bits | MO_BE];
            for (i = 0; i < s_size; i += a_size) {
                TCGReg d = data;
                int shift = (s_size - a_size - i) * 8;
                if (shift) {
                    d = TCG_REG_T2;
                    tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
                }
                tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
            }
        } else if (a_bits == 0) {
            tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
            for (i = 1; i < s_size; i++) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
            }
        } else {
            /* Note that ST*A with immediate asi must use indexed address. */
            st_opc = qemu_st_opc[a_bits + MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

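/*
 * The epilogue sequences below end with "return %i7 + 8": RETURN both
 * restores the caller's register window and jumps, so the delay slot
 * instruction executes in the restored window and can set up the
 * return value directly in the caller's %o0.
 */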
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    if (check_fit_ptr(a0, 13)) {
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_movi_imm13(s, TCG_REG_O0, a0);
        return;
    } else {
        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
        if (check_fit_ptr(tb_diff, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            /* Note that TCG_REG_TB has been unwound to O1. */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
            return;
        }
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));

    /* Load link and indirect branch. */
    set_jmp_insn_offset(s, which);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);
    set_jmp_reset_offset(s, which);

    /*
     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
     * to the beginning of this TB.
     */
    off = -tcg_current_code_size(s);
    if (check_fit_i32(off, 13)) {
        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
    }
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
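    /*
     * goto_tb branches indirectly through the jump table entry loaded
     * via TCG_REG_TB, so updating the table in memory is sufficient;
     * there is no generated code to patch here.
     */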
}
1488
b357f902
RH
1489static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1490 const TCGArg args[TCG_MAX_OP_ARGS],
1491 const int const_args[TCG_MAX_OP_ARGS])
8289b279 1492{
b357f902
RH
1493 TCGArg a0, a1, a2;
1494 int c, c2;
1495
1496 /* Hoist the loads of the most common arguments. */
1497 a0 = args[0];
1498 a1 = args[1];
1499 a2 = args[2];
1500 c2 = const_args[2];
8289b279
BS
1501
1502 switch (opc) {
38f81dc5
RH
1503 case INDEX_op_goto_ptr:
1504 tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
1e42b4f8 1505 tcg_out_mov_delay(s, TCG_REG_TB, a0);
38f81dc5 1506 break;
8289b279 1507 case INDEX_op_br:
bec16311 1508 tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
f5ef6aac 1509 tcg_out_nop(s);
8289b279 1510 break;
8289b279 1511
8289b279 1512#define OP_32_64(x) \
ba225198
RH
1513 glue(glue(case INDEX_op_, x), _i32): \
1514 glue(glue(case INDEX_op_, x), _i64)
34b1a49c 1515
ba225198 1516 OP_32_64(ld8u):
b357f902 1517 tcg_out_ldst(s, a0, a1, a2, LDUB);
8289b279 1518 break;
ba225198 1519 OP_32_64(ld8s):
b357f902 1520 tcg_out_ldst(s, a0, a1, a2, LDSB);
8289b279 1521 break;
ba225198 1522 OP_32_64(ld16u):
b357f902 1523 tcg_out_ldst(s, a0, a1, a2, LDUH);
8289b279 1524 break;
ba225198 1525 OP_32_64(ld16s):
b357f902 1526 tcg_out_ldst(s, a0, a1, a2, LDSH);
8289b279
BS
1527 break;
1528 case INDEX_op_ld_i32:
53cd9273 1529 case INDEX_op_ld32u_i64:
b357f902 1530 tcg_out_ldst(s, a0, a1, a2, LDUW);
8289b279 1531 break;
ba225198 1532 OP_32_64(st8):
b357f902 1533 tcg_out_ldst(s, a0, a1, a2, STB);
8289b279 1534 break;
ba225198 1535 OP_32_64(st16):
b357f902 1536 tcg_out_ldst(s, a0, a1, a2, STH);
8289b279
BS
1537 break;
1538 case INDEX_op_st_i32:
53cd9273 1539 case INDEX_op_st32_i64:
b357f902 1540 tcg_out_ldst(s, a0, a1, a2, STW);
8289b279 1541 break;
ba225198 1542 OP_32_64(add):
53cd9273 1543 c = ARITH_ADD;
ba225198
RH
1544 goto gen_arith;
1545 OP_32_64(sub):
8289b279 1546 c = ARITH_SUB;
ba225198
RH
1547 goto gen_arith;
1548 OP_32_64(and):
8289b279 1549 c = ARITH_AND;
ba225198 1550 goto gen_arith;
dc69960d
RH
1551 OP_32_64(andc):
1552 c = ARITH_ANDN;
1553 goto gen_arith;
ba225198 1554 OP_32_64(or):
8289b279 1555 c = ARITH_OR;
ba225198 1556 goto gen_arith;
18c8f7a3
RH
1557 OP_32_64(orc):
1558 c = ARITH_ORN;
1559 goto gen_arith;
ba225198 1560 OP_32_64(xor):
8289b279 1561 c = ARITH_XOR;
ba225198 1562 goto gen_arith;
8289b279
BS
1563 case INDEX_op_shl_i32:
1564 c = SHIFT_SLL;
1fd95946
RH
1565 do_shift32:
1566 /* Limit immediate shift count lest we create an illegal insn. */
b357f902 1567 tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
1fd95946 1568 break;
8289b279
BS
1569 case INDEX_op_shr_i32:
1570 c = SHIFT_SRL;
1fd95946 1571 goto do_shift32;
8289b279
BS
1572 case INDEX_op_sar_i32:
1573 c = SHIFT_SRA;
1fd95946 1574 goto do_shift32;
8289b279
BS
1575 case INDEX_op_mul_i32:
1576 c = ARITH_UMUL;
ba225198 1577 goto gen_arith;
583d1215 1578
4b5a85c1
RH
1579 OP_32_64(neg):
1580 c = ARITH_SUB;
1581 goto gen_arith1;
be6551b1
RH
1582 OP_32_64(not):
1583 c = ARITH_ORN;
1584 goto gen_arith1;
4b5a85c1 1585
583d1215 1586 case INDEX_op_div_i32:
b357f902 1587 tcg_out_div32(s, a0, a1, a2, c2, 0);
583d1215
RH
1588 break;
1589 case INDEX_op_divu_i32:
b357f902 1590 tcg_out_div32(s, a0, a1, a2, c2, 1);
583d1215
RH
1591 break;
1592
8289b279 1593 case INDEX_op_brcond_i32:
bec16311 1594 tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
8289b279 1595 break;
dbfe80e1 1596 case INDEX_op_setcond_i32:
b357f902 1597 tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
dbfe80e1 1598 break;
ded37f0d 1599 case INDEX_op_movcond_i32:
b357f902 1600 tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
ded37f0d 1601 break;
dbfe80e1 1602
7a3766f3 1603 case INDEX_op_add2_i32:
609ac1e1
RH
1604 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1605 args[4], const_args[4], args[5], const_args[5],
c470b663 1606 ARITH_ADDCC, ARITH_ADDC);
7a3766f3
RH
1607 break;
1608 case INDEX_op_sub2_i32:
609ac1e1
RH
1609 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1610 args[4], const_args[4], args[5], const_args[5],
c470b663 1611 ARITH_SUBCC, ARITH_SUBC);
7a3766f3
RH
1612 break;
1613 case INDEX_op_mulu2_i32:
f4c16661
RH
1614 c = ARITH_UMUL;
1615 goto do_mul2;
1616 case INDEX_op_muls2_i32:
1617 c = ARITH_SMUL;
1618 do_mul2:
3a5f6805 1619 /* The 32-bit multiply insns produce a full 64-bit result. */
b357f902 1620 tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
3a5f6805 1621 tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
7a3766f3 1622 break;
8289b279 1623
cab0a7ea 1624 case INDEX_op_qemu_ld_i32:
59227d5d 1625 tcg_out_qemu_ld(s, a0, a1, a2, false);
8289b279 1626 break;
cab0a7ea 1627 case INDEX_op_qemu_ld_i64:
59227d5d 1628 tcg_out_qemu_ld(s, a0, a1, a2, true);
8289b279 1629 break;
cab0a7ea 1630 case INDEX_op_qemu_st_i32:
cab0a7ea 1631 case INDEX_op_qemu_st_i64:
59227d5d 1632 tcg_out_qemu_st(s, a0, a1, a2);
a0ce341a 1633 break;
8289b279 1634
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn. */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
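    /*
     * The V9 32-bit shifts sign- or zero-extend their result to 64 bits,
     * so a zero-count SRA/SRL performs ext32s/ext32u in one insn.
     */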
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

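    /* Shared tail for two-operand ops: one reg/reg-or-simm13 arith insn. */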
    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

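    /* Shared tail for unary ops, computed as "insn %g0, a1 -> a0". */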
    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
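    /*
     * Constraint letters for this backend: 'r' is any general register,
     * 's' additionally avoids the softmmu argument registers, 'I' is a
     * signed 11-bit constant, 'J' a signed 13-bit constant, and 'Z'
     * requires constant zero (which folds to %g0).
     */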
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rJ);
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rZ, rJ);
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, s);
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(sZ, s);

    default:
        g_assert_not_reached();
    }
}

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

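    /*
     * All %g and %o registers are treated as call-clobbered; %l and %i
     * survive calls via the callee's register window.
     */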
    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
}

#define ELF_HOST_MACHINE EM_SPARCV9

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15, /* o7 */

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
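    /*
     * The CFA is %i6 plus the 2047-byte stack bias that V9 applies to
     * %sp/%fp; 2047 is encoded below as a two-byte uleb128.
     */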
    .fde_def_cfa = {
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}