/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode.  */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

/*
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif
#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
#define ALL_QLDST_REGS    (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 3);
    return TCG_REG_O0 + slot;
}

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x)  ((x) << 25)

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD   (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND   (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN  (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR    (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC  (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN   (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR   (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB   (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC  (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC  (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL  (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL  (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV  (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV  (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX  (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR  (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64

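/*
 * Patch the relocation TYPE at SRC_RW so that it refers to VALUE + ADDEND.
 * Returns false if the resulting displacement does not fit the insn field.
 */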
static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

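/* Emit a three-operand ALU op: RD = RS1 <op> RS2, both operands in registers. */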
static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

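/*
 * As above, but the second operand is either a register number or a
 * signed 13-bit immediate, selected at runtime by VAL2CONST.
 */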
static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

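/* A register-to-register move on sparc is OR with %g0, which reads as zero. */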
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

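/* SETHI sets bits [31:10] of RET from ARG and clears the low 10 bits. */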
static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

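/* Load a signed 13-bit immediate by ORing it with %g0. */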
static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (check_fit_i32(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits.  */
        tcg_out_movi_imm13(s, ret, arg);
    } else {
        /* A 32-bit constant zero-extended to 64 bits.  */
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
    }
}

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_imm32(s, ret, arg);
        return;
    }

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB.  */
    if (!in_prologue) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 32-bit constant, shifted.  */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible.  */
    if (!in_prologue) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_movi_imm32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T2);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg rd, TCGReg rs)
{
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0xff, ARITH_AND);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 16, SHIFT_SLL);
    tcg_out_arithi(s, rd, rd, 16, SHIFT_SRL);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRA);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_arithi(s, rd, rs, 0, SHIFT_SRL);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32s(s, rd, rs);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_ext32u(s, rd, rs);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rs)
{
    tcg_out_mov(s, TCG_TYPE_I32, rd, rs);
}

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference.  */
    g_assert_not_reached();
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

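/*
 * Write RS to the Y register, which supplies the upper 32 bits of the
 * dividend for the 32-bit UDIV/SDIV insns.
 */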
static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

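/*
 * Compare C1 with C2 (register, or 13-bit immediate): SUBCC discarding
 * the result into %g0, which sets both the icc and xcc flags.
 */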
static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

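/*
 * MOVcc: conditionally copy V1 (a register, or an 11-bit immediate when
 * V1CONST) into RET, based on COND applied to the condition codes
 * selected by CC (MOVCC_ICC or MOVCC_XCC).
 */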
static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates.  */
        if (bhconst && bh != 0) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place.  */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand.  */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    /* Be careful not to clobber %o7 for a tail call.  */
    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue,
                     tail_call ? TCG_REG_G2 : TCG_REG_O7);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest,
                         const TCGHelperInfo *info)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}

#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];

static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEUQ] = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEUQ] = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEUQ] = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEUQ] = helper_be_stq_mmu,
    };

    int i;

    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }

    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);

        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#else
static const tcg_insn_unit *qemu_unalign_ld_trampoline;
static const tcg_insn_unit *qemu_unalign_st_trampoline;

static void build_trampolines(TCGContext *s)
{
    for (int ld = 0; ld < 2; ++ld) {
        void *helper;

        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }

        if (ld) {
            helper = helper_unaligned_ld;
            qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        } else {
            helper = helper_unaligned_st;
            qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        }

        /* Tail call.  */
        tcg_out_jmpl_const(s, helper, true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required.  */
    QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr.  */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

    build_trampolines(s);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2.  */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, r0, addr);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */

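/*
 * Map a MemOp (size, signedness, endianness) to the matching sparc
 * load opcode.  The *_LE entries are the alternate-space forms using
 * the little-endian primary ASI.
 */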
static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = LDUB,
    [MO_SB]   = LDSB,
    [MO_UB | MO_LE] = LDUB,
    [MO_SB | MO_LE] = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEUQ] = LDX,
    [MO_BESQ] = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEUQ] = LDX_LE,
    [MO_LESQ] = LDX_LE,
};

static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEUQ] = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEUQ] = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, bool is_64)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss.  */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);

    /* We let the helper sign-extend SB and SW, but leave SL for here.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        tcg_out_ext32s(s, data, TCG_REG_O0);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_T1, addr);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight load in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: load by pieces of minimum alignment.  */
        int ld_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            ld_opc = qemu_ld_opc[a_bits | MO_BE];
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, data, data, a_size, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else if (a_bits == 0) {
            ld_opc = LDUB;
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = LDSB;
                }
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else {
            ld_opc = qemu_ld_opc[a_bits | MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
                }
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, TCGType data_type)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss.  */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
    tcg_out_movext(s, (memop & MO_SIZE) == MO_64 ? TCG_TYPE_I64 : TCG_TYPE_I32,
                   TCG_REG_O2, data_type, memop & MO_SIZE, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, TCG_REG_T1, addr);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight store in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: store by pieces of minimum alignment.  */
        int st_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            st_opc = qemu_st_opc[a_bits | MO_BE];
            for (i = 0; i < s_size; i += a_size) {
                TCGReg d = data;
                int shift = (s_size - a_size - i) * 8;
                if (shift) {
                    d = TCG_REG_T2;
                    tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
                }
                tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
            }
        } else if (a_bits == 0) {
            tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
            for (i = 1; i < s_size; i++) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
            }
        } else {
            /* Note that ST*A with immediate asi must use indexed address.  */
            st_opc = qemu_st_opc[a_bits + MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    if (check_fit_ptr(a0, 13)) {
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_movi_imm13(s, TCG_REG_O0, a0);
        return;
    } else {
        intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
        if (check_fit_ptr(tb_diff, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            /* Note that TCG_REG_TB has been unwound to O1.  */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
            return;
        }
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    ptrdiff_t off = tcg_tbrel_diff(s, (void *)get_jmp_target_addr(s, which));

    /* Load link and indirect branch.  */
    set_jmp_insn_offset(s, which);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, TCG_REG_TB, off);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);
    set_jmp_reset_offset(s, which);

    /*
     * For the unlinked path of goto_tb, we need to reset TCG_REG_TB
     * to the beginning of this TB.
     */
    off = -tcg_current_code_size(s);
    if (check_fit_i32(off, 13)) {
        tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, off, ARITH_ADD);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, off);
        tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
    }
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        tcg_out_mov_delay(s, TCG_REG_TB, a0);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

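/* Expand to the case labels for both the _i32 and _i64 variants of an opcode. */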
8289b279 1526#define OP_32_64(x) \
ba225198
RH
1527 glue(glue(case INDEX_op_, x), _i32): \
1528 glue(glue(case INDEX_op_, x), _i64)
34b1a49c 1529
ba225198 1530 OP_32_64(ld8u):
b357f902 1531 tcg_out_ldst(s, a0, a1, a2, LDUB);
8289b279 1532 break;
ba225198 1533 OP_32_64(ld8s):
b357f902 1534 tcg_out_ldst(s, a0, a1, a2, LDSB);
8289b279 1535 break;
ba225198 1536 OP_32_64(ld16u):
b357f902 1537 tcg_out_ldst(s, a0, a1, a2, LDUH);
8289b279 1538 break;
ba225198 1539 OP_32_64(ld16s):
b357f902 1540 tcg_out_ldst(s, a0, a1, a2, LDSH);
8289b279
BS
1541 break;
1542 case INDEX_op_ld_i32:
53cd9273 1543 case INDEX_op_ld32u_i64:
b357f902 1544 tcg_out_ldst(s, a0, a1, a2, LDUW);
8289b279 1545 break;
ba225198 1546 OP_32_64(st8):
b357f902 1547 tcg_out_ldst(s, a0, a1, a2, STB);
8289b279 1548 break;
ba225198 1549 OP_32_64(st16):
b357f902 1550 tcg_out_ldst(s, a0, a1, a2, STH);
8289b279
BS
1551 break;
1552 case INDEX_op_st_i32:
53cd9273 1553 case INDEX_op_st32_i64:
b357f902 1554 tcg_out_ldst(s, a0, a1, a2, STW);
8289b279 1555 break;
ba225198 1556 OP_32_64(add):
53cd9273 1557 c = ARITH_ADD;
ba225198
RH
1558 goto gen_arith;
1559 OP_32_64(sub):
8289b279 1560 c = ARITH_SUB;
ba225198
RH
1561 goto gen_arith;
1562 OP_32_64(and):
8289b279 1563 c = ARITH_AND;
ba225198 1564 goto gen_arith;
dc69960d
RH
1565 OP_32_64(andc):
1566 c = ARITH_ANDN;
1567 goto gen_arith;
ba225198 1568 OP_32_64(or):
8289b279 1569 c = ARITH_OR;
ba225198 1570 goto gen_arith;
18c8f7a3
RH
1571 OP_32_64(orc):
1572 c = ARITH_ORN;
1573 goto gen_arith;
ba225198 1574 OP_32_64(xor):
8289b279 1575 c = ARITH_XOR;
ba225198 1576 goto gen_arith;
8289b279
BS
1577 case INDEX_op_shl_i32:
1578 c = SHIFT_SLL;
1fd95946
RH
1579 do_shift32:
1580 /* Limit immediate shift count lest we create an illegal insn. */
b357f902 1581 tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
1fd95946 1582 break;
8289b279
BS
1583 case INDEX_op_shr_i32:
1584 c = SHIFT_SRL;
1fd95946 1585 goto do_shift32;
8289b279
BS
1586 case INDEX_op_sar_i32:
1587 c = SHIFT_SRA;
1fd95946 1588 goto do_shift32;
8289b279
BS
1589 case INDEX_op_mul_i32:
1590 c = ARITH_UMUL;
ba225198 1591 goto gen_arith;
583d1215 1592
4b5a85c1
RH
1593 OP_32_64(neg):
1594 c = ARITH_SUB;
1595 goto gen_arith1;
be6551b1
RH
1596 OP_32_64(not):
1597 c = ARITH_ORN;
1598 goto gen_arith1;
4b5a85c1 1599
583d1215 1600 case INDEX_op_div_i32:
b357f902 1601 tcg_out_div32(s, a0, a1, a2, c2, 0);
583d1215
RH
1602 break;
1603 case INDEX_op_divu_i32:
b357f902 1604 tcg_out_div32(s, a0, a1, a2, c2, 1);
583d1215
RH
1605 break;
1606
8289b279 1607 case INDEX_op_brcond_i32:
bec16311 1608 tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
8289b279 1609 break;
dbfe80e1 1610 case INDEX_op_setcond_i32:
b357f902 1611 tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
dbfe80e1 1612 break;
ded37f0d 1613 case INDEX_op_movcond_i32:
b357f902 1614 tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
ded37f0d 1615 break;
dbfe80e1 1616
7a3766f3 1617 case INDEX_op_add2_i32:
609ac1e1
RH
1618 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1619 args[4], const_args[4], args[5], const_args[5],
c470b663 1620 ARITH_ADDCC, ARITH_ADDC);
7a3766f3
RH
1621 break;
1622 case INDEX_op_sub2_i32:
609ac1e1
RH
1623 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1624 args[4], const_args[4], args[5], const_args[5],
c470b663 1625 ARITH_SUBCC, ARITH_SUBC);
7a3766f3
RH
1626 break;
1627 case INDEX_op_mulu2_i32:
f4c16661
RH
1628 c = ARITH_UMUL;
1629 goto do_mul2;
1630 case INDEX_op_muls2_i32:
1631 c = ARITH_SMUL;
1632 do_mul2:
3a5f6805 1633 /* The 32-bit multiply insns produce a full 64-bit result. */
b357f902 1634 tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
3a5f6805 1635 tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
7a3766f3 1636 break;
8289b279 1637
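    /* For qemu_ld/st: a0 = data, a1 = guest address, a2 = MemOpIdx. */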
cab0a7ea 1638 case INDEX_op_qemu_ld_i32:
59227d5d 1639 tcg_out_qemu_ld(s, a0, a1, a2, false);
8289b279 1640 break;
cab0a7ea 1641 case INDEX_op_qemu_ld_i64:
59227d5d 1642 tcg_out_qemu_ld(s, a0, a1, a2, true);
8289b279 1643 break;
cab0a7ea 1644 case INDEX_op_qemu_st_i32:
b3dfd5fc
RH
1645 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
1646 break;
cab0a7ea 1647 case INDEX_op_qemu_st_i64:
b3dfd5fc 1648 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
a0ce341a 1649 break;
8289b279 1650
53cd9273 1651 case INDEX_op_ld32s_i64:
b357f902 1652 tcg_out_ldst(s, a0, a1, a2, LDSW);
53cd9273 1653 break;
8289b279 1654 case INDEX_op_ld_i64:
b357f902 1655 tcg_out_ldst(s, a0, a1, a2, LDX);
8289b279
BS
1656 break;
1657 case INDEX_op_st_i64:
b357f902 1658 tcg_out_ldst(s, a0, a1, a2, STX);
8289b279
BS
1659 break;
1660 case INDEX_op_shl_i64:
1661 c = SHIFT_SLLX;
1fd95946
RH
1662 do_shift64:
1663 /* Limit immediate shift count lest we create an illegal insn. */
b357f902 1664 tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
1fd95946 1665 break;
8289b279
BS
1666 case INDEX_op_shr_i64:
1667 c = SHIFT_SRLX;
1fd95946 1668 goto do_shift64;
8289b279
BS
1669 case INDEX_op_sar_i64:
1670 c = SHIFT_SRAX;
1fd95946 1671 goto do_shift64;
8289b279
BS
1672 case INDEX_op_mul_i64:
1673 c = ARITH_MULX;
ba225198 1674 goto gen_arith;
583d1215 1675 case INDEX_op_div_i64:
53cd9273 1676 c = ARITH_SDIVX;
ba225198 1677 goto gen_arith;
583d1215 1678 case INDEX_op_divu_i64:
8289b279 1679 c = ARITH_UDIVX;
ba225198 1680 goto gen_arith;
609ad705
RH
1681 case INDEX_op_extrh_i64_i32:
1682 tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
a24fba93 1683 break;
8289b279
BS
1684
1685 case INDEX_op_brcond_i64:
bec16311 1686 tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
8289b279 1687 break;
dbfe80e1 1688 case INDEX_op_setcond_i64:
b357f902 1689 tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
dbfe80e1 1690 break;
ded37f0d 1691 case INDEX_op_movcond_i64:
b357f902 1692 tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
ded37f0d 1693 break;
609ac1e1
RH
1694 case INDEX_op_add2_i64:
1695 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1696 const_args[4], args[5], const_args[5], false);
1697 break;
1698 case INDEX_op_sub2_i64:
1699 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1700 const_args[4], args[5], const_args[5], true);
1701 break;
de8301e5
RH
1702 case INDEX_op_muluh_i64:
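        /* UMULXHI (VIS3) computes the high 64 bits of the unsigned 128-bit
           product; this op is only enabled when the VIS3 probe in
           tcg_target_init succeeds. */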
1703 tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
1704 break;
34b1a49c 1705
ba225198 1706 gen_arith:
b357f902 1707 tcg_out_arithc(s, a0, a1, a2, c2, c);
53cd9273
BS
1708 break;
1709
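    /* One-operand forms synthesize the op with %g0 (hardwired zero) as
       the first source: SUB %g0,x is neg, ORN %g0,x is not. */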
4b5a85c1 1710 gen_arith1:
b357f902 1711 tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
4b5a85c1
RH
1712 break;
1713
f8f03b37
PK
1714 case INDEX_op_mb:
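        /* a0 is the mask of TCG_MO_* flags selecting the barrier kind. */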
1715 tcg_out_mb(s, a0);
1716 break;
1717
96d0ee7f 1718 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
98b90bab 1719 case INDEX_op_mov_i64:
96d0ee7f 1720 case INDEX_op_call: /* Always emitted via tcg_out_call. */
b55a8d9d 1721 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
cf7d6b8e 1722 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
678155b2
RH
1723 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
1724 case INDEX_op_ext8s_i64:
d0e66c89
RH
1725 case INDEX_op_ext8u_i32:
1726 case INDEX_op_ext8u_i64:
753e42ea
RH
1727 case INDEX_op_ext16s_i32:
1728 case INDEX_op_ext16s_i64:
379afdff
RH
1729 case INDEX_op_ext16u_i32:
1730 case INDEX_op_ext16u_i64:
52bf3398 1731 case INDEX_op_ext32s_i64:
9ecf5f61 1732 case INDEX_op_ext32u_i64:
9c6aa274 1733 case INDEX_op_ext_i32_i64:
b9bfe000 1734 case INDEX_op_extu_i32_i64:
b8b94ac6 1735 case INDEX_op_extrl_i64_i32:
8289b279 1736 default:
732e89f4 1737 g_assert_not_reached();
8289b279
BS
1738 }
1739}
1740
0d11dc7c 1741static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
f69d277e 1742{
9be44a16
RH
1743 switch (op) {
1744 case INDEX_op_goto_ptr:
0d11dc7c 1745 return C_O0_I1(r);
f69d277e 1746
9be44a16 1747 case INDEX_op_ld8u_i32:
a59a2931 1748 case INDEX_op_ld8u_i64:
9be44a16 1749 case INDEX_op_ld8s_i32:
a59a2931 1750 case INDEX_op_ld8s_i64:
9be44a16 1751 case INDEX_op_ld16u_i32:
a59a2931 1752 case INDEX_op_ld16u_i64:
9be44a16 1753 case INDEX_op_ld16s_i32:
a59a2931 1754 case INDEX_op_ld16s_i64:
9be44a16 1755 case INDEX_op_ld_i32:
a59a2931
RH
1756 case INDEX_op_ld32u_i64:
1757 case INDEX_op_ld32s_i64:
1758 case INDEX_op_ld_i64:
9be44a16 1759 case INDEX_op_neg_i32:
a59a2931 1760 case INDEX_op_neg_i64:
9be44a16 1761 case INDEX_op_not_i32:
a59a2931
RH
1762 case INDEX_op_not_i64:
1763 case INDEX_op_ext32s_i64:
1764 case INDEX_op_ext32u_i64:
1765 case INDEX_op_ext_i32_i64:
1766 case INDEX_op_extu_i32_i64:
1767 case INDEX_op_extrl_i64_i32:
1768 case INDEX_op_extrh_i64_i32:
0d11dc7c 1769 return C_O1_I1(r, r);
9be44a16
RH
1770
1771 case INDEX_op_st8_i32:
a59a2931 1772 case INDEX_op_st8_i64:
9be44a16 1773 case INDEX_op_st16_i32:
a59a2931 1774 case INDEX_op_st16_i64:
9be44a16 1775 case INDEX_op_st_i32:
a59a2931
RH
1776 case INDEX_op_st32_i64:
1777 case INDEX_op_st_i64:
0d11dc7c 1778 return C_O0_I2(rZ, r);
9be44a16
RH
1779
1780 case INDEX_op_add_i32:
a59a2931 1781 case INDEX_op_add_i64:
9be44a16 1782 case INDEX_op_mul_i32:
a59a2931 1783 case INDEX_op_mul_i64:
9be44a16 1784 case INDEX_op_div_i32:
a59a2931 1785 case INDEX_op_div_i64:
9be44a16 1786 case INDEX_op_divu_i32:
a59a2931 1787 case INDEX_op_divu_i64:
9be44a16 1788 case INDEX_op_sub_i32:
a59a2931 1789 case INDEX_op_sub_i64:
9be44a16 1790 case INDEX_op_and_i32:
a59a2931 1791 case INDEX_op_and_i64:
9be44a16 1792 case INDEX_op_andc_i32:
a59a2931 1793 case INDEX_op_andc_i64:
9be44a16 1794 case INDEX_op_or_i32:
a59a2931 1795 case INDEX_op_or_i64:
9be44a16 1796 case INDEX_op_orc_i32:
a59a2931 1797 case INDEX_op_orc_i64:
9be44a16 1798 case INDEX_op_xor_i32:
a59a2931 1799 case INDEX_op_xor_i64:
9be44a16 1800 case INDEX_op_shl_i32:
a59a2931 1801 case INDEX_op_shl_i64:
9be44a16 1802 case INDEX_op_shr_i32:
a59a2931 1803 case INDEX_op_shr_i64:
9be44a16 1804 case INDEX_op_sar_i32:
a59a2931 1805 case INDEX_op_sar_i64:
9be44a16 1806 case INDEX_op_setcond_i32:
a59a2931 1807 case INDEX_op_setcond_i64:
0d11dc7c 1808 return C_O1_I2(r, rZ, rJ);
9be44a16
RH
1809
1810 case INDEX_op_brcond_i32:
a59a2931 1811 case INDEX_op_brcond_i64:
0d11dc7c 1812 return C_O0_I2(rZ, rJ);
9be44a16 1813 case INDEX_op_movcond_i32:
a59a2931 1814 case INDEX_op_movcond_i64:
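    /* The trailing "0" ties the second value input to the output register. */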
0d11dc7c 1815 return C_O1_I4(r, rZ, rJ, rI, 0);
9be44a16 1816 case INDEX_op_add2_i32:
a59a2931 1817 case INDEX_op_add2_i64:
9be44a16 1818 case INDEX_op_sub2_i32:
a59a2931 1819 case INDEX_op_sub2_i64:
0d11dc7c 1820 return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
9be44a16
RH
1821 case INDEX_op_mulu2_i32:
1822 case INDEX_op_muls2_i32:
0d11dc7c 1823 return C_O2_I2(r, r, rZ, rJ);
9be44a16 1824 case INDEX_op_muluh_i64:
a59a2931 1825 return C_O1_I2(r, r, r);
9be44a16
RH
1826
1827 case INDEX_op_qemu_ld_i32:
9be44a16 1828 case INDEX_op_qemu_ld_i64:
a59a2931 1829 return C_O1_I1(r, s);
9be44a16 1830 case INDEX_op_qemu_st_i32:
9be44a16 1831 case INDEX_op_qemu_st_i64:
a59a2931 1832 return C_O0_I2(sZ, s);
9be44a16
RH
1833
1834 default:
0d11dc7c 1835 g_assert_not_reached();
f69d277e 1836 }
f69d277e
RH
1837}
1838
e4d58b41 1839static void tcg_target_init(TCGContext *s)
8289b279 1840{
a4761232
PMD
1841 /*
1842 * Only probe for the platform and capabilities if we haven't already
1843 * determined maximum values at compile time.
1844 */
90379ca8
RH
1845#ifndef use_vis3_instructions
1846 {
1847 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
1848 use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
1849 }
1850#endif
1851
77f268e8 1852 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
a59a2931 1853 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
f46934df
RH
1854
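    /* Per the SPARC calling convention, all %g and %o registers are
       call-clobbered; %l and %i are preserved by the register window. */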
1855 tcg_target_call_clobber_regs = 0;
1856 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
1857 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
1858 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
1859 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
1860 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
1861 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
1862 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
1863 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
1864 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
1865 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
1866 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
1867 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
1868 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
1869 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
1870 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);
8289b279 1871
ccb1bb66 1872 s->reserved_regs = 0;
375816f8
RH
1873 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
 1874 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for OS */
1875 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1876 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1877 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1878 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1879 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1880 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
8289b279 1881}
cb1977d3 1882
3a5f6805 1883#define ELF_HOST_MACHINE EM_SPARCV9
cb1977d3 1884
cb1977d3 1885typedef struct {
ae18b28d 1886 DebugFrameHeader h;
3a5f6805 1887 uint8_t fde_def_cfa[4];
497a22eb
RH
1888 uint8_t fde_win_save;
1889 uint8_t fde_ret_save[3];
cb1977d3
RH
1890} DebugFrame;
1891
ae18b28d
RH
1892static const DebugFrame debug_frame = {
1893 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1894 .h.cie.id = -1,
1895 .h.cie.version = 1,
1896 .h.cie.code_align = 1,
1897 .h.cie.data_align = -sizeof(void *) & 0x7f,
1898 .h.cie.return_column = 15, /* o7 */
cb1977d3 1899
497a22eb 1900 /* Total FDE size does not include the "len" member. */
ae18b28d 1901 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
497a22eb
RH
1902
1903 .fde_def_cfa = {
cb1977d3
RH
1904 12, 30, /* DW_CFA_def_cfa i6, 2047 */
1905 (2047 & 0x7f) | 0x80, (2047 >> 7)
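        /* 2047, the V9 stack bias, as ULEB128: 0xff, 0x0f. */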
cb1977d3 1906 },
497a22eb
RH
1907 .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */
1908 .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
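    /* 9 = DW_CFA_register: return column 15 (%o7) is found in %i7 (31). */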
cb1977d3
RH
1909};
1910
755bf9e5 1911void tcg_register_jit(const void *buf, size_t buf_size)
cb1977d3 1912{
cb1977d3
RH
1913 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1914}