]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/sparc/tcg-target.c
Merge remote-tracking branch 'remotes/rth/tcg-ppc-merge-1' into staging
[mirror_qemu.git] / tcg / sparc / tcg-target.c
CommitLineData
8289b279
BS
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

3cf246f0
RH
25#include "tcg-be-null.h"
26
d4a9eb1f 27#ifndef NDEBUG
8289b279
BS
28static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
29 "%g0",
30 "%g1",
31 "%g2",
32 "%g3",
33 "%g4",
34 "%g5",
35 "%g6",
36 "%g7",
37 "%o0",
38 "%o1",
39 "%o2",
40 "%o3",
41 "%o4",
42 "%o5",
43 "%o6",
44 "%o7",
45 "%l0",
46 "%l1",
47 "%l2",
48 "%l3",
49 "%l4",
50 "%l5",
51 "%l6",
52 "%l7",
53 "%i0",
54 "%i1",
55 "%i2",
56 "%i3",
57 "%i4",
58 "%i5",
59 "%i6",
60 "%i7",
61};
d4a9eb1f 62#endif
8289b279 63
/* Compile-time flag: 1 when building a true 64-bit (v9) backend. */
#ifdef __arch64__
# define SPARC64 1
#else
# define SPARC64 0
#endif

/* Note that sparcv8plus can only hold 64 bit quantities in %g and %o
   registers.  These are saved manually by the kernel in full 64-bit
   slots.  The %i and %l registers are saved by the register window
   mechanism, which only allocates space for 32 bits.  Given that this
   window spill/fill can happen on any signal, we must consider the
   high bits of the %i and %l registers garbage at all times.  */
#if SPARC64
# define ALL_64 0xffffffffu
#else
# define ALL_64 0xffffu
#endif

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1 TCG_REG_G1
#define TCG_REG_T2 TCG_REG_O7

#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif

0954d0d9 92static const int tcg_target_reg_alloc_order[] = {
8289b279
BS
93 TCG_REG_L0,
94 TCG_REG_L1,
95 TCG_REG_L2,
96 TCG_REG_L3,
97 TCG_REG_L4,
98 TCG_REG_L5,
99 TCG_REG_L6,
100 TCG_REG_L7,
26adfb75 101
8289b279
BS
102 TCG_REG_I0,
103 TCG_REG_I1,
104 TCG_REG_I2,
105 TCG_REG_I3,
106 TCG_REG_I4,
375816f8 107 TCG_REG_I5,
26adfb75
RH
108
109 TCG_REG_G2,
110 TCG_REG_G3,
111 TCG_REG_G4,
112 TCG_REG_G5,
113
114 TCG_REG_O0,
115 TCG_REG_O1,
116 TCG_REG_O2,
117 TCG_REG_O3,
118 TCG_REG_O4,
119 TCG_REG_O5,
8289b279
BS
120};
121
122static const int tcg_target_call_iarg_regs[6] = {
123 TCG_REG_O0,
124 TCG_REG_O1,
125 TCG_REG_O2,
126 TCG_REG_O3,
127 TCG_REG_O4,
128 TCG_REG_O5,
129};
130
26a74ae3 131static const int tcg_target_call_oarg_regs[] = {
8289b279 132 TCG_REG_O0,
e141ab52
BS
133 TCG_REG_O1,
134 TCG_REG_O2,
135 TCG_REG_O3,
8289b279
BS
136};
137
/* SPARC instruction field encodings (see the SPARC V9 manual, appendix A). */
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

/* Immediate and displacement fields; bit 13 selects the immediate form. */
#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)

/* Integer condition codes for branches and conditional moves. */
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

/* Register-based condition codes (BPr / MOVr). */
#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

/* MOVcc condition-code selectors: 32-bit icc vs 64-bit xcc. */
#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

/* BPcc flags: cc selector, prediction, annul. */
#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

/* Format-3 arithmetic and logical opcodes. */
#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

/* Shifts; bit 12 selects the 64-bit (X) form. */
#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)

/* Loads and stores, plus their alternate-space (ASI) forms. */
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

/* Little-endian accesses via the primary-little ASI. */
#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

425532d7 265static inline int check_fit_i64(int64_t val, unsigned int bits)
a115f3ea 266{
425532d7 267 return val == sextract64(val, 0, bits);
a115f3ea
RH
268}
269
425532d7 270static inline int check_fit_i32(int32_t val, unsigned int bits)
a115f3ea 271{
425532d7 272 return val == sextract32(val, 0, bits);
a115f3ea
RH
273}
274
425532d7
RH
275#define check_fit_tl check_fit_i64
276#if SPARC64
277# define check_fit_ptr check_fit_i64
278#else
279# define check_fit_ptr check_fit_i32
280#endif
281
abce5964 282static void patch_reloc(tcg_insn_unit *code_ptr, int type,
2ba7fae2 283 intptr_t value, intptr_t addend)
a115f3ea
RH
284{
285 uint32_t insn;
abce5964
RH
286
287 assert(addend == 0);
288 value = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr);
289
a115f3ea 290 switch (type) {
ab1339b9 291 case R_SPARC_WDISP16:
425532d7 292 if (!check_fit_ptr(value >> 2, 16)) {
ab1339b9
RH
293 tcg_abort();
294 }
abce5964 295 insn = *code_ptr;
ab1339b9
RH
296 insn &= ~INSN_OFF16(-1);
297 insn |= INSN_OFF16(value);
abce5964 298 *code_ptr = insn;
ab1339b9 299 break;
a115f3ea 300 case R_SPARC_WDISP19:
425532d7 301 if (!check_fit_ptr(value >> 2, 19)) {
a115f3ea
RH
302 tcg_abort();
303 }
abce5964 304 insn = *code_ptr;
a115f3ea
RH
305 insn &= ~INSN_OFF19(-1);
306 insn |= INSN_OFF19(value);
abce5964 307 *code_ptr = insn;
a115f3ea
RH
308 break;
309 default:
310 tcg_abort();
311 }
312}
313
314/* parse target specific constraints */
315static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
316{
317 const char *ct_str;
318
319 ct_str = *pct_str;
320 switch (ct_str[0]) {
321 case 'r':
322 ct->ct |= TCG_CT_REG;
323 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
324 break;
34b1a49c 325 case 'R':
a115f3ea 326 ct->ct |= TCG_CT_REG;
34b1a49c
RH
327 tcg_regset_set32(ct->u.regs, 0, ALL_64);
328 break;
329 case 'A': /* qemu_ld/st address constraint */
330 ct->ct |= TCG_CT_REG;
331 tcg_regset_set32(ct->u.regs, 0,
332 TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff);
333 reserve_helpers:
a115f3ea
RH
334 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
335 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
336 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
337 break;
34b1a49c
RH
338 case 's': /* qemu_st data 32-bit constraint */
339 ct->ct |= TCG_CT_REG;
340 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
341 goto reserve_helpers;
342 case 'S': /* qemu_st data 64-bit constraint */
343 ct->ct |= TCG_CT_REG;
344 tcg_regset_set32(ct->u.regs, 0, ALL_64);
345 goto reserve_helpers;
a115f3ea
RH
346 case 'I':
347 ct->ct |= TCG_CT_CONST_S11;
348 break;
349 case 'J':
350 ct->ct |= TCG_CT_CONST_S13;
351 break;
352 case 'Z':
353 ct->ct |= TCG_CT_CONST_ZERO;
354 break;
355 default:
356 return -1;
357 }
358 ct_str++;
359 *pct_str = ct_str;
360 return 0;
361}
362
363/* test if a constant matches the constraint */
f6c6afc1 364static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
a115f3ea
RH
365 const TCGArgConstraint *arg_ct)
366{
367 int ct = arg_ct->ct;
368
369 if (ct & TCG_CT_CONST) {
370 return 1;
4b304cfa
RH
371 }
372
373 if (type == TCG_TYPE_I32) {
374 val = (int32_t)val;
375 }
376
377 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
a115f3ea
RH
378 return 1;
379 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
380 return 1;
381 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
382 return 1;
383 } else {
384 return 0;
385 }
386}
387
35e2da15
RH
388static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
389 TCGReg rs2, int op)
26cc915c 390{
35e2da15 391 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
26cc915c
BS
392}
393
35e2da15
RH
394static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
395 int32_t offset, int op)
26cc915c 396{
35e2da15 397 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
26cc915c
BS
398}
399
35e2da15
RH
400static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
401 int32_t val2, int val2const, int op)
ba225198
RH
402{
403 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
404 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
405}
406
2a534aff
RH
407static inline void tcg_out_mov(TCGContext *s, TCGType type,
408 TCGReg ret, TCGReg arg)
8289b279 409{
dda73c78
RH
410 if (ret != arg) {
411 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
412 }
26cc915c
BS
413}
414
35e2da15 415static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
26cc915c
BS
416{
417 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
8289b279
BS
418}
419
35e2da15 420static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
b101234a
BS
421{
422 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
423}
424
a9c7d27b
RH
425static void tcg_out_movi(TCGContext *s, TCGType type,
426 TCGReg ret, tcg_target_long arg)
8289b279 427{
425532d7 428 tcg_target_long hi, lo = (int32_t)arg;
a9c7d27b 429
035b2398
RH
430 /* Make sure we test 32-bit constants for imm13 properly. */
431 if (type == TCG_TYPE_I32) {
432 arg = lo;
433 }
434
a9c7d27b
RH
435 /* A 13-bit constant sign-extended to 64-bits. */
436 if (check_fit_tl(arg, 13)) {
b101234a 437 tcg_out_movi_imm13(s, ret, arg);
a9c7d27b 438 return;
8289b279 439 }
8289b279 440
a9c7d27b 441 /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
34b1a49c 442 if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
a9c7d27b
RH
443 tcg_out_sethi(s, ret, arg);
444 if (arg & 0x3ff) {
445 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
446 }
447 return;
448 }
449
450 /* A 32-bit constant sign-extended to 64-bits. */
425532d7 451 if (arg == lo) {
43172207
RH
452 tcg_out_sethi(s, ret, ~arg);
453 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
a9c7d27b
RH
454 return;
455 }
456
457 /* A 64-bit constant decomposed into 2 32-bit pieces. */
425532d7 458 if (check_fit_i32(lo, 13)) {
34b1a49c 459 hi = (arg - lo) >> 32;
a9c7d27b
RH
460 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
461 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
462 tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
43172207 463 } else {
34b1a49c 464 hi = arg >> 32;
a9c7d27b
RH
465 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
466 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
375816f8 467 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
375816f8 468 tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
6f41b777 469 }
b101234a
BS
470}
471
35e2da15
RH
472static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
473 TCGReg a2, int op)
8289b279 474{
a0ce341a 475 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
8289b279
BS
476}
477
35e2da15
RH
478static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
479 intptr_t offset, int op)
8289b279 480{
425532d7 481 if (check_fit_ptr(offset, 13)) {
8289b279
BS
482 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
483 INSN_IMM13(offset));
a0ce341a 484 } else {
375816f8
RH
485 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
486 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
cf7c2ca5 487 }
8289b279
BS
488}
489
2a534aff 490static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
a05b5b9b 491 TCGReg arg1, intptr_t arg2)
8289b279 492{
a0ce341a 493 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
8289b279
BS
494}
495
2a534aff 496static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
a05b5b9b 497 TCGReg arg1, intptr_t arg2)
8289b279 498{
a0ce341a
RH
499 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
500}
501
35e2da15 502static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
a0ce341a 503{
35e2da15
RH
504 tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
505 tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
8289b279
BS
506}
507
35e2da15 508static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
8289b279 509{
583d1215 510 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
8289b279
BS
511}
512
35e2da15 513static inline void tcg_out_rdy(TCGContext *s, TCGReg rd)
7a3766f3
RH
514{
515 tcg_out32(s, RDY | INSN_RD(rd));
516}
517
35e2da15
RH
518static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
519 int32_t val2, int val2const, int uns)
583d1215
RH
520{
521 /* Load Y with the sign/zero extension of RS1 to 64-bits. */
522 if (uns) {
523 tcg_out_sety(s, TCG_REG_G0);
524 } else {
375816f8
RH
525 tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
526 tcg_out_sety(s, TCG_REG_T1);
583d1215
RH
527 }
528
529 tcg_out_arithc(s, rd, rs1, val2, val2const,
530 uns ? ARITH_UDIV : ARITH_SDIV);
531}
532
8289b279
BS
533static inline void tcg_out_nop(TCGContext *s)
534{
26cc915c 535 tcg_out_sethi(s, TCG_REG_G0, 0);
8289b279
BS
536}
537
0aed257f 538static const uint8_t tcg_cond_to_bcond[] = {
cf7c2ca5
BS
539 [TCG_COND_EQ] = COND_E,
540 [TCG_COND_NE] = COND_NE,
541 [TCG_COND_LT] = COND_L,
542 [TCG_COND_GE] = COND_GE,
543 [TCG_COND_LE] = COND_LE,
544 [TCG_COND_GT] = COND_G,
545 [TCG_COND_LTU] = COND_CS,
546 [TCG_COND_GEU] = COND_CC,
547 [TCG_COND_LEU] = COND_LEU,
548 [TCG_COND_GTU] = COND_GU,
549};
550
ab1339b9
RH
551static const uint8_t tcg_cond_to_rcond[] = {
552 [TCG_COND_EQ] = RCOND_Z,
553 [TCG_COND_NE] = RCOND_NZ,
554 [TCG_COND_LT] = RCOND_LZ,
555 [TCG_COND_GT] = RCOND_GZ,
556 [TCG_COND_LE] = RCOND_LEZ,
557 [TCG_COND_GE] = RCOND_GEZ
558};
559
a115f3ea
RH
560static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
561{
562 tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
563}
564
565static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label)
566{
567 TCGLabel *l = &s->labels[label];
568 int off19;
569
570 if (l->has_value) {
abce5964 571 off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
a115f3ea
RH
572 } else {
573 /* Make sure to preserve destinations during retranslation. */
abce5964 574 off19 = *s->code_ptr & INSN_OFF19(-1);
a115f3ea
RH
575 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0);
576 }
577 tcg_out_bpcc0(s, scond, flags, off19);
578}
579
35e2da15 580static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
56f4927e 581{
ba225198 582 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
56f4927e
RH
583}
584
35e2da15
RH
585static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
586 int32_t arg2, int const_arg2, int label)
cf7c2ca5 587{
56f4927e 588 tcg_out_cmp(s, arg1, arg2, const_arg2);
a115f3ea 589 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, label);
cf7c2ca5
BS
590 tcg_out_nop(s);
591}
592
35e2da15
RH
593static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
594 int32_t v1, int v1const)
ded37f0d
RH
595{
596 tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
597 | INSN_RS1(tcg_cond_to_bcond[cond])
598 | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
599}
600
35e2da15
RH
601static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
602 TCGReg c1, int32_t c2, int c2const,
603 int32_t v1, int v1const)
ded37f0d
RH
604{
605 tcg_out_cmp(s, c1, c2, c2const);
606 tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
607}
608
35e2da15
RH
609static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
610 int32_t arg2, int const_arg2, int label)
1da92db2 611{
ab1339b9
RH
612 /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
613 if (arg2 == 0 && !is_unsigned_cond(cond)) {
614 TCGLabel *l = &s->labels[label];
615 int off16;
616
617 if (l->has_value) {
abce5964 618 off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
ab1339b9
RH
619 } else {
620 /* Make sure to preserve destinations during retranslation. */
abce5964 621 off16 = *s->code_ptr & INSN_OFF16(-1);
ab1339b9
RH
622 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0);
623 }
624 tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
625 | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
626 } else {
627 tcg_out_cmp(s, arg1, arg2, const_arg2);
628 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label);
629 }
1da92db2
BS
630 tcg_out_nop(s);
631}
ded37f0d 632
35e2da15
RH
633static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
634 int32_t v1, int v1const)
203342d8
RH
635{
636 tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
637 | (tcg_cond_to_rcond[cond] << 10)
638 | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
639}
640
35e2da15
RH
641static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
642 TCGReg c1, int32_t c2, int c2const,
643 int32_t v1, int v1const)
ded37f0d 644{
203342d8
RH
645 /* For 64-bit signed comparisons vs zero, we can avoid the compare.
646 Note that the immediate range is one bit smaller, so we must check
647 for that as well. */
648 if (c2 == 0 && !is_unsigned_cond(cond)
35e2da15 649 && (!v1const || check_fit_i32(v1, 10))) {
203342d8
RH
650 tcg_out_movr(s, cond, ret, c1, v1, v1const);
651 } else {
652 tcg_out_cmp(s, c1, c2, c2const);
653 tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
654 }
ded37f0d 655}
1da92db2 656
35e2da15
RH
657static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
658 TCGReg c1, int32_t c2, int c2const)
dbfe80e1 659{
dbfe80e1
RH
660 /* For 32-bit comparisons, we can play games with ADDX/SUBX. */
661 switch (cond) {
7d458a75
RH
662 case TCG_COND_LTU:
663 case TCG_COND_GEU:
664 /* The result of the comparison is in the carry bit. */
665 break;
666
dbfe80e1
RH
667 case TCG_COND_EQ:
668 case TCG_COND_NE:
7d458a75 669 /* For equality, we can transform to inequality vs zero. */
dbfe80e1
RH
670 if (c2 != 0) {
671 tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
672 }
673 c1 = TCG_REG_G0, c2 = ret, c2const = 0;
7d458a75 674 cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
dbfe80e1
RH
675 break;
676
677 case TCG_COND_GTU:
dbfe80e1 678 case TCG_COND_LEU:
7d458a75
RH
679 /* If we don't need to load a constant into a register, we can
680 swap the operands on GTU/LEU. There's no benefit to loading
681 the constant into a temporary register. */
682 if (!c2const || c2 == 0) {
35e2da15 683 TCGReg t = c1;
7d458a75
RH
684 c1 = c2;
685 c2 = t;
686 c2const = 0;
687 cond = tcg_swap_cond(cond);
688 break;
689 }
690 /* FALLTHRU */
dbfe80e1
RH
691
692 default:
693 tcg_out_cmp(s, c1, c2, c2const);
dbfe80e1 694 tcg_out_movi_imm13(s, ret, 0);
ded37f0d 695 tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
dbfe80e1
RH
696 return;
697 }
698
699 tcg_out_cmp(s, c1, c2, c2const);
700 if (cond == TCG_COND_LTU) {
701 tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
702 } else {
703 tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
704 }
705}
706
35e2da15
RH
707static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
708 TCGReg c1, int32_t c2, int c2const)
dbfe80e1 709{
203342d8
RH
710 /* For 64-bit signed comparisons vs zero, we can avoid the compare
711 if the input does not overlap the output. */
712 if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
713 tcg_out_movi_imm13(s, ret, 0);
714 tcg_out_movr(s, cond, ret, c1, 1, 1);
715 } else {
716 tcg_out_cmp(s, c1, c2, c2const);
717 tcg_out_movi_imm13(s, ret, 0);
718 tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
719 }
dbfe80e1 720}
4ec28e25 721
35e2da15
RH
722static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh,
723 TCGReg al, TCGReg ah, int32_t bl, int blconst,
724 int32_t bh, int bhconst, int opl, int oph)
4ec28e25 725{
35e2da15 726 TCGReg tmp = TCG_REG_T1;
4ec28e25
RH
727
728 /* Note that the low parts are fully consumed before tmp is set. */
729 if (rl != ah && (bhconst || rl != bh)) {
730 tmp = rl;
731 }
732
733 tcg_out_arithc(s, tmp, al, bl, blconst, opl);
734 tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
735 tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
736}
dbfe80e1 737
4e9cf840 738static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest)
aad2f06a 739{
abce5964 740 ptrdiff_t disp = tcg_pcrel_diff(s, dest);
aad2f06a
RH
741
742 if (disp == (int32_t)disp) {
743 tcg_out32(s, CALL | (uint32_t)disp >> 2);
744 } else {
abce5964
RH
745 uintptr_t desti = (uintptr_t)dest;
746 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, desti & ~0xfff);
747 tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
aad2f06a
RH
748 }
749}
750
4e9cf840
RH
751static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
752{
753 tcg_out_call_nodelay(s, dest);
754 tcg_out_nop(s);
755}
756
#ifdef CONFIG_SOFTMMU
/* Per-memop trampolines that marshal arguments for the softmmu
   load/store helpers, indexed by TCGMemOp. */
static tcg_insn_unit *qemu_ld_trampoline[16];
static tcg_insn_unit *qemu_st_trampoline[16];

static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[16] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEQ]  = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEQ]  = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[16] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEQ]  = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEQ]  = helper_be_stq_mmu,
    };

    int i;
    TCGReg ra;

    for (i = 0; i < 16; ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = s->code_ptr;

        if (SPARC64 || TARGET_LONG_BITS == 32) {
            ra = TCG_REG_O3;
        } else {
            /* Install the high part of the address.  */
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
            ra = TCG_REG_O4;
        }

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_ld_helpers[i]);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }

    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = s->code_ptr;

        if (SPARC64) {
            ra = TCG_REG_O4;
        } else {
            ra = TCG_REG_O1;
            if (TARGET_LONG_BITS == 64) {
                /* Install the high part of the address.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            if ((i & MO_SIZE) == MO_64) {
                /* Install the high part of the data.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            /* Skip the mem_index argument.  */
            ra += 1;
        }

        /* Set the retaddr operand.  */
        if (ra >= TCG_REG_O6) {
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
            ra = TCG_REG_G1;
        }
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_st_helpers[i]);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }
}
#endif

7d551702 865/* Generate global QEMU prologue and epilogue code */
e4d58b41 866static void tcg_target_qemu_prologue(TCGContext *s)
b3db8758 867{
4c3204cb
RH
868 int tmp_buf_size, frame_size;
869
870 /* The TCG temp buffer is at the top of the frame, immediately
871 below the frame pointer. */
872 tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
873 tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
874 tmp_buf_size);
875
876 /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
877 otherwise the minimal frame usable by callees. */
878 frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
879 frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
880 frame_size += TCG_TARGET_STACK_ALIGN - 1;
881 frame_size &= -TCG_TARGET_STACK_ALIGN;
b3db8758 882 tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
4c3204cb 883 INSN_IMM13(-frame_size));
c6f7e4fb
RH
884
885#ifdef CONFIG_USE_GUEST_BASE
886 if (GUEST_BASE != 0) {
887 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
888 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
889 }
890#endif
891
aad2f06a 892 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
0c554161
RH
893 /* delay slot */
894 tcg_out_nop(s);
4c3204cb
RH
895
896 /* No epilogue required. We issue ret + restore directly in the TB. */
7ea5d725
RH
897
898#ifdef CONFIG_SOFTMMU
899 build_trampolines(s);
900#endif
b3db8758
BS
901}
902
#if defined(CONFIG_SOFTMMU)
/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               TCGMemOp s_bits, int which)
{
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    int tlb_ofs;

    /* Shift the page number down.  */
    tcg_out_arithi(s, r1, addr, TARGET_PAGE_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.  */
    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Mask the tlb index.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND);

    /* Mask page, part 2.  */
    tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND);

    /* Shift the tlb index into place.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_ptr(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        if (tlb_ofs & ~0x3ff) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, tlb_ofs & ~0x3ff);
            tcg_out_arith(s, r1, r1, TCG_REG_T1, ARITH_ADD);
        }
        tlb_ofs &= 0x3ff;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */

eef0d9e7
RH
970static const int qemu_ld_opc[16] = {
971 [MO_UB] = LDUB,
972 [MO_SB] = LDSB,
973
974 [MO_BEUW] = LDUH,
975 [MO_BESW] = LDSH,
976 [MO_BEUL] = LDUW,
977 [MO_BESL] = LDSW,
978 [MO_BEQ] = LDX,
979
980 [MO_LEUW] = LDUH_LE,
981 [MO_LESW] = LDSH_LE,
982 [MO_LEUL] = LDUW_LE,
983 [MO_LESL] = LDSW_LE,
984 [MO_LEQ] = LDX_LE,
a0ce341a 985};
9d0efc88 986
eef0d9e7
RH
987static const int qemu_st_opc[16] = {
988 [MO_UB] = STB,
989
990 [MO_BEUW] = STH,
991 [MO_BEUL] = STW,
992 [MO_BEQ] = STX,
993
994 [MO_LEUW] = STH_LE,
995 [MO_LEUL] = STW_LE,
996 [MO_LEQ] = STX_LE,
a0ce341a 997};
bffe1431 998
34b1a49c
RH
999static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
1000 TCGMemOp memop, int memi, bool is_64)
f5ef6aac 1001{
34b1a49c
RH
1002#ifdef CONFIG_SOFTMMU
1003 TCGMemOp s_bits = memop & MO_SIZE;
cab0a7ea 1004 TCGReg addrz, param;
abce5964
RH
1005 tcg_insn_unit *func;
1006 tcg_insn_unit *label_ptr;
f5ef6aac 1007
34b1a49c 1008 addrz = tcg_out_tlb_load(s, addr, memi, s_bits,
cab0a7ea 1009 offsetof(CPUTLBEntry, addr_read));
a0ce341a 1010
34b1a49c
RH
1011 /* The fast path is exactly one insn. Thus we can perform the
1012 entire TLB Hit in the (annulled) delay slot of the branch
1013 over the TLB Miss case. */
a0ce341a 1014
34b1a49c 1015 /* beq,a,pt %[xi]cc, label0 */
abce5964 1016 label_ptr = s->code_ptr;
34b1a49c
RH
1017 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1018 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
1019 /* delay slot */
1020 tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_ld_opc[memop]);
53c37487 1021
a0ce341a 1022 /* TLB Miss. */
f5ef6aac 1023
7ea5d725 1024 param = TCG_REG_O1;
34b1a49c
RH
1025 if (!SPARC64 && TARGET_LONG_BITS == 64) {
1026 /* Skip the high-part; we'll perform the extract in the trampoline. */
1027 param++;
a0ce341a 1028 }
34b1a49c 1029 tcg_out_mov(s, TCG_TYPE_REG, param++, addr);
f5ef6aac 1030
7ea5d725
RH
1031 /* We use the helpers to extend SB and SW data, leaving the case
1032 of SL needing explicit extending below. */
1033 if ((memop & ~MO_BSWAP) == MO_SL) {
1034 func = qemu_ld_trampoline[memop & ~MO_SIGN];
1035 } else {
1036 func = qemu_ld_trampoline[memop];
1037 }
abce5964 1038 assert(func != NULL);
4e9cf840 1039 tcg_out_call_nodelay(s, func);
a0ce341a 1040 /* delay slot */
7ea5d725
RH
1041 tcg_out_movi(s, TCG_TYPE_I32, param, memi);
1042
34b1a49c
RH
1043 /* Recall that all of the helpers return 64-bit results.
1044 Which complicates things for sparcv8plus. */
1045 if (SPARC64) {
1046 /* We let the helper sign-extend SB and SW, but leave SL for here. */
1047 if (is_64 && (memop & ~MO_BSWAP) == MO_SL) {
1048 tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
1049 } else {
1050 tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
1051 }
1052 } else {
1053 if (s_bits == MO_64) {
1054 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
1055 tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
1056 tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
1057 } else if (is_64) {
1058 /* Re-extend from 32-bit rather than reassembling when we
1059 know the high register must be an extension. */
1060 tcg_out_arithi(s, data, TCG_REG_O1, 0,
1061 memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
1062 } else {
1063 tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
a0ce341a 1064 }
f5ef6aac
BS
1065 }
1066
abce5964 1067 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
90cbed46 1068#else
9f44adc5 1069 if (SPARC64 && TARGET_LONG_BITS == 32) {
34b1a49c
RH
1070 tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
1071 addr = TCG_REG_T1;
f5ef6aac 1072 }
34b1a49c
RH
1073 tcg_out_ldst_rr(s, data, addr,
1074 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
1075 qemu_ld_opc[memop]);
a0ce341a 1076#endif /* CONFIG_SOFTMMU */
f5ef6aac
BS
1077}
1078
34b1a49c
RH
1079static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
1080 TCGMemOp memop, int memi)
f5ef6aac 1081{
34b1a49c
RH
1082#ifdef CONFIG_SOFTMMU
1083 TCGMemOp s_bits = memop & MO_SIZE;
1084 TCGReg addrz, param;
abce5964
RH
1085 tcg_insn_unit *func;
1086 tcg_insn_unit *label_ptr;
f5ef6aac 1087
34b1a49c 1088 addrz = tcg_out_tlb_load(s, addr, memi, s_bits,
cab0a7ea 1089 offsetof(CPUTLBEntry, addr_write));
a0ce341a 1090
a0ce341a
RH
1091 /* The fast path is exactly one insn. Thus we can perform the entire
1092 TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
1093 /* beq,a,pt %[xi]cc, label0 */
abce5964 1094 label_ptr = s->code_ptr;
a115f3ea
RH
1095 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1096 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
a0ce341a 1097 /* delay slot */
34b1a49c 1098 tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_st_opc[memop]);
a0ce341a
RH
1099
1100 /* TLB Miss. */
1101
7ea5d725 1102 param = TCG_REG_O1;
34b1a49c
RH
1103 if (!SPARC64 && TARGET_LONG_BITS == 64) {
1104 /* Skip the high-part; we'll perform the extract in the trampoline. */
1105 param++;
a0ce341a 1106 }
34b1a49c
RH
1107 tcg_out_mov(s, TCG_TYPE_REG, param++, addr);
1108 if (!SPARC64 && s_bits == MO_64) {
1109 /* Skip the high-part; we'll perform the extract in the trampoline. */
1110 param++;
a0ce341a 1111 }
34b1a49c 1112 tcg_out_mov(s, TCG_TYPE_REG, param++, data);
53c37487 1113
7ea5d725 1114 func = qemu_st_trampoline[memop];
abce5964 1115 assert(func != NULL);
4e9cf840 1116 tcg_out_call_nodelay(s, func);
a0ce341a 1117 /* delay slot */
7ea5d725 1118 tcg_out_movi(s, TCG_TYPE_REG, param, memi);
f5ef6aac 1119
abce5964 1120 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
8384dd67 1121#else
9f44adc5 1122 if (SPARC64 && TARGET_LONG_BITS == 32) {
34b1a49c
RH
1123 tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
1124 addr = TCG_REG_T1;
a0ce341a 1125 }
34b1a49c 1126 tcg_out_ldst_rr(s, data, addr,
c6f7e4fb 1127 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
eef0d9e7 1128 qemu_st_opc[memop]);
a0ce341a 1129#endif /* CONFIG_SOFTMMU */
f5ef6aac
BS
1130}
1131
b357f902
RH
1132static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1133 const TCGArg args[TCG_MAX_OP_ARGS],
1134 const int const_args[TCG_MAX_OP_ARGS])
8289b279 1135{
b357f902
RH
1136 TCGArg a0, a1, a2;
1137 int c, c2;
1138
1139 /* Hoist the loads of the most common arguments. */
1140 a0 = args[0];
1141 a1 = args[1];
1142 a2 = args[2];
1143 c2 = const_args[2];
8289b279
BS
1144
1145 switch (opc) {
1146 case INDEX_op_exit_tb:
b357f902 1147 if (check_fit_ptr(a0, 13)) {
8b66eefe 1148 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
b357f902 1149 tcg_out_movi_imm13(s, TCG_REG_O0, a0);
8b66eefe 1150 } else {
b357f902 1151 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
8b66eefe 1152 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
b357f902 1153 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
8b66eefe 1154 }
8289b279
BS
1155 break;
1156 case INDEX_op_goto_tb:
1157 if (s->tb_jmp_offset) {
1158 /* direct jump method */
abce5964 1159 s->tb_jmp_offset[a0] = tcg_current_code_size(s);
5bbd2cae 1160 /* Make sure to preserve links during retranslation. */
abce5964 1161 tcg_out32(s, CALL | (*s->code_ptr & ~INSN_OP(-1)));
8289b279
BS
1162 } else {
1163 /* indirect jump method */
b357f902 1164 tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + a0));
aad2f06a 1165 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
8289b279 1166 }
53cd9273 1167 tcg_out_nop(s);
abce5964 1168 s->tb_next_offset[a0] = tcg_current_code_size(s);
8289b279 1169 break;
8289b279 1170 case INDEX_op_br:
b357f902 1171 tcg_out_bpcc(s, COND_A, BPCC_PT, a0);
f5ef6aac 1172 tcg_out_nop(s);
8289b279 1173 break;
8289b279 1174
8289b279 1175#define OP_32_64(x) \
ba225198
RH
1176 glue(glue(case INDEX_op_, x), _i32): \
1177 glue(glue(case INDEX_op_, x), _i64)
34b1a49c 1178
ba225198 1179 OP_32_64(ld8u):
b357f902 1180 tcg_out_ldst(s, a0, a1, a2, LDUB);
8289b279 1181 break;
ba225198 1182 OP_32_64(ld8s):
b357f902 1183 tcg_out_ldst(s, a0, a1, a2, LDSB);
8289b279 1184 break;
ba225198 1185 OP_32_64(ld16u):
b357f902 1186 tcg_out_ldst(s, a0, a1, a2, LDUH);
8289b279 1187 break;
ba225198 1188 OP_32_64(ld16s):
b357f902 1189 tcg_out_ldst(s, a0, a1, a2, LDSH);
8289b279
BS
1190 break;
1191 case INDEX_op_ld_i32:
53cd9273 1192 case INDEX_op_ld32u_i64:
b357f902 1193 tcg_out_ldst(s, a0, a1, a2, LDUW);
8289b279 1194 break;
ba225198 1195 OP_32_64(st8):
b357f902 1196 tcg_out_ldst(s, a0, a1, a2, STB);
8289b279 1197 break;
ba225198 1198 OP_32_64(st16):
b357f902 1199 tcg_out_ldst(s, a0, a1, a2, STH);
8289b279
BS
1200 break;
1201 case INDEX_op_st_i32:
53cd9273 1202 case INDEX_op_st32_i64:
b357f902 1203 tcg_out_ldst(s, a0, a1, a2, STW);
8289b279 1204 break;
ba225198 1205 OP_32_64(add):
53cd9273 1206 c = ARITH_ADD;
ba225198
RH
1207 goto gen_arith;
1208 OP_32_64(sub):
8289b279 1209 c = ARITH_SUB;
ba225198
RH
1210 goto gen_arith;
1211 OP_32_64(and):
8289b279 1212 c = ARITH_AND;
ba225198 1213 goto gen_arith;
dc69960d
RH
1214 OP_32_64(andc):
1215 c = ARITH_ANDN;
1216 goto gen_arith;
ba225198 1217 OP_32_64(or):
8289b279 1218 c = ARITH_OR;
ba225198 1219 goto gen_arith;
18c8f7a3
RH
1220 OP_32_64(orc):
1221 c = ARITH_ORN;
1222 goto gen_arith;
ba225198 1223 OP_32_64(xor):
8289b279 1224 c = ARITH_XOR;
ba225198 1225 goto gen_arith;
8289b279
BS
1226 case INDEX_op_shl_i32:
1227 c = SHIFT_SLL;
1fd95946
RH
1228 do_shift32:
1229 /* Limit immediate shift count lest we create an illegal insn. */
b357f902 1230 tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
1fd95946 1231 break;
8289b279
BS
1232 case INDEX_op_shr_i32:
1233 c = SHIFT_SRL;
1fd95946 1234 goto do_shift32;
8289b279
BS
1235 case INDEX_op_sar_i32:
1236 c = SHIFT_SRA;
1fd95946 1237 goto do_shift32;
8289b279
BS
1238 case INDEX_op_mul_i32:
1239 c = ARITH_UMUL;
ba225198 1240 goto gen_arith;
583d1215 1241
4b5a85c1
RH
1242 OP_32_64(neg):
1243 c = ARITH_SUB;
1244 goto gen_arith1;
be6551b1
RH
1245 OP_32_64(not):
1246 c = ARITH_ORN;
1247 goto gen_arith1;
4b5a85c1 1248
583d1215 1249 case INDEX_op_div_i32:
b357f902 1250 tcg_out_div32(s, a0, a1, a2, c2, 0);
583d1215
RH
1251 break;
1252 case INDEX_op_divu_i32:
b357f902 1253 tcg_out_div32(s, a0, a1, a2, c2, 1);
583d1215
RH
1254 break;
1255
8289b279 1256 case INDEX_op_brcond_i32:
b357f902 1257 tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], args[3]);
8289b279 1258 break;
dbfe80e1 1259 case INDEX_op_setcond_i32:
b357f902 1260 tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
dbfe80e1 1261 break;
ded37f0d 1262 case INDEX_op_movcond_i32:
b357f902 1263 tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
ded37f0d 1264 break;
dbfe80e1 1265
7a3766f3 1266 case INDEX_op_add2_i32:
b357f902
RH
1267 tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], const_args[4],
1268 args[5], const_args[5], ARITH_ADDCC, ARITH_ADDX);
7a3766f3
RH
1269 break;
1270 case INDEX_op_sub2_i32:
b357f902
RH
1271 tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], const_args[4],
1272 args[5], const_args[5], ARITH_SUBCC, ARITH_SUBX);
7a3766f3
RH
1273 break;
1274 case INDEX_op_mulu2_i32:
f4c16661
RH
1275 c = ARITH_UMUL;
1276 goto do_mul2;
1277 case INDEX_op_muls2_i32:
1278 c = ARITH_SMUL;
1279 do_mul2:
1280 /* The 32-bit multiply insns produce a full 64-bit result. If the
1281 destination register can hold it, we can avoid the slower RDY. */
b357f902
RH
1282 tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
1283 if (SPARC64 || a0 <= TCG_REG_O7) {
1284 tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
f4c16661 1285 } else {
b357f902 1286 tcg_out_rdy(s, a1);
f4c16661 1287 }
7a3766f3 1288 break;
8289b279 1289
cab0a7ea 1290 case INDEX_op_qemu_ld_i32:
b357f902 1291 tcg_out_qemu_ld(s, a0, a1, a2, args[3], false);
8289b279 1292 break;
cab0a7ea 1293 case INDEX_op_qemu_ld_i64:
b357f902 1294 tcg_out_qemu_ld(s, a0, a1, a2, args[3], true);
8289b279 1295 break;
cab0a7ea 1296 case INDEX_op_qemu_st_i32:
cab0a7ea 1297 case INDEX_op_qemu_st_i64:
b357f902 1298 tcg_out_qemu_st(s, a0, a1, a2, args[3]);
a0ce341a 1299 break;
8289b279 1300
53cd9273 1301 case INDEX_op_ld32s_i64:
b357f902 1302 tcg_out_ldst(s, a0, a1, a2, LDSW);
53cd9273 1303 break;
8289b279 1304 case INDEX_op_ld_i64:
b357f902 1305 tcg_out_ldst(s, a0, a1, a2, LDX);
8289b279
BS
1306 break;
1307 case INDEX_op_st_i64:
b357f902 1308 tcg_out_ldst(s, a0, a1, a2, STX);
8289b279
BS
1309 break;
1310 case INDEX_op_shl_i64:
1311 c = SHIFT_SLLX;
1fd95946
RH
1312 do_shift64:
1313 /* Limit immediate shift count lest we create an illegal insn. */
b357f902 1314 tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
1fd95946 1315 break;
8289b279
BS
1316 case INDEX_op_shr_i64:
1317 c = SHIFT_SRLX;
1fd95946 1318 goto do_shift64;
8289b279
BS
1319 case INDEX_op_sar_i64:
1320 c = SHIFT_SRAX;
1fd95946 1321 goto do_shift64;
8289b279
BS
1322 case INDEX_op_mul_i64:
1323 c = ARITH_MULX;
ba225198 1324 goto gen_arith;
583d1215 1325 case INDEX_op_div_i64:
53cd9273 1326 c = ARITH_SDIVX;
ba225198 1327 goto gen_arith;
583d1215 1328 case INDEX_op_divu_i64:
8289b279 1329 c = ARITH_UDIVX;
ba225198 1330 goto gen_arith;
cc6dfecf 1331 case INDEX_op_ext32s_i64:
b357f902 1332 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
cc6dfecf
RH
1333 break;
1334 case INDEX_op_ext32u_i64:
b357f902 1335 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
cc6dfecf 1336 break;
a24fba93 1337 case INDEX_op_trunc_shr_i32:
b357f902
RH
1338 if (a2 == 0) {
1339 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
a24fba93 1340 } else {
b357f902 1341 tcg_out_arithi(s, a0, a1, a2, SHIFT_SRLX);
a24fba93
RH
1342 }
1343 break;
8289b279
BS
1344
1345 case INDEX_op_brcond_i64:
b357f902 1346 tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], args[3]);
8289b279 1347 break;
dbfe80e1 1348 case INDEX_op_setcond_i64:
b357f902 1349 tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
dbfe80e1 1350 break;
ded37f0d 1351 case INDEX_op_movcond_i64:
b357f902 1352 tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
ded37f0d 1353 break;
34b1a49c 1354
ba225198 1355 gen_arith:
b357f902 1356 tcg_out_arithc(s, a0, a1, a2, c2, c);
53cd9273
BS
1357 break;
1358
4b5a85c1 1359 gen_arith1:
b357f902 1360 tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
4b5a85c1
RH
1361 break;
1362
96d0ee7f 1363 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
98b90bab 1364 case INDEX_op_mov_i64:
96d0ee7f 1365 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
98b90bab 1366 case INDEX_op_movi_i64:
96d0ee7f 1367 case INDEX_op_call: /* Always emitted via tcg_out_call. */
8289b279 1368 default:
8289b279
BS
1369 tcg_abort();
1370 }
1371}
1372
1373static const TCGTargetOpDef sparc_op_defs[] = {
1374 { INDEX_op_exit_tb, { } },
b3db8758 1375 { INDEX_op_goto_tb, { } },
8289b279
BS
1376 { INDEX_op_br, { } },
1377
8289b279
BS
1378 { INDEX_op_ld8u_i32, { "r", "r" } },
1379 { INDEX_op_ld8s_i32, { "r", "r" } },
1380 { INDEX_op_ld16u_i32, { "r", "r" } },
1381 { INDEX_op_ld16s_i32, { "r", "r" } },
1382 { INDEX_op_ld_i32, { "r", "r" } },
89269f6c
RH
1383 { INDEX_op_st8_i32, { "rZ", "r" } },
1384 { INDEX_op_st16_i32, { "rZ", "r" } },
1385 { INDEX_op_st_i32, { "rZ", "r" } },
1386
1387 { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
1388 { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
1389 { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
1390 { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
89269f6c
RH
1391 { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
1392 { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
1393 { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
1394 { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
1395 { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
1396 { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },
1397
1398 { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
1399 { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
1400 { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },
8289b279 1401
4b5a85c1 1402 { INDEX_op_neg_i32, { "r", "rJ" } },
be6551b1 1403 { INDEX_op_not_i32, { "r", "rJ" } },
4b5a85c1 1404
89269f6c
RH
1405 { INDEX_op_brcond_i32, { "rZ", "rJ" } },
1406 { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
1407 { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },
dbfe80e1 1408
89269f6c
RH
1409 { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1410 { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1411 { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },
f4c16661 1412 { INDEX_op_muls2_i32, { "r", "r", "rZ", "rJ" } },
8289b279 1413
34b1a49c
RH
1414 { INDEX_op_ld8u_i64, { "R", "r" } },
1415 { INDEX_op_ld8s_i64, { "R", "r" } },
1416 { INDEX_op_ld16u_i64, { "R", "r" } },
1417 { INDEX_op_ld16s_i64, { "R", "r" } },
1418 { INDEX_op_ld32u_i64, { "R", "r" } },
1419 { INDEX_op_ld32s_i64, { "R", "r" } },
1420 { INDEX_op_ld_i64, { "R", "r" } },
1421 { INDEX_op_st8_i64, { "RZ", "r" } },
1422 { INDEX_op_st16_i64, { "RZ", "r" } },
1423 { INDEX_op_st32_i64, { "RZ", "r" } },
1424 { INDEX_op_st_i64, { "RZ", "r" } },
1425
1426 { INDEX_op_add_i64, { "R", "RZ", "RJ" } },
1427 { INDEX_op_mul_i64, { "R", "RZ", "RJ" } },
1428 { INDEX_op_div_i64, { "R", "RZ", "RJ" } },
1429 { INDEX_op_divu_i64, { "R", "RZ", "RJ" } },
1430 { INDEX_op_sub_i64, { "R", "RZ", "RJ" } },
1431 { INDEX_op_and_i64, { "R", "RZ", "RJ" } },
1432 { INDEX_op_andc_i64, { "R", "RZ", "RJ" } },
1433 { INDEX_op_or_i64, { "R", "RZ", "RJ" } },
1434 { INDEX_op_orc_i64, { "R", "RZ", "RJ" } },
1435 { INDEX_op_xor_i64, { "R", "RZ", "RJ" } },
1436
1437 { INDEX_op_shl_i64, { "R", "RZ", "RJ" } },
1438 { INDEX_op_shr_i64, { "R", "RZ", "RJ" } },
1439 { INDEX_op_sar_i64, { "R", "RZ", "RJ" } },
1440
1441 { INDEX_op_neg_i64, { "R", "RJ" } },
1442 { INDEX_op_not_i64, { "R", "RJ" } },
1443
1444 { INDEX_op_ext32s_i64, { "R", "r" } },
1445 { INDEX_op_ext32u_i64, { "R", "r" } },
1446 { INDEX_op_trunc_shr_i32, { "r", "R" } },
1447
1448 { INDEX_op_brcond_i64, { "RZ", "RJ" } },
1449 { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
1450 { INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } },
1451
1452 { INDEX_op_qemu_ld_i32, { "r", "A" } },
1453 { INDEX_op_qemu_ld_i64, { "R", "A" } },
ebd0c614
RH
1454 { INDEX_op_qemu_st_i32, { "sZ", "A" } },
1455 { INDEX_op_qemu_st_i64, { "SZ", "A" } },
a0ce341a 1456
8289b279
BS
1457 { -1 },
1458};
1459
e4d58b41 1460static void tcg_target_init(TCGContext *s)
8289b279
BS
1461{
1462 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
34b1a49c
RH
1463 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, ALL_64);
1464
8289b279 1465 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
b3db8758
BS
1466 (1 << TCG_REG_G1) |
1467 (1 << TCG_REG_G2) |
1468 (1 << TCG_REG_G3) |
1469 (1 << TCG_REG_G4) |
1470 (1 << TCG_REG_G5) |
1471 (1 << TCG_REG_G6) |
1472 (1 << TCG_REG_G7) |
8289b279
BS
1473 (1 << TCG_REG_O0) |
1474 (1 << TCG_REG_O1) |
1475 (1 << TCG_REG_O2) |
1476 (1 << TCG_REG_O3) |
1477 (1 << TCG_REG_O4) |
1478 (1 << TCG_REG_O5) |
8289b279
BS
1479 (1 << TCG_REG_O7));
1480
1481 tcg_regset_clear(s->reserved_regs);
375816f8
RH
1482 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1483 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1484 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1485 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1486 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1487 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1488 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1489 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1490
8289b279
BS
1491 tcg_add_target_add_op_defs(sparc_op_defs);
1492}
cb1977d3 1493
/* ELF machine/flag values reported for the generated JIT code.  */
#if SPARC64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif
1500
cb1977d3 1501typedef struct {
ae18b28d 1502 DebugFrameHeader h;
9f44adc5 1503 uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
497a22eb
RH
1504 uint8_t fde_win_save;
1505 uint8_t fde_ret_save[3];
cb1977d3
RH
1506} DebugFrame;
1507
ae18b28d
RH
1508static const DebugFrame debug_frame = {
1509 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1510 .h.cie.id = -1,
1511 .h.cie.version = 1,
1512 .h.cie.code_align = 1,
1513 .h.cie.data_align = -sizeof(void *) & 0x7f,
1514 .h.cie.return_column = 15, /* o7 */
cb1977d3 1515
497a22eb 1516 /* Total FDE size does not include the "len" member. */
ae18b28d 1517 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
497a22eb
RH
1518
1519 .fde_def_cfa = {
9f44adc5 1520#if SPARC64
cb1977d3
RH
1521 12, 30, /* DW_CFA_def_cfa i6, 2047 */
1522 (2047 & 0x7f) | 0x80, (2047 >> 7)
1523#else
1524 13, 30 /* DW_CFA_def_cfa_register i6 */
1525#endif
1526 },
497a22eb
RH
1527 .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */
1528 .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
cb1977d3
RH
1529};
1530
1531void tcg_register_jit(void *buf, size_t buf_size)
1532{
cb1977d3
RH
1533 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1534}
5bbd2cae
RH
1535
1536void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
1537{
1538 uint32_t *ptr = (uint32_t *)jmp_addr;
c8fc56ce 1539 uintptr_t disp = addr - jmp_addr;
5bbd2cae
RH
1540
1541 /* We can reach the entire address space for 32-bit. For 64-bit
1542 the code_gen_buffer can't be larger than 2GB. */
c8fc56ce 1543 assert(disp == (int32_t)disp);
5bbd2cae 1544
c8fc56ce 1545 *ptr = CALL | (uint32_t)disp >> 2;
5bbd2cae
RH
1546 flush_icache_range(jmp_addr, jmp_addr + 4);
1547}