]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/sparc/tcg-target.c
tcg-ppc: Define TCG_TARGET_INSN_UNIT_SIZE
[mirror_qemu.git] / tcg / sparc / tcg-target.c
CommitLineData
8289b279
BS
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
3cf246f0
RH
25#include "tcg-be-null.h"
26
#ifndef NDEBUG
/* Register names, indexed by TCGReg, used only for debug dumps.  */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif
8289b279 63
9f44adc5
RH
/* Compile-time flag: nonzero when building a true 64-bit (v9) host.  */
#ifdef __arch64__
# define SPARC64 1
#else
# define SPARC64 0
#endif

/* Note that sparcv8plus can only hold 64 bit quantities in %g and %o
   registers.  These are saved manually by the kernel in full 64-bit
   slots.  The %i and %l registers are saved by the register window
   mechanism, which only allocates space for 32 bits.  Given that this
   window spill/fill can happen on any signal, we must consider the
   high bits of the %i and %l registers garbage at all times.  */
#if SPARC64
# define ALL_64 0xffffffffu
#else
# define ALL_64 0xffffu
#endif

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1 TCG_REG_G1
#define TCG_REG_T2 TCG_REG_O7

/* Register holding the guest base address; %g0 (hardwired zero) when
   user-mode guest-base support is compiled out.  */
#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif
e141ab52 91
/* Register allocation preference order: call-saved window registers
   first (%l, then %i), then the usable globals, and the call-clobbered
   %o registers last.  */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

/* The six SPARC ABI integer argument registers, in order.  */
static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};
130
/* Call return-value registers; more than one is used only for
   values split across register pairs on 32-bit hosts.  */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};
137
8289b279
BS
/* Instruction word field builders (SPARC instruction formats).  */
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

/* Immediate and branch-displacement encodings; bit 13 set selects the
   immediate form of a format-3 instruction.  */
#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)

/* Integer condition codes for branches and conditional moves.  */
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

/* Register conditions for BPr / MOVr (compare a register against zero).  */
#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

/* Condition-code selector fields for MOVcc.  */
#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

/* Fields for BPcc: cc selector, prediction, and annul bits.  */
#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

/* The predict-taken bit happens to occupy the same position in BPr.  */
#define BPR_PT     BPCC_PT

/* Format-3 arithmetic/logic opcodes.  */
#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

/* Shift opcodes; bit 12 selects the 64-bit (X) variants.  */
#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

/* Miscellaneous control and memory opcodes.  */
#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
/* Alternate-space loads/stores, used for little-endian access via ASI.  */
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

/* Little-endian memory accesses via the primary-little ASI.  */
#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))
264
/* Return true if VAL is representable as a signed BITS-bit immediate.  */
static inline int check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

/* 32-bit variant of the above.  */
static inline int check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

/* check_fit_tl tests target-long immediates; check_fit_ptr tests
   host-pointer-sized immediates.  */
#define check_fit_tl check_fit_i64
#if SPARC64
# define check_fit_ptr check_fit_i64
#else
# define check_fit_ptr check_fit_i32
#endif
281
/* Apply relocation TYPE at CODE_PTR, pointing it at VALUE + ADDEND.
   Branch displacements are patched in place by masking out the old
   offset field and or-ing in the new one; out-of-range targets abort.  */
static void patch_reloc(uint8_t *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    uint32_t insn;
    value += addend;
    switch (type) {
    case R_SPARC_32:
        /* Absolute 32-bit value; must not lose high bits.  */
        if (value != (uint32_t)value) {
            tcg_abort();
        }
        *(uint32_t *)code_ptr = value;
        break;
    case R_SPARC_WDISP16:
        /* 16-bit pc-relative word displacement (BPr).  */
        value -= (intptr_t)code_ptr;
        if (!check_fit_ptr(value >> 2, 16)) {
            tcg_abort();
        }
        insn = *(uint32_t *)code_ptr;
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(value);
        *(uint32_t *)code_ptr = insn;
        break;
    case R_SPARC_WDISP19:
        /* 19-bit pc-relative word displacement (BPcc).  */
        value -= (intptr_t)code_ptr;
        if (!check_fit_ptr(value >> 2, 19)) {
            tcg_abort();
        }
        insn = *(uint32_t *)code_ptr;
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(value);
        *(uint32_t *)code_ptr = insn;
        break;
    default:
        tcg_abort();
    }
}
318
/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':   /* any 32-bit-capable register */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'R':   /* any register that can hold 64 bits (see ALL_64) */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, ALL_64);
        break;
    case 'A': /* qemu_ld/st address constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0,
                         TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff);
    reserve_helpers:
        /* %o0-%o2 are consumed by the softmmu helper calling sequence.  */
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
        break;
    case 's': /* qemu_st data 32-bit constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        goto reserve_helpers;
    case 'S': /* qemu_st data 64-bit constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, ALL_64);
        goto reserve_helpers;
    case 'I':   /* signed 11-bit immediate (MOVcc) */
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':   /* signed 13-bit immediate (arithmetic) */
        ct->ct |= TCG_CT_CONST_S13;
        break;
    case 'Z':   /* the constant zero (use %g0) */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
367
368/* test if a constant matches the constraint */
f6c6afc1 369static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
a115f3ea
RH
370 const TCGArgConstraint *arg_ct)
371{
372 int ct = arg_ct->ct;
373
374 if (ct & TCG_CT_CONST) {
375 return 1;
4b304cfa
RH
376 }
377
378 if (type == TCG_TYPE_I32) {
379 val = (int32_t)val;
380 }
381
382 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
a115f3ea
RH
383 return 1;
384 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
385 return 1;
386 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
387 return 1;
388 } else {
389 return 0;
390 }
391}
392
35e2da15
RH
/* Emit a format-3 instruction with a register second source.  */
static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                                 TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

/* Emit a format-3 instruction with a signed 13-bit immediate.  */
static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                                  int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

/* Emit a format-3 instruction whose second operand is either a register
   or an immediate, selected by VAL2CONST.  */
static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}
411
2a534aff
RH
412static inline void tcg_out_mov(TCGContext *s, TCGType type,
413 TCGReg ret, TCGReg arg)
8289b279 414{
dda73c78
RH
415 if (ret != arg) {
416 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
417 }
26cc915c
BS
418}
419
/* Load the high 22 bits of ARG into RET, zeroing the low 10 bits.  */
static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

/* Load a 13-bit signed constant via "or %g0, arg, ret".  */
static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}
429
a9c7d27b
RH
/* Load an arbitrary constant ARG into RET, choosing the shortest of
   several encodings: imm13, sethi(+or), sethi/xor for sign-extended
   32-bit values, or a two-part 64-bit synthesis.  */
static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_target_long hi, lo = (int32_t)arg;

    /* Make sure we test 32-bit constants for imm13 properly.  */
    if (type == TCG_TYPE_I32) {
        arg = lo;
    }

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
        return;
    }

    /* A 32-bit constant sign-extended to 64-bits.  The xor with a
       negative 13-bit immediate both sets the low bits and flips the
       sign-extension produced by sethi of the complement.  */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        /* Low part fits in an add immediate; fold it into HI so that
           hi<<32 + lo reconstructs ARG exactly.  */
        hi = (arg - lo) >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        /* General case: build both halves, using T2 for the low part.  */
        hi = arg >> 32;
        tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}
476
35e2da15
RH
/* Emit a load/store with a register + register address.  */
static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                                   TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

/* Emit a load/store at ADDR + OFFSET, materializing the offset in T1
   when it does not fit in a 13-bit immediate.  */
static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}
494
/* Typed load: 32-bit values use unsigned-word load, 64-bit use ldx.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

/* Typed store, the mirror of tcg_out_ld.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                              TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

/* Load the pointer stored at absolute address ARG into RET, splitting
   the address into a sethi-compatible base and a 10-bit offset.  */
static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
{
    tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}
512
/* Write RS into the Y register (high bits of 32-bit divide input).  */
static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

/* Read the Y register into RD.  */
static inline void tcg_out_rdy(TCGContext *s, TCGReg rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}

/* Emit a 32-bit division; UNS selects unsigned vs signed.  */
static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
537
8289b279
BS
/* The canonical sparc nop is "sethi 0, %g0".  */
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_sethi(s, TCG_REG_G0, 0);
}
542
/* Map TCGCond to the sparc integer condition-code field.  */
static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

/* Map TCGCond to the register-vs-zero condition field (BPr/MOVr).
   Only signed comparisons against zero are representable.  */
static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};
564
a115f3ea
RH
/* Emit a BPcc instruction with a raw pre-encoded 19-bit offset.  */
static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

/* Emit a BPcc to LABEL, emitting a relocation if the label is not
   yet resolved.  */
static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label)
{
    TCGLabel *l = &s->labels[label];
    int off19;

    if (l->has_value) {
        off19 = INSN_OFF19(l->u.value - (unsigned long)s->code_ptr);
    } else {
        /* Make sure to preserve destinations during retranslation.  */
        off19 = *(uint32_t *)s->code_ptr & INSN_OFF19(-1);
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}
584
/* Compare C1 with C2 (register or constant) by "subcc ..., %g0":
   only the condition codes are updated.  */
static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

/* 32-bit conditional branch: compare, branch on icc, fill delay slot.  */
static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, int label)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, label);
    /* delay slot */
    tcg_out_nop(s);
}
597
35e2da15
RH
/* Emit MOVcc: conditionally move V1 (register or 11-bit immediate)
   into RET, testing the condition codes selected by CC.  */
static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

/* 32-bit movcond: compare then conditional move on icc.  */
static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}
613
35e2da15
RH
/* 64-bit conditional branch.  */
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, int label)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        /* Use BPr, which branches directly on a register's relation to
           zero; emit a WDISP16 relocation if the label is unresolved.  */
        TCGLabel *l = &s->labels[label];
        int off16;

        if (l->has_value) {
            off16 = INSN_OFF16(l->u.value - (unsigned long)s->code_ptr);
        } else {
            /* Make sure to preserve destinations during retranslation.  */
            off16 = *(uint32_t *)s->code_ptr & INSN_OFF16(-1);
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label);
    }
    /* delay slot */
    tcg_out_nop(s);
}
ded37f0d 637
35e2da15
RH
/* Emit MOVr: conditionally move V1 (register or 10-bit immediate) into
   RET based on C1's relation to zero.  */
static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

/* 64-bit movcond.  */
static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}
1da92db2 661
35e2da15
RH
/* Set RET to the boolean result of comparing C1 and C2 (32-bit).  */
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDX/SUBX.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
        }
        c1 = TCG_REG_G0, c2 = ret, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        /* Generic path: compare, preset 0, conditionally move 1.  */
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        /* addx %g0, 0, ret: RET = carry.  */
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
    } else {
        /* subx %g0, -1, ret: RET = !carry.  */
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
    }
}
711
35e2da15
RH
/* Set RET to the boolean result of comparing C1 and C2 (64-bit).  */
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}
4ec28e25 726
35e2da15
RH
/* Emit a double-word add/sub: OPL (with cc) on the low parts followed
   by OPH (with carry) on the high parts.  TMP protects RL when it
   aliases an input to the high-part operation.  */
static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah, int32_t bl, int blconst,
                            int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}
dbfe80e1 742
/* Emit a call to absolute address DEST: a single CALL when the 30-bit
   pc-relative displacement reaches, otherwise build the address in T1
   and jmpl through it (link register %o7 either way).  */
static void tcg_out_calli(TCGContext *s, uintptr_t dest)
{
    intptr_t disp = dest - (uintptr_t)s->code_ptr;

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, dest & ~0xfff);
        tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, dest & 0xfff, JMPL);
    }
}
754
7ea5d725
RH
#ifdef CONFIG_SOFTMMU
/* Entry points of the generated helper trampolines, indexed by TCGMemOp.  */
static uintptr_t qemu_ld_trampoline[16];
static uintptr_t qemu_st_trampoline[16];

/* Generate one trampoline per memory-op helper.  Each trampoline
   shuffles arguments into the helper ABI (env in %o0, return address
   as last argument) and tail-calls the C helper.  */
static void build_trampolines(TCGContext *s)
{
    static uintptr_t const qemu_ld_helpers[16] = {
        [MO_UB]   = (uintptr_t)helper_ret_ldub_mmu,
        [MO_SB]   = (uintptr_t)helper_ret_ldsb_mmu,
        [MO_LEUW] = (uintptr_t)helper_le_lduw_mmu,
        [MO_LESW] = (uintptr_t)helper_le_ldsw_mmu,
        [MO_LEUL] = (uintptr_t)helper_le_ldul_mmu,
        [MO_LEQ]  = (uintptr_t)helper_le_ldq_mmu,
        [MO_BEUW] = (uintptr_t)helper_be_lduw_mmu,
        [MO_BESW] = (uintptr_t)helper_be_ldsw_mmu,
        [MO_BEUL] = (uintptr_t)helper_be_ldul_mmu,
        [MO_BEQ]  = (uintptr_t)helper_be_ldq_mmu,
    };
    static uintptr_t const qemu_st_helpers[16] = {
        [MO_UB]   = (uintptr_t)helper_ret_stb_mmu,
        [MO_LEUW] = (uintptr_t)helper_le_stw_mmu,
        [MO_LEUL] = (uintptr_t)helper_le_stl_mmu,
        [MO_LEQ]  = (uintptr_t)helper_le_stq_mmu,
        [MO_BEUW] = (uintptr_t)helper_be_stw_mmu,
        [MO_BEUL] = (uintptr_t)helper_be_stl_mmu,
        [MO_BEQ]  = (uintptr_t)helper_be_stq_mmu,
    };

    int i;
    TCGReg ra;
    uintptr_t tramp;

    for (i = 0; i < 16; ++i) {
        if (qemu_ld_helpers[i] == 0) {
            continue;
        }

        /* May as well align the trampoline.  */
        tramp = (uintptr_t)s->code_ptr;
        while (tramp & 15) {
            tcg_out_nop(s);
            tramp += 4;
        }
        qemu_ld_trampoline[i] = tramp;

        if (SPARC64 || TARGET_LONG_BITS == 32) {
            ra = TCG_REG_O3;
        } else {
            /* Install the high part of the address.  */
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
            ra = TCG_REG_O4;
        }

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_calli(s, qemu_ld_helpers[i]);
        /* Delay slot: make the helper return to our own caller.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }

    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == 0) {
            continue;
        }

        /* May as well align the trampoline.  */
        tramp = (uintptr_t)s->code_ptr;
        while (tramp & 15) {
            tcg_out_nop(s);
            tramp += 4;
        }
        qemu_st_trampoline[i] = tramp;

        if (SPARC64) {
            ra = TCG_REG_O4;
        } else {
            /* 32-bit host: address and 64-bit data occupy register
               pairs; merge each pair into a single 64-bit register.  */
            ra = TCG_REG_O1;
            if (TARGET_LONG_BITS == 64) {
                /* Install the high part of the address.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            if ((i & MO_SIZE) == MO_64) {
                /* Install the high part of the data.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            /* Skip the mem_index argument.  */
            ra += 1;
        }

        /* Set the retaddr operand.  */
        if (ra >= TCG_REG_O6) {
            /* Out of %o registers: pass the return address on the stack.  */
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
            ra = TCG_REG_G1;
        }
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_calli(s, qemu_st_helpers[i]);
        /* Delay slot: make the helper return to our own caller.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }
}
#endif
867
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer.  */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    /* save %sp, -frame_size, %sp: open a new register window.  */
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* Jump to the TB address passed in %i1 (was %o1 before SAVE).  */
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* No epilogue required.  We issue ret + restore directly in the TB.  */

#ifdef CONFIG_SOFTMMU
    build_trampolines(s);
#endif
}
905
#if defined(CONFIG_SOFTMMU)
/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               TCGMemOp s_bits, int which)
{
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    int tlb_ofs;

    /* Shift the page number down.  */
    tcg_out_arithi(s, r1, addr, TARGET_PAGE_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.  */
    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Mask the tlb index.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND);

    /* Mask page, part 2.  */
    tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND);

    /* Shift the tlb index into place.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_ptr(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        /* Fold the out-of-range part of the offset into R1.  */
        if (tlb_ofs & ~0x3ff) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, tlb_ofs & ~0x3ff);
            tcg_out_arith(s, r1, r1, TCG_REG_T1, ARITH_ADD);
        }
        tlb_ofs &= 0x3ff;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */
972
eef0d9e7
RH
/* SPARC load opcodes indexed by TCGMemOp (size | sign | endianness).
   Combinations without an entry (e.g. byte-swapped sub-byte ops) are
   left zero and must never be emitted.  */
static const int qemu_ld_opc[16] = {
    [MO_UB] = LDUB,
    [MO_SB] = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEQ] = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEQ] = LDX_LE,
};
9d0efc88 989
eef0d9e7
RH
/* SPARC store opcodes indexed by TCGMemOp.  Stores have no signed
   variants, so only the unsigned entries are populated.  */
static const int qemu_st_opc[16] = {
    [MO_UB] = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEQ] = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEQ] = STX_LE,
};
bffe1431 1001
34b1a49c
RH
/* Emit a guest memory load of DATA from ADDR with memory operation MEMOP.
   MEMI is the softmmu TLB index; IS_64 indicates a 64-bit TCG destination
   (affects how the 64-bit helper result is narrowed/extended).  */
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOp memop, int memi, bool is_64)
{
#ifdef CONFIG_SOFTMMU
    TCGMemOp s_bits = memop & MO_SIZE;
    TCGReg addrz, param;
    uintptr_t func;
    uint32_t *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, s_bits,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = (uint32_t *)s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot: the load itself, using the TLB addend in %o1.  */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_ld_opc[memop]);

    /* TLB Miss. */

    /* Marshal arguments into the %o registers for the helper call.
       param starts at %o1 because %o0 carries env (set by the trampoline;
       TODO confirm against the trampoline generator above).  */
    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addr);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & ~MO_BSWAP) == MO_SL) {
        func = qemu_ld_trampoline[memop & ~MO_SIGN];
    } else {
        func = qemu_ld_trampoline[memop];
    }
    assert(func != 0);
    tcg_out_calli(s, func);
    /* delay slot: pass the mmu index as the last argument.  */
    tcg_out_movi(s, TCG_TYPE_I32, param, memi);

    /* Recall that all of the helpers return 64-bit results.
       Which complicates things for sparcv8plus. */
    if (SPARC64) {
        /* We let the helper sign-extend SB and SW, but leave SL for here. */
        if (is_64 && (memop & ~MO_BSWAP) == MO_SL) {
            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
        } else {
            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
        }
    } else {
        if (s_bits == MO_64) {
            /* Reassemble the 64-bit result from the %o0/%o1 halves.  */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
        } else if (is_64) {
            /* Re-extend from 32-bit rather than reassembling when we
               know the high register must be an extension.  */
            tcg_out_arithi(s, data, TCG_REG_O1, 0,
                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
        }
    }

    /* Patch the branch displacement now that the slow path is emitted.  */
    *label_ptr |= INSN_OFF19((uintptr_t)s->code_ptr - (uintptr_t)label_ptr);
#else
    /* No softmmu: direct access, optionally offset by GUEST_BASE.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_ld_opc[memop]);
#endif /* CONFIG_SOFTMMU */
}
1081
34b1a49c
RH
/* Emit a guest memory store of DATA to ADDR with memory operation MEMOP.
   MEMI is the softmmu TLB index.  Mirrors tcg_out_qemu_ld, but there is
   no result to massage after the helper call.  */
static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOp memop, int memi)
{
#ifdef CONFIG_SOFTMMU
    TCGMemOp s_bits = memop & MO_SIZE;
    TCGReg addrz, param;
    uintptr_t func;
    uint32_t *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, s_bits,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = (uint32_t *)s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot: the store itself, using the TLB addend in %o1.  */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_st_opc[memop]);

    /* TLB Miss. */

    /* Marshal (addr, data, mem_index) into the %o argument registers;
       64-bit values on 32-bit hosts occupy a skipped even/odd slot and
       are repacked inside the trampoline.  */
    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addr);
    if (!SPARC64 && s_bits == MO_64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, data);

    func = qemu_st_trampoline[memop];
    assert(func != 0);
    tcg_out_calli(s, func);
    /* delay slot: pass the mmu index as the last argument.  */
    tcg_out_movi(s, TCG_TYPE_REG, param, memi);

    /* Patch the branch displacement now that the slow path is emitted.  */
    *label_ptr |= INSN_OFF19((uintptr_t)s->code_ptr - (uintptr_t)label_ptr);
#else
    /* No softmmu: direct access, optionally offset by GUEST_BASE.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[memop]);
#endif /* CONFIG_SOFTMMU */
}
1134
b357f902
RH
/* Central code-generation dispatch: emit SPARC code for one TCG opcode.
   ARGS/CONST_ARGS describe the operands; const_args[i] nonzero means
   args[i] is an immediate rather than a register.  Cases share tails via
   the gen_arith/gen_arith1/do_shift*/do_mul2 labels, so the case order
   below is load-bearing.  */
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Return to the epilogue with the TB-exit value in %o0.  The
           RETURN restores the register window, so a value built in %i0
           before the RETURN appears as %o0 in its delay slot.  */
        if (check_fit_ptr(a0, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            uint32_t old_insn = *(uint32_t *)s->code_ptr;
            s->tb_jmp_offset[a0] = s->code_ptr - s->code_buf;
            /* Make sure to preserve links during retranslation.  */
            tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + a0));
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
        }
        /* delay slot */
        tcg_out_nop(s);
        s->tb_next_offset[a0] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_calli(s, a0);
        } else {
            tcg_out_arithi(s, TCG_REG_O7, a0, 0, JMPL);
        }
        /* delay slot */
        tcg_out_nop(s);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, a0);
        /* delay slot */
        tcg_out_nop(s);
        break;

/* Expand to the pair of case labels for the _i32 and _i64 forms of X.  */
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        /* neg: emitted below as %g0 - a1.  */
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        /* not: emitted below as %g0 orn a1.  */
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], args[3]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], const_args[4],
                        args[5], const_args[5], ARITH_ADDCC, ARITH_ADDX);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], const_args[4],
                        args[5], const_args[5], ARITH_SUBCC, ARITH_SUBX);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result.  If the
           destination register can hold it, we can avoid the slower RDY.  */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        if (SPARC64 || a0 <= TCG_REG_O7) {
            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        } else {
            tcg_out_rdy(s, a1);
        }
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, args[3], false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, args[3], true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2, args[3]);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_ext32s_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_ext32u_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_trunc_shr_i32:
        if (a2 == 0) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        } else {
            tcg_out_arithi(s, a0, a1, a2, SHIFT_SRLX);
        }
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], args[3]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    /* Shared tail: two-operand arithmetic with opcode in c.  */
    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    /* Shared tail: one-operand arithmetic, first source is %g0.  */
    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mov_i64:
    case INDEX_op_mov_i32:
    case INDEX_op_movi_i64:
    case INDEX_op_movi_i32:
        /* Always implemented with tcg_out_mov/i, never with tcg_out_op.  */
    default:
        /* Opcode not implemented.  */
        tcg_abort();
    }
}
1386
/* Operand constraint table for the SPARC backend, terminated by { -1 }.
   The constraint letters are interpreted by the backend's constraint
   parser (not visible in this chunk); from usage here, "r"/"R" appear to
   be the 32-bit/64-bit register classes, "Z" allows constant zero (%g0),
   "J"/"I" immediate ranges, "A"/"s"/"S" qemu_ld/st address and data
   classes, and "0" ties an input to output 0 — TODO confirm against
   target_parse_constraint.  */
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },
    { INDEX_op_muls2_i32, { "r", "r", "rZ", "rJ" } },

    { INDEX_op_mov_i64, { "R", "R" } },
    { INDEX_op_movi_i64, { "R" } },
    { INDEX_op_ld8u_i64, { "R", "r" } },
    { INDEX_op_ld8s_i64, { "R", "r" } },
    { INDEX_op_ld16u_i64, { "R", "r" } },
    { INDEX_op_ld16s_i64, { "R", "r" } },
    { INDEX_op_ld32u_i64, { "R", "r" } },
    { INDEX_op_ld32s_i64, { "R", "r" } },
    { INDEX_op_ld_i64, { "R", "r" } },
    { INDEX_op_st8_i64, { "RZ", "r" } },
    { INDEX_op_st16_i64, { "RZ", "r" } },
    { INDEX_op_st32_i64, { "RZ", "r" } },
    { INDEX_op_st_i64, { "RZ", "r" } },

    { INDEX_op_add_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_mul_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_div_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_divu_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_sub_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_and_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_andc_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_or_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_orc_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_xor_i64, { "R", "RZ", "RJ" } },

    { INDEX_op_shl_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_shr_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_sar_i64, { "R", "RZ", "RJ" } },

    { INDEX_op_neg_i64, { "R", "RJ" } },
    { INDEX_op_not_i64, { "R", "RJ" } },

    { INDEX_op_ext32s_i64, { "R", "r" } },
    { INDEX_op_ext32u_i64, { "R", "r" } },
    { INDEX_op_trunc_shr_i32, { "r", "R" } },

    { INDEX_op_brcond_i64, { "RZ", "RJ" } },
    { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } },

    { INDEX_op_qemu_ld_i32, { "r", "A" } },
    { INDEX_op_qemu_ld_i64, { "R", "A" } },
    { INDEX_op_qemu_st_i32, { "sZ", "A" } },
    { INDEX_op_qemu_st_i64, { "SZ", "A" } },

    { -1 },
};
1478
e4d58b41 1479static void tcg_target_init(TCGContext *s)
8289b279
BS
1480{
1481 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
34b1a49c
RH
1482 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, ALL_64);
1483
8289b279 1484 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
b3db8758
BS
1485 (1 << TCG_REG_G1) |
1486 (1 << TCG_REG_G2) |
1487 (1 << TCG_REG_G3) |
1488 (1 << TCG_REG_G4) |
1489 (1 << TCG_REG_G5) |
1490 (1 << TCG_REG_G6) |
1491 (1 << TCG_REG_G7) |
8289b279
BS
1492 (1 << TCG_REG_O0) |
1493 (1 << TCG_REG_O1) |
1494 (1 << TCG_REG_O2) |
1495 (1 << TCG_REG_O3) |
1496 (1 << TCG_REG_O4) |
1497 (1 << TCG_REG_O5) |
8289b279
BS
1498 (1 << TCG_REG_O7));
1499
1500 tcg_regset_clear(s->reserved_regs);
375816f8
RH
1501 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1502 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1503 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1504 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1505 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1506 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1507 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1508 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1509
8289b279
BS
1510 tcg_add_target_add_op_defs(sparc_op_defs);
1511}
cb1977d3 1512
/* ELF machine identification for the JIT unwind/debug interface.  */
#if SPARC64
# define ELF_HOST_MACHINE EM_SPARCV9
#else
# define ELF_HOST_MACHINE EM_SPARC32PLUS
# define ELF_HOST_FLAGS EF_SPARC_32PLUS
#endif

/* DWARF call-frame information handed to the host debugger via
   tcg_register_jit.  The struct layout IS the DWARF byte stream:
   field order, sizes, and the literal opcode bytes below must not
   be rearranged.  */
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[SPARC64 ? 4 : 2];   /* CFA definition opcodes */
    uint8_t fde_win_save;                   /* register-window save opcode */
    uint8_t fde_ret_save[3];                /* return-address column opcodes */
} DebugFrame;

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = -sizeof(void *) & 0x7f,  /* sleb128 of -wordsize */
    .cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
#if SPARC64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)   /* uleb128 encoding of 2047 */
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};
1550
1551void tcg_register_jit(void *buf, size_t buf_size)
1552{
c8fc56ce 1553 debug_frame.fde.func_start = (uintptr_t)buf;
cb1977d3
RH
1554 debug_frame.fde.func_len = buf_size;
1555
1556 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1557}
5bbd2cae
RH
1558
1559void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
1560{
1561 uint32_t *ptr = (uint32_t *)jmp_addr;
c8fc56ce 1562 uintptr_t disp = addr - jmp_addr;
5bbd2cae
RH
1563
1564 /* We can reach the entire address space for 32-bit. For 64-bit
1565 the code_gen_buffer can't be larger than 2GB. */
c8fc56ce 1566 assert(disp == (int32_t)disp);
5bbd2cae 1567
c8fc56ce 1568 *ptr = CALL | (uint32_t)disp >> 2;
5bbd2cae
RH
1569 flush_icache_range(jmp_addr, jmp_addr + 4);
1570}