]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/sparc/tcg-target.c
tcg-sparc: Support addsub2_i64
[mirror_qemu.git] / tcg / sparc / tcg-target.c
CommitLineData
8289b279
BS
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
3cf246f0
RH
25#include "tcg-be-null.h"
26
d4a9eb1f 27#ifndef NDEBUG
8289b279
BS
28static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
29 "%g0",
30 "%g1",
31 "%g2",
32 "%g3",
33 "%g4",
34 "%g5",
35 "%g6",
36 "%g7",
37 "%o0",
38 "%o1",
39 "%o2",
40 "%o3",
41 "%o4",
42 "%o5",
43 "%o6",
44 "%o7",
45 "%l0",
46 "%l1",
47 "%l2",
48 "%l3",
49 "%l4",
50 "%l5",
51 "%l6",
52 "%l7",
53 "%i0",
54 "%i1",
55 "%i2",
56 "%i3",
57 "%i4",
58 "%i5",
59 "%i6",
60 "%i7",
61};
d4a9eb1f 62#endif
8289b279 63
#ifdef __arch64__
# define SPARC64 1
#else
# define SPARC64 0
#endif

/* Note that sparcv8plus can only hold 64 bit quantities in %g and %o
   registers.  These are saved manually by the kernel in full 64-bit
   slots.  The %i and %l registers are saved by the register window
   mechanism, which only allocates space for 32 bits.  Given that this
   window spill/fill can happen on any signal, we must consider the
   high bits of the %i and %l registers garbage at all times.  */
#if SPARC64
# define ALL_64  0xffffffffu
#else
# define ALL_64  0xffffu
#endif

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG  TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG  TCG_REG_G0
#endif
e141ab52 91
0954d0d9 92static const int tcg_target_reg_alloc_order[] = {
8289b279
BS
93 TCG_REG_L0,
94 TCG_REG_L1,
95 TCG_REG_L2,
96 TCG_REG_L3,
97 TCG_REG_L4,
98 TCG_REG_L5,
99 TCG_REG_L6,
100 TCG_REG_L7,
26adfb75 101
8289b279
BS
102 TCG_REG_I0,
103 TCG_REG_I1,
104 TCG_REG_I2,
105 TCG_REG_I3,
106 TCG_REG_I4,
375816f8 107 TCG_REG_I5,
26adfb75
RH
108
109 TCG_REG_G2,
110 TCG_REG_G3,
111 TCG_REG_G4,
112 TCG_REG_G5,
113
114 TCG_REG_O0,
115 TCG_REG_O1,
116 TCG_REG_O2,
117 TCG_REG_O3,
118 TCG_REG_O4,
119 TCG_REG_O5,
8289b279
BS
120};
121
122static const int tcg_target_call_iarg_regs[6] = {
123 TCG_REG_O0,
124 TCG_REG_O1,
125 TCG_REG_O2,
126 TCG_REG_O3,
127 TCG_REG_O4,
128 TCG_REG_O5,
129};
130
26a74ae3 131static const int tcg_target_call_oarg_regs[] = {
8289b279 132 TCG_REG_O0,
e141ab52
BS
133 TCG_REG_O1,
134 TCG_REG_O2,
135 TCG_REG_O3,
8289b279
BS
136};
137
/* Instruction field encodings.  INSN_OP computes in uint32_t: OP values 2
   and 3 shifted into bit 30/31 of a plain int would be signed-overflow
   undefined behavior (C11 6.5.7); the unsigned cast keeps the arithmetic
   well-defined without changing any encoded bit pattern.  */
#define INSN_OP(x)  ((uint32_t)(x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

/* Immediate and branch-displacement fields; bit 13 set selects the
   immediate form of an arithmetic instruction.  */
#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)

/* Integer condition codes.  */
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

/* Register conditions (branch/move on register vs zero).  */
#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

/* Bit 12 selects the 64-bit (extended) shift forms.  */
#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)

#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

/* Little-endian load/store variants, via the alternate-space forms.  */
#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
/* Return non-zero if VAL is representable as a BITS-bit signed field.  */
static inline int check_fit_i64(int64_t val, unsigned int bits)
{
    int64_t ext = sextract64(val, 0, bits);
    return ext == val;
}

static inline int check_fit_i32(int32_t val, unsigned int bits)
{
    int32_t ext = sextract32(val, 0, bits);
    return ext == val;
}

/* Aliases for target-long and host-pointer sized immediates.  */
#define check_fit_tl    check_fit_i64
#if SPARC64
# define check_fit_ptr  check_fit_i64
#else
# define check_fit_ptr  check_fit_i32
#endif
abce5964 282static void patch_reloc(tcg_insn_unit *code_ptr, int type,
2ba7fae2 283 intptr_t value, intptr_t addend)
a115f3ea
RH
284{
285 uint32_t insn;
abce5964
RH
286
287 assert(addend == 0);
288 value = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr);
289
a115f3ea 290 switch (type) {
ab1339b9 291 case R_SPARC_WDISP16:
425532d7 292 if (!check_fit_ptr(value >> 2, 16)) {
ab1339b9
RH
293 tcg_abort();
294 }
abce5964 295 insn = *code_ptr;
ab1339b9
RH
296 insn &= ~INSN_OFF16(-1);
297 insn |= INSN_OFF16(value);
abce5964 298 *code_ptr = insn;
ab1339b9 299 break;
a115f3ea 300 case R_SPARC_WDISP19:
425532d7 301 if (!check_fit_ptr(value >> 2, 19)) {
a115f3ea
RH
302 tcg_abort();
303 }
abce5964 304 insn = *code_ptr;
a115f3ea
RH
305 insn &= ~INSN_OFF19(-1);
306 insn |= INSN_OFF19(value);
abce5964 307 *code_ptr = insn;
a115f3ea
RH
308 break;
309 default:
310 tcg_abort();
311 }
312}
313
314/* parse target specific constraints */
315static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
316{
317 const char *ct_str;
318
319 ct_str = *pct_str;
320 switch (ct_str[0]) {
321 case 'r':
322 ct->ct |= TCG_CT_REG;
323 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
324 break;
34b1a49c 325 case 'R':
a115f3ea 326 ct->ct |= TCG_CT_REG;
34b1a49c
RH
327 tcg_regset_set32(ct->u.regs, 0, ALL_64);
328 break;
329 case 'A': /* qemu_ld/st address constraint */
330 ct->ct |= TCG_CT_REG;
331 tcg_regset_set32(ct->u.regs, 0,
332 TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff);
333 reserve_helpers:
a115f3ea
RH
334 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
335 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
336 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
337 break;
34b1a49c
RH
338 case 's': /* qemu_st data 32-bit constraint */
339 ct->ct |= TCG_CT_REG;
340 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
341 goto reserve_helpers;
342 case 'S': /* qemu_st data 64-bit constraint */
343 ct->ct |= TCG_CT_REG;
344 tcg_regset_set32(ct->u.regs, 0, ALL_64);
345 goto reserve_helpers;
a115f3ea
RH
346 case 'I':
347 ct->ct |= TCG_CT_CONST_S11;
348 break;
349 case 'J':
350 ct->ct |= TCG_CT_CONST_S13;
351 break;
352 case 'Z':
353 ct->ct |= TCG_CT_CONST_ZERO;
354 break;
355 default:
356 return -1;
357 }
358 ct_str++;
359 *pct_str = ct_str;
360 return 0;
361}
362
363/* test if a constant matches the constraint */
f6c6afc1 364static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
a115f3ea
RH
365 const TCGArgConstraint *arg_ct)
366{
367 int ct = arg_ct->ct;
368
369 if (ct & TCG_CT_CONST) {
370 return 1;
4b304cfa
RH
371 }
372
373 if (type == TCG_TYPE_I32) {
374 val = (int32_t)val;
375 }
376
377 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
a115f3ea
RH
378 return 1;
379 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
380 return 1;
381 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
382 return 1;
383 } else {
384 return 0;
385 }
386}
387
35e2da15
RH
388static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
389 TCGReg rs2, int op)
26cc915c 390{
35e2da15 391 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
26cc915c
BS
392}
393
35e2da15
RH
394static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
395 int32_t offset, int op)
26cc915c 396{
35e2da15 397 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
26cc915c
BS
398}
399
35e2da15
RH
400static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
401 int32_t val2, int val2const, int op)
ba225198
RH
402{
403 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
404 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
405}
406
2a534aff
RH
407static inline void tcg_out_mov(TCGContext *s, TCGType type,
408 TCGReg ret, TCGReg arg)
8289b279 409{
dda73c78
RH
410 if (ret != arg) {
411 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
412 }
26cc915c
BS
413}
414
35e2da15 415static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
26cc915c
BS
416{
417 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
8289b279
BS
418}
419
35e2da15 420static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
b101234a
BS
421{
422 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
423}
424
a9c7d27b
RH
425static void tcg_out_movi(TCGContext *s, TCGType type,
426 TCGReg ret, tcg_target_long arg)
8289b279 427{
425532d7 428 tcg_target_long hi, lo = (int32_t)arg;
a9c7d27b 429
035b2398
RH
430 /* Make sure we test 32-bit constants for imm13 properly. */
431 if (type == TCG_TYPE_I32) {
432 arg = lo;
433 }
434
a9c7d27b
RH
435 /* A 13-bit constant sign-extended to 64-bits. */
436 if (check_fit_tl(arg, 13)) {
b101234a 437 tcg_out_movi_imm13(s, ret, arg);
a9c7d27b 438 return;
8289b279 439 }
8289b279 440
a9c7d27b 441 /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
34b1a49c 442 if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
a9c7d27b
RH
443 tcg_out_sethi(s, ret, arg);
444 if (arg & 0x3ff) {
445 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
446 }
447 return;
448 }
449
450 /* A 32-bit constant sign-extended to 64-bits. */
425532d7 451 if (arg == lo) {
43172207
RH
452 tcg_out_sethi(s, ret, ~arg);
453 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
a9c7d27b
RH
454 return;
455 }
456
457 /* A 64-bit constant decomposed into 2 32-bit pieces. */
425532d7 458 if (check_fit_i32(lo, 13)) {
34b1a49c 459 hi = (arg - lo) >> 32;
a9c7d27b
RH
460 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
461 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
462 tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
43172207 463 } else {
34b1a49c 464 hi = arg >> 32;
a9c7d27b
RH
465 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
466 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
375816f8 467 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
375816f8 468 tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
6f41b777 469 }
b101234a
BS
470}
471
35e2da15
RH
472static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
473 TCGReg a2, int op)
8289b279 474{
a0ce341a 475 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
8289b279
BS
476}
477
35e2da15
RH
478static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
479 intptr_t offset, int op)
8289b279 480{
425532d7 481 if (check_fit_ptr(offset, 13)) {
8289b279
BS
482 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
483 INSN_IMM13(offset));
a0ce341a 484 } else {
375816f8
RH
485 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
486 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
cf7c2ca5 487 }
8289b279
BS
488}
489
2a534aff 490static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
a05b5b9b 491 TCGReg arg1, intptr_t arg2)
8289b279 492{
a0ce341a 493 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
8289b279
BS
494}
495
2a534aff 496static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
a05b5b9b 497 TCGReg arg1, intptr_t arg2)
8289b279 498{
a0ce341a
RH
499 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
500}
501
35e2da15 502static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
a0ce341a 503{
35e2da15
RH
504 tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
505 tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
8289b279
BS
506}
507
35e2da15 508static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
8289b279 509{
583d1215 510 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
8289b279
BS
511}
512
35e2da15 513static inline void tcg_out_rdy(TCGContext *s, TCGReg rd)
7a3766f3
RH
514{
515 tcg_out32(s, RDY | INSN_RD(rd));
516}
517
35e2da15
RH
518static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
519 int32_t val2, int val2const, int uns)
583d1215
RH
520{
521 /* Load Y with the sign/zero extension of RS1 to 64-bits. */
522 if (uns) {
523 tcg_out_sety(s, TCG_REG_G0);
524 } else {
375816f8
RH
525 tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
526 tcg_out_sety(s, TCG_REG_T1);
583d1215
RH
527 }
528
529 tcg_out_arithc(s, rd, rs1, val2, val2const,
530 uns ? ARITH_UDIV : ARITH_SDIV);
531}
532
8289b279
BS
533static inline void tcg_out_nop(TCGContext *s)
534{
26cc915c 535 tcg_out_sethi(s, TCG_REG_G0, 0);
8289b279
BS
536}
537
0aed257f 538static const uint8_t tcg_cond_to_bcond[] = {
cf7c2ca5
BS
539 [TCG_COND_EQ] = COND_E,
540 [TCG_COND_NE] = COND_NE,
541 [TCG_COND_LT] = COND_L,
542 [TCG_COND_GE] = COND_GE,
543 [TCG_COND_LE] = COND_LE,
544 [TCG_COND_GT] = COND_G,
545 [TCG_COND_LTU] = COND_CS,
546 [TCG_COND_GEU] = COND_CC,
547 [TCG_COND_LEU] = COND_LEU,
548 [TCG_COND_GTU] = COND_GU,
549};
550
ab1339b9
RH
551static const uint8_t tcg_cond_to_rcond[] = {
552 [TCG_COND_EQ] = RCOND_Z,
553 [TCG_COND_NE] = RCOND_NZ,
554 [TCG_COND_LT] = RCOND_LZ,
555 [TCG_COND_GT] = RCOND_GZ,
556 [TCG_COND_LE] = RCOND_LEZ,
557 [TCG_COND_GE] = RCOND_GEZ
558};
559
a115f3ea
RH
560static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
561{
562 tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
563}
564
565static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label)
566{
567 TCGLabel *l = &s->labels[label];
568 int off19;
569
570 if (l->has_value) {
abce5964 571 off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
a115f3ea
RH
572 } else {
573 /* Make sure to preserve destinations during retranslation. */
abce5964 574 off19 = *s->code_ptr & INSN_OFF19(-1);
a115f3ea
RH
575 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0);
576 }
577 tcg_out_bpcc0(s, scond, flags, off19);
578}
579
35e2da15 580static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
56f4927e 581{
ba225198 582 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
56f4927e
RH
583}
584
35e2da15
RH
585static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
586 int32_t arg2, int const_arg2, int label)
cf7c2ca5 587{
56f4927e 588 tcg_out_cmp(s, arg1, arg2, const_arg2);
a115f3ea 589 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, label);
cf7c2ca5
BS
590 tcg_out_nop(s);
591}
592
35e2da15
RH
593static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
594 int32_t v1, int v1const)
ded37f0d
RH
595{
596 tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
597 | INSN_RS1(tcg_cond_to_bcond[cond])
598 | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
599}
600
35e2da15
RH
601static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
602 TCGReg c1, int32_t c2, int c2const,
603 int32_t v1, int v1const)
ded37f0d
RH
604{
605 tcg_out_cmp(s, c1, c2, c2const);
606 tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
607}
608
35e2da15
RH
609static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
610 int32_t arg2, int const_arg2, int label)
1da92db2 611{
ab1339b9
RH
612 /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
613 if (arg2 == 0 && !is_unsigned_cond(cond)) {
614 TCGLabel *l = &s->labels[label];
615 int off16;
616
617 if (l->has_value) {
abce5964 618 off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
ab1339b9
RH
619 } else {
620 /* Make sure to preserve destinations during retranslation. */
abce5964 621 off16 = *s->code_ptr & INSN_OFF16(-1);
ab1339b9
RH
622 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0);
623 }
624 tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
625 | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
626 } else {
627 tcg_out_cmp(s, arg1, arg2, const_arg2);
628 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label);
629 }
1da92db2
BS
630 tcg_out_nop(s);
631}
ded37f0d 632
35e2da15
RH
633static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
634 int32_t v1, int v1const)
203342d8
RH
635{
636 tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
637 | (tcg_cond_to_rcond[cond] << 10)
638 | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
639}
640
35e2da15
RH
641static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
642 TCGReg c1, int32_t c2, int c2const,
643 int32_t v1, int v1const)
ded37f0d 644{
203342d8
RH
645 /* For 64-bit signed comparisons vs zero, we can avoid the compare.
646 Note that the immediate range is one bit smaller, so we must check
647 for that as well. */
648 if (c2 == 0 && !is_unsigned_cond(cond)
35e2da15 649 && (!v1const || check_fit_i32(v1, 10))) {
203342d8
RH
650 tcg_out_movr(s, cond, ret, c1, v1, v1const);
651 } else {
652 tcg_out_cmp(s, c1, c2, c2const);
653 tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
654 }
ded37f0d 655}
1da92db2 656
35e2da15
RH
657static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
658 TCGReg c1, int32_t c2, int c2const)
dbfe80e1 659{
dbfe80e1
RH
660 /* For 32-bit comparisons, we can play games with ADDX/SUBX. */
661 switch (cond) {
7d458a75
RH
662 case TCG_COND_LTU:
663 case TCG_COND_GEU:
664 /* The result of the comparison is in the carry bit. */
665 break;
666
dbfe80e1
RH
667 case TCG_COND_EQ:
668 case TCG_COND_NE:
7d458a75 669 /* For equality, we can transform to inequality vs zero. */
dbfe80e1
RH
670 if (c2 != 0) {
671 tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
672 }
673 c1 = TCG_REG_G0, c2 = ret, c2const = 0;
7d458a75 674 cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
dbfe80e1
RH
675 break;
676
677 case TCG_COND_GTU:
dbfe80e1 678 case TCG_COND_LEU:
7d458a75
RH
679 /* If we don't need to load a constant into a register, we can
680 swap the operands on GTU/LEU. There's no benefit to loading
681 the constant into a temporary register. */
682 if (!c2const || c2 == 0) {
35e2da15 683 TCGReg t = c1;
7d458a75
RH
684 c1 = c2;
685 c2 = t;
686 c2const = 0;
687 cond = tcg_swap_cond(cond);
688 break;
689 }
690 /* FALLTHRU */
dbfe80e1
RH
691
692 default:
693 tcg_out_cmp(s, c1, c2, c2const);
dbfe80e1 694 tcg_out_movi_imm13(s, ret, 0);
ded37f0d 695 tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
dbfe80e1
RH
696 return;
697 }
698
699 tcg_out_cmp(s, c1, c2, c2const);
700 if (cond == TCG_COND_LTU) {
701 tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
702 } else {
703 tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
704 }
705}
706
35e2da15
RH
707static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
708 TCGReg c1, int32_t c2, int c2const)
dbfe80e1 709{
203342d8
RH
710 /* For 64-bit signed comparisons vs zero, we can avoid the compare
711 if the input does not overlap the output. */
712 if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
713 tcg_out_movi_imm13(s, ret, 0);
714 tcg_out_movr(s, cond, ret, c1, 1, 1);
715 } else {
716 tcg_out_cmp(s, c1, c2, c2const);
717 tcg_out_movi_imm13(s, ret, 0);
718 tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
719 }
dbfe80e1 720}
4ec28e25 721
609ac1e1
RH
722static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
723 TCGReg al, TCGReg ah, int32_t bl, int blconst,
724 int32_t bh, int bhconst, int opl, int oph)
4ec28e25 725{
35e2da15 726 TCGReg tmp = TCG_REG_T1;
4ec28e25
RH
727
728 /* Note that the low parts are fully consumed before tmp is set. */
729 if (rl != ah && (bhconst || rl != bh)) {
730 tmp = rl;
731 }
732
733 tcg_out_arithc(s, tmp, al, bl, blconst, opl);
734 tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
735 tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
736}
dbfe80e1 737
609ac1e1
RH
738static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
739 TCGReg al, TCGReg ah, int32_t bl, int blconst,
740 int32_t bh, int bhconst, bool is_sub)
741{
742 TCGReg tmp = TCG_REG_T1;
743
744 /* Note that the low parts are fully consumed before tmp is set. */
745 if (rl != ah && (bhconst || rl != bh)) {
746 tmp = rl;
747 }
748
749 tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
750
751 /* Note that ADDX/SUBX take the carry-in from %icc, the 32-bit carry,
752 while we want %xcc, the 64-bit carry. */
753 /* ??? There is a 2011 VIS3 ADDXC insn that does take a 64-bit carry. */
754
755 if (bh == TCG_REG_G0) {
756 /* If we have a zero, we can perform the operation in two insns,
757 with the arithmetic first, and a conditional move into place. */
758 if (rh == ah) {
759 tcg_out_arithi(s, TCG_REG_T2, ah, 1,
760 is_sub ? ARITH_SUB : ARITH_ADD);
761 tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
762 } else {
763 tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
764 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
765 }
766 } else {
767 /* Otherwise adjust BH as if there is carry into T2 ... */
768 if (bhconst) {
769 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
770 } else {
771 tcg_out_arithi(s, TCG_REG_T2, bh, 1,
772 is_sub ? ARITH_SUB : ARITH_ADD);
773 }
774 /* ... smoosh T2 back to original BH if carry is clear ... */
775 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
776 /* ... and finally perform the arithmetic with the new operand. */
777 tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
778 }
779
780 tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
781}
782
4e9cf840 783static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest)
aad2f06a 784{
abce5964 785 ptrdiff_t disp = tcg_pcrel_diff(s, dest);
aad2f06a
RH
786
787 if (disp == (int32_t)disp) {
788 tcg_out32(s, CALL | (uint32_t)disp >> 2);
789 } else {
abce5964
RH
790 uintptr_t desti = (uintptr_t)dest;
791 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, desti & ~0xfff);
792 tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
aad2f06a
RH
793 }
794}
795
4e9cf840
RH
796static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
797{
798 tcg_out_call_nodelay(s, dest);
799 tcg_out_nop(s);
800}
801
#ifdef CONFIG_SOFTMMU
/* Entry points of the per-memop helper trampolines, indexed by TCGMemOp.  */
static tcg_insn_unit *qemu_ld_trampoline[16];
static tcg_insn_unit *qemu_st_trampoline[16];

/* Build one trampoline per load/store memop that shuffles arguments into
   place, installs env and the return address, and tail-calls the C helper.  */
static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[16] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEQ]  = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEQ]  = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[16] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEQ]  = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEQ]  = helper_be_stq_mmu,
    };

    int i;
    TCGReg ra;

    for (i = 0; i < 16; ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = s->code_ptr;

        if (SPARC64 || TARGET_LONG_BITS == 32) {
            ra = TCG_REG_O3;
        } else {
            /* Install the high part of the address.  */
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
            ra = TCG_REG_O4;
        }

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_ld_helpers[i]);
        /* Delay slot: restore %o7 so the helper returns to our caller.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }

    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = s->code_ptr;

        if (SPARC64) {
            ra = TCG_REG_O4;
        } else {
            ra = TCG_REG_O1;
            if (TARGET_LONG_BITS == 64) {
                /* Install the high part of the address.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            if ((i & MO_SIZE) == MO_64) {
                /* Install the high part of the data.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            /* Skip the mem_index argument.  */
            ra += 1;
        }

        /* Set the retaddr operand.  */
        if (ra >= TCG_REG_O6) {
            /* Ran out of %o registers; spill to the stack slot instead.  */
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
            ra = TCG_REG_G1;
        }
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_st_helpers[i]);
        /* Delay slot: restore %o7 so the helper returns to our caller.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }
}
#endif
909
7d551702 910/* Generate global QEMU prologue and epilogue code */
e4d58b41 911static void tcg_target_qemu_prologue(TCGContext *s)
b3db8758 912{
4c3204cb
RH
913 int tmp_buf_size, frame_size;
914
915 /* The TCG temp buffer is at the top of the frame, immediately
916 below the frame pointer. */
917 tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
918 tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
919 tmp_buf_size);
920
921 /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
922 otherwise the minimal frame usable by callees. */
923 frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
924 frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
925 frame_size += TCG_TARGET_STACK_ALIGN - 1;
926 frame_size &= -TCG_TARGET_STACK_ALIGN;
b3db8758 927 tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
4c3204cb 928 INSN_IMM13(-frame_size));
c6f7e4fb
RH
929
930#ifdef CONFIG_USE_GUEST_BASE
931 if (GUEST_BASE != 0) {
932 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
933 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
934 }
935#endif
936
aad2f06a 937 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
0c554161
RH
938 /* delay slot */
939 tcg_out_nop(s);
4c3204cb
RH
940
941 /* No epilogue required. We issue ret + restore directly in the TB. */
7ea5d725
RH
942
943#ifdef CONFIG_SOFTMMU
944 build_trampolines(s);
945#endif
b3db8758
BS
946}
947
#if defined(CONFIG_SOFTMMU)
/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               TCGMemOp s_bits, int which)
{
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    int tlb_ofs;

    /* NOTE(review): the page-mask and tlb-index computations below are
       interleaved, presumably for issue scheduling — keep the ordering.  */

    /* Shift the page number down.  */
    tcg_out_arithi(s, r1, addr, TARGET_PAGE_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.  */
    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Mask the tlb index.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND);

    /* Mask page, part 2.  */
    tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND);

    /* Shift the tlb index into place.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.
       If the full offset does not fit in a 13-bit signed immediate, fold
       the high part into r1 and keep only the low 10 bits as offset.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_ptr(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        if (tlb_ofs & ~0x3ff) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, tlb_ofs & ~0x3ff);
            tcg_out_arith(s, r1, r1, TCG_REG_T1, ARITH_ADD);
        }
        tlb_ofs &= 0x3ff;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */
1014
eef0d9e7
RH
/* Host load opcodes, indexed by TCGMemOp (size | signedness | endianness);
   unused combinations are left zero.  */
static const int qemu_ld_opc[16] = {
    [MO_UB] = LDUB,
    [MO_SB] = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEQ]  = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEQ]  = LDX_LE,
};
9d0efc88 1031
eef0d9e7
RH
/* Host store opcodes, indexed by TCGMemOp; stores have no signed forms,
   so only the unsigned entries are populated.  */
static const int qemu_st_opc[16] = {
    [MO_UB] = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEQ]  = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEQ]  = STX_LE,
};
bffe1431 1043
34b1a49c
RH
/* Emit a guest load into DATA from guest address ADDR, with size, sign
   and endianness given by MEMOP.  MEMI is the softmmu mem_index; IS_64
   selects a 64-bit destination.  */
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOp memop, int memi, bool is_64)
{
#ifdef CONFIG_SOFTMMU
    TCGMemOp s_bits = memop & MO_SIZE;
    TCGReg addrz, param;
    tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, s_bits,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_ld_opc[memop]);

    /* TLB Miss.  Marshal the call arguments into the %o registers,
       starting after the (implicit) env argument.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addr);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & ~MO_BSWAP) == MO_SL) {
        func = qemu_ld_trampoline[memop & ~MO_SIGN];
    } else {
        func = qemu_ld_trampoline[memop];
    }
    assert(func != NULL);
    tcg_out_call_nodelay(s, func);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, param, memi);

    /* Recall that all of the helpers return 64-bit results.
       Which complicates things for sparcv8plus. */
    if (SPARC64) {
        /* We let the helper sign-extend SB and SW, but leave SL for here. */
        if (is_64 && (memop & ~MO_BSWAP) == MO_SL) {
            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
        } else {
            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
        }
    } else {
        if (s_bits == MO_64) {
            /* Reassemble the 64-bit result from the %o0/%o1 halves.  */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
        } else if (is_64) {
            /* Re-extend from 32-bit rather than reassembling when we
               know the high register must be an extension.  */
            tcg_out_arithi(s, data, TCG_REG_O1, 0,
                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
        }
    }

    /* Patch the branch displacement now that the miss path is emitted.  */
    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        /* Zero-extend the 32-bit guest address.  */
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_ld_opc[memop]);
#endif /* CONFIG_SOFTMMU */
}
1123
34b1a49c
RH
/* Emit a guest store of DATA to guest address ADDR, with size and
   endianness given by MEMOP.  MEMI is the softmmu mem_index.  */
static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOp memop, int memi)
{
#ifdef CONFIG_SOFTMMU
    TCGMemOp s_bits = memop & MO_SIZE;
    TCGReg addrz, param;
    tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, s_bits,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_st_opc[memop]);

    /* TLB Miss.  Marshal the call arguments into the %o registers,
       starting after the (implicit) env argument.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addr);
    if (!SPARC64 && s_bits == MO_64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, data);

    func = qemu_st_trampoline[memop];
    assert(func != NULL);
    tcg_out_call_nodelay(s, func);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_REG, param, memi);

    /* Patch the branch displacement now that the miss path is emitted.  */
    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        /* Zero-extend the 32-bit guest address.  */
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[memop]);
#endif /* CONFIG_SOFTMMU */
}
1176
b357f902
RH
/* Emit host code for one TCG opcode.  ARGS holds the operands and
   CONST_ARGS flags which of them are constants rather than registers.  */
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        /* NOTE(review): the return value is built around the RETURN insn's
           delay slot; %i0 written before RETURN is the caller's %o0 after
           the window restore — confirm against SPARC V9 RETURN semantics.  */
        if (check_fit_ptr(a0, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            s->tb_jmp_offset[a0] = tcg_current_code_size(s);
            /* Make sure to preserve links during retranslation. */
            tcg_out32(s, CALL | (*s->code_ptr & ~INSN_OP(-1)));
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + a0));
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
        }
        /* delay slot */
        tcg_out_nop(s);
        s->tb_next_offset[a0] = tcg_current_code_size(s);
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, a0);
        /* delay slot */
        tcg_out_nop(s);
        break;

/* Expand to the pair of case labels for the _i32 and _i64 variants.  */
#define OP_32_64(x) \
        glue(glue(case INDEX_op_, x), _i32): \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], args[3]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDX);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBX);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result.  If the
           destination register can hold it, we can avoid the slower RDY.  */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        if (SPARC64 || a0 <= TCG_REG_O7) {
            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        } else {
            tcg_out_rdy(s, a1);
        }
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, args[3], false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, args[3], true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2, args[3]);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_ext32s_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_ext32u_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_trunc_shr_i32:
        if (a2 == 0) {
            tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        } else {
            tcg_out_arithi(s, a0, a1, a2, SHIFT_SRLX);
        }
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], args[3]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;

    /* Shared tail for two-operand arithmetic: a0 = a1 op a2.  */
    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    /* Shared tail for one-operand arithmetic: a0 = %g0 op a1.  */
    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
1427
/* Operand constraints for each supported opcode; the constraint letters
   (r, R, Z, J, I, A, s, S, "0") are interpreted by the backend's
   constraint parser elsewhere in this file.  Terminated by { -1 }.  */
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },
    { INDEX_op_muls2_i32, { "r", "r", "rZ", "rJ" } },

    { INDEX_op_ld8u_i64, { "R", "r" } },
    { INDEX_op_ld8s_i64, { "R", "r" } },
    { INDEX_op_ld16u_i64, { "R", "r" } },
    { INDEX_op_ld16s_i64, { "R", "r" } },
    { INDEX_op_ld32u_i64, { "R", "r" } },
    { INDEX_op_ld32s_i64, { "R", "r" } },
    { INDEX_op_ld_i64, { "R", "r" } },
    { INDEX_op_st8_i64, { "RZ", "r" } },
    { INDEX_op_st16_i64, { "RZ", "r" } },
    { INDEX_op_st32_i64, { "RZ", "r" } },
    { INDEX_op_st_i64, { "RZ", "r" } },

    { INDEX_op_add_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_mul_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_div_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_divu_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_sub_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_and_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_andc_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_or_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_orc_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_xor_i64, { "R", "RZ", "RJ" } },

    { INDEX_op_shl_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_shr_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_sar_i64, { "R", "RZ", "RJ" } },

    { INDEX_op_neg_i64, { "R", "RJ" } },
    { INDEX_op_not_i64, { "R", "RJ" } },

    { INDEX_op_ext32s_i64, { "R", "r" } },
    { INDEX_op_ext32u_i64, { "R", "r" } },
    { INDEX_op_trunc_shr_i32, { "r", "R" } },

    { INDEX_op_brcond_i64, { "RZ", "RJ" } },
    { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } },

    { INDEX_op_add2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },
    { INDEX_op_sub2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },

    { INDEX_op_qemu_ld_i32, { "r", "A" } },
    { INDEX_op_qemu_ld_i64, { "R", "A" } },
    { INDEX_op_qemu_st_i32, { "sZ", "A" } },
    { INDEX_op_qemu_st_i64, { "SZ", "A" } },

    { -1 },
};
1517
e4d58b41 1518static void tcg_target_init(TCGContext *s)
8289b279
BS
1519{
1520 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
34b1a49c
RH
1521 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, ALL_64);
1522
8289b279 1523 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
b3db8758
BS
1524 (1 << TCG_REG_G1) |
1525 (1 << TCG_REG_G2) |
1526 (1 << TCG_REG_G3) |
1527 (1 << TCG_REG_G4) |
1528 (1 << TCG_REG_G5) |
1529 (1 << TCG_REG_G6) |
1530 (1 << TCG_REG_G7) |
8289b279
BS
1531 (1 << TCG_REG_O0) |
1532 (1 << TCG_REG_O1) |
1533 (1 << TCG_REG_O2) |
1534 (1 << TCG_REG_O3) |
1535 (1 << TCG_REG_O4) |
1536 (1 << TCG_REG_O5) |
8289b279
BS
1537 (1 << TCG_REG_O7));
1538
1539 tcg_regset_clear(s->reserved_regs);
375816f8
RH
1540 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1541 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1542 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1543 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1544 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1545 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1546 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1547 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1548
8289b279
BS
1549 tcg_add_target_add_op_defs(sparc_op_defs);
1550}
cb1977d3 1551
9f44adc5 1552#if SPARC64
cb1977d3 1553# define ELF_HOST_MACHINE EM_SPARCV9
9b9c37c3 1554#else
cb1977d3
RH
1555# define ELF_HOST_MACHINE EM_SPARC32PLUS
1556# define ELF_HOST_FLAGS EF_SPARC_32PLUS
cb1977d3
RH
1557#endif
1558
/* Layout of the unwind info handed to the JIT debug interface: the
   common header followed by the raw FDE instruction bytes below.  */
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[SPARC64 ? 4 : 2]; /* DW_CFA_def_cfa(_register) */
    uint8_t fde_win_save;                 /* DW_CFA_GNU_window_save */
    uint8_t fde_ret_save[3];              /* DW_CFA_register o7, i7 */
} DebugFrame;
1565
ae18b28d
RH
/* The actual unwind data registered by tcg_register_jit().  */
static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,  /* sleb128 of -wordsize */
    .h.cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
#if SPARC64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)   /* uleb128 encoding of 2047 */
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};
1588
/* Register the code_gen buffer with the JIT debug interface, attaching
   the unwind description in debug_frame.  */
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
5bbd2cae
RH
1593
1594void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
1595{
1596 uint32_t *ptr = (uint32_t *)jmp_addr;
c8fc56ce 1597 uintptr_t disp = addr - jmp_addr;
5bbd2cae
RH
1598
1599 /* We can reach the entire address space for 32-bit. For 64-bit
1600 the code_gen_buffer can't be larger than 2GB. */
c8fc56ce 1601 assert(disp == (int32_t)disp);
5bbd2cae 1602
c8fc56ce 1603 *ptr = CALL | (uint32_t)disp >> 2;
5bbd2cae
RH
1604 flush_icache_range(jmp_addr, jmp_addr + 4);
1605}