]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/sparc/tcg-target.c
tcg-sparc: Fix ld64 for 32-bit mode
[mirror_qemu.git] / tcg / sparc / tcg-target.c
CommitLineData
8289b279
BS
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
3cf246f0
RH
25#include "tcg-be-null.h"
26
d4a9eb1f 27#ifndef NDEBUG
8289b279
BS
28static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
29 "%g0",
30 "%g1",
31 "%g2",
32 "%g3",
33 "%g4",
34 "%g5",
35 "%g6",
36 "%g7",
37 "%o0",
38 "%o1",
39 "%o2",
40 "%o3",
41 "%o4",
42 "%o5",
43 "%o6",
44 "%o7",
45 "%l0",
46 "%l1",
47 "%l2",
48 "%l3",
49 "%l4",
50 "%l5",
51 "%l6",
52 "%l7",
53 "%i0",
54 "%i1",
55 "%i2",
56 "%i3",
57 "%i4",
58 "%i5",
59 "%i6",
60 "%i7",
61};
d4a9eb1f 62#endif
8289b279 63
/* Define some temporary registers.  T2 is used for constant generation. */
#define TCG_REG_T1 TCG_REG_G1
#define TCG_REG_T2 TCG_REG_O7

/* Register holding GUEST_BASE in user-mode emulation.  %g0 is hardwired
   to zero on SPARC, so the !CONFIG_USE_GUEST_BASE fallback reads as 0. */
#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG TCG_REG_G0
#endif
e141ab52 73
/* Register allocation preference order.  The %l and %i registers come
   first -- presumably because the register window preserves them across
   calls -- followed by the remaining usable globals and the %o registers,
   which double as outgoing call arguments.  (NOTE: rationale inferred
   from SPARC calling convention -- confirm against backend docs.) */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};
103
/* Integer argument registers for outgoing calls (%o0-%o5). */
static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};
112
/* Registers in which call results are returned to us. */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};
119
/* SPARC instruction-word field encodings. */
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

/* Immediate operand fields; bit 13 is the "i" (use-immediate) flag.
   The OFF16/OFF19 macros encode word-aligned branch displacements. */
#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)

/* Integer condition codes for Bicc/BPcc/MOVcc. */
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

/* Register conditions for BPr/MOVr (test a register against zero). */
#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

/* Condition-code selector for MOVcc. */
#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

/* Condition-code selector, prediction and annul bits for BPcc. */
#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

/* ALU opcodes (op=2 with the op3 field). */
#define ARITH_ADD   (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND   (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN  (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR    (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC  (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN   (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR   (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB   (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX  (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX  (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL  (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV  (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV  (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX  (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR  (INSN_OP(2) | INSN_OP3(0x2f))

/* Shifts; bit 12 selects the 64-bit (X) forms. */
#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY     (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL    (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE    (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI   (INSN_OP(0) | INSN_OP2(0x4))
#define CALL    INSN_OP(1)

/* Loads and stores (op=3); the *A forms take an explicit ASI field. */
#define LDUB    (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB    (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH    (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH    (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW    (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW    (INSN_OP(3) | INSN_OP3(0x08))
#define LDX     (INSN_OP(3) | INSN_OP3(0x0b))
#define STB     (INSN_OP(3) | INSN_OP3(0x05))
#define STH     (INSN_OP(3) | INSN_OP3(0x06))
#define STW     (INSN_OP(3) | INSN_OP3(0x04))
#define STX     (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA   (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA   (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA   (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA   (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA   (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA   (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA    (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA    (INSN_OP(3) | INSN_OP3(0x15))
#define STHA    (INSN_OP(3) | INSN_OP3(0x16))
#define STWA    (INSN_OP(3) | INSN_OP3(0x14))
#define STXA    (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

/* Little-endian accessors built from the alternate-space instructions
   with the primary-little ASI. */
#define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE  (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE  (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE  (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE  (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

a115f3ea
RH
245static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
246{
247 return (val << ((sizeof(tcg_target_long) * 8 - bits))
248 >> (sizeof(tcg_target_long) * 8 - bits)) == val;
249}
250
/* Return true if VAL's significant bits all lie within the low BITS
   bits, i.e. masking to BITS bits leaves VAL unchanged.  */
static inline int check_fit_i32(uint32_t val, unsigned int bits)
{
    uint32_t mask = bits >= 32 ? UINT32_MAX : (UINT32_C(1) << bits) - 1;
    return (val & mask) == val;
}
255
/* Apply relocation TYPE at CODE_PTR with the given VALUE + ADDEND.
   R_SPARC_32 stores an absolute 32-bit value; the WDISP16/WDISP19
   types patch the branch-displacement field of the instruction in
   place, aborting if the pc-relative offset does not fit.  */
static void patch_reloc(uint8_t *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    uint32_t insn;
    value += addend;
    switch (type) {
    case R_SPARC_32:
        if (value != (uint32_t)value) {
            tcg_abort();
        }
        *(uint32_t *)code_ptr = value;
        break;
    case R_SPARC_WDISP16:
        /* 16-bit word displacement, pc-relative.  */
        value -= (intptr_t)code_ptr;
        if (!check_fit_tl(value >> 2, 16)) {
            tcg_abort();
        }
        /* Clear the old displacement field, then insert the new one.  */
        insn = *(uint32_t *)code_ptr;
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(value);
        *(uint32_t *)code_ptr = insn;
        break;
    case R_SPARC_WDISP19:
        /* 19-bit word displacement, pc-relative.  */
        value -= (intptr_t)code_ptr;
        if (!check_fit_tl(value >> 2, 19)) {
            tcg_abort();
        }
        insn = *(uint32_t *)code_ptr;
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(value);
        *(uint32_t *)code_ptr = insn;
        break;
    default:
        tcg_abort();
    }
}
292
293/* parse target specific constraints */
294static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
295{
296 const char *ct_str;
297
298 ct_str = *pct_str;
299 switch (ct_str[0]) {
300 case 'r':
301 ct->ct |= TCG_CT_REG;
302 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
303 break;
304 case 'L': /* qemu_ld/st constraint */
305 ct->ct |= TCG_CT_REG;
306 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
307 // Helper args
308 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
309 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
310 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
311 break;
312 case 'I':
313 ct->ct |= TCG_CT_CONST_S11;
314 break;
315 case 'J':
316 ct->ct |= TCG_CT_CONST_S13;
317 break;
318 case 'Z':
319 ct->ct |= TCG_CT_CONST_ZERO;
320 break;
321 default:
322 return -1;
323 }
324 ct_str++;
325 *pct_str = ct_str;
326 return 0;
327}
328
329/* test if a constant matches the constraint */
330static inline int tcg_target_const_match(tcg_target_long val,
331 const TCGArgConstraint *arg_ct)
332{
333 int ct = arg_ct->ct;
334
335 if (ct & TCG_CT_CONST) {
336 return 1;
337 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
338 return 1;
339 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
340 return 1;
341 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
342 return 1;
343 } else {
344 return 0;
345 }
346}
347
26cc915c
BS
348static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
349 int op)
350{
351 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
352 INSN_RS2(rs2));
353}
354
6f41b777
BS
355static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
356 uint32_t offset, int op)
26cc915c
BS
357{
358 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
359 INSN_IMM13(offset));
360}
361
ba225198
RH
362static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
363 int val2, int val2const, int op)
364{
365 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
366 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
367}
368
2a534aff
RH
369static inline void tcg_out_mov(TCGContext *s, TCGType type,
370 TCGReg ret, TCGReg arg)
8289b279 371{
dda73c78
RH
372 if (ret != arg) {
373 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
374 }
26cc915c
BS
375}
376
377static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
378{
379 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
8289b279
BS
380}
381
b101234a
BS
382static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
383{
384 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
385}
386
387static inline void tcg_out_movi_imm32(TCGContext *s, int ret, uint32_t arg)
8289b279 388{
4a09aa89 389 if (check_fit_tl(arg, 13))
b101234a 390 tcg_out_movi_imm13(s, ret, arg);
8289b279 391 else {
26cc915c 392 tcg_out_sethi(s, ret, arg);
8289b279 393 if (arg & 0x3ff)
b101234a 394 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
8289b279
BS
395 }
396}
397
/* Load an arbitrary constant of the given TYPE into RET, choosing the
   shortest available instruction sequence. */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                TCGReg ret, tcg_target_long arg)
{
    /* All 32-bit constants, as well as 64-bit constants with
       no high bits set go through movi_imm32. */
    if (TCG_TARGET_REG_BITS == 32
        || type == TCG_TYPE_I32
        || (arg & ~(tcg_target_long)0xffffffff) == 0) {
        tcg_out_movi_imm32(s, ret, arg);
    } else if (check_fit_tl(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits. */
        tcg_out_movi_imm13(s, ret, arg);
    } else if (check_fit_tl(arg, 32)) {
        /* A 32-bit constant sign-extended to 64-bits.  sethi of the
           complement followed by xor with the complemented low part
           yields the sign-extended value without a 64-bit shift. */
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
    } else {
        /* General 64-bit case: build high half in RET, low half in T2,
           then combine.  (T2 is reserved for constant generation.) */
        tcg_out_movi_imm32(s, ret, arg >> (TCG_TARGET_REG_BITS / 2));
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_movi_imm32(s, TCG_REG_T2, arg);
        tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
    }
}
421
a0ce341a
RH
422static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
423 int a2, int op)
8289b279 424{
a0ce341a 425 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
8289b279
BS
426}
427
/* Emit a load/store at [ADDR + OFFSET].  Uses the 13-bit immediate
   form when OFFSET fits; otherwise materializes OFFSET in T1 and uses
   register+register addressing. */
static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
                                int offset, int op)
{
    if (check_fit_tl(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}
439
2a534aff 440static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
a05b5b9b 441 TCGReg arg1, intptr_t arg2)
8289b279 442{
a0ce341a 443 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
8289b279
BS
444}
445
2a534aff 446static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
a05b5b9b 447 TCGReg arg1, intptr_t arg2)
8289b279 448{
a0ce341a
RH
449 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
450}
451
/* Load the pointer stored at absolute address ARG into RET: all but the
   low 10 bits of ARG are materialized into RET, then the load uses
   RET + (low 10 bits).
   NOTE(review): when ARG fits in 10 bits the movi is skipped and the
   load uses RET's previous contents as base; presumably callers never
   pass such small absolute addresses -- confirm against call sites. */
static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    if (!check_fit_tl(arg, 10)) {
        tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
    }
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
}
460
/* wr %g0, rs, %y -- write RS to the Y register (used by 32-bit mul/div). */
static inline void tcg_out_sety(TCGContext *s, int rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}
465
/* rd %y, rd -- read the Y register into RD. */
static inline void tcg_out_rdy(TCGContext *s, int rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}
470
8289b279
BS
471static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
472{
473 if (val != 0) {
57e49b40 474 if (check_fit_tl(val, 13))
8289b279 475 tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
f5ef6aac 476 else {
375816f8
RH
477 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
478 tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);
f5ef6aac 479 }
8289b279
BS
480 }
481}
482
/* rd = rs & val.
   NOTE(review): VAL == 0 emits nothing at all, leaving RD unchanged
   rather than zeroed -- this is not a general AND.  The callers in this
   file (the TLB masking code) always pass nonzero masks. */
static inline void tcg_out_andi(TCGContext *s, int rd, int rs,
                                tcg_target_long val)
{
    if (val != 0) {
        if (check_fit_tl(val, 13))
            tcg_out_arithi(s, rd, rs, val, ARITH_AND);
        else {
            /* Mask too wide for an immediate: build it in T1 first.  */
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T1, val);
            tcg_out_arith(s, rd, rs, TCG_REG_T1, ARITH_AND);
        }
    }
}
495
/* Emit a 32-bit division rd = rs1 / val2 (signed or unsigned per UNS).
   The UDIV/SDIV instructions take the high 32 bits of the dividend
   from the Y register, so Y must first be set to the zero/sign
   extension of RS1. */
static void tcg_out_div32(TCGContext *s, int rd, int rs1,
                          int val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits. */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        /* Broadcast the sign bit of RS1 into T1, then into Y.  */
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
510
/* Emit a nop (encoded as "sethi 0, %g0"). */
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_sethi(s, TCG_REG_G0, 0);
}
515
/* Map TCG comparison conditions to SPARC integer branch conditions. */
static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};
528
/* Map (signed) TCG conditions to register-vs-zero conditions for
   BPr/MOVr; unsigned conditions have no rcond equivalent. */
static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};
537
a115f3ea
RH
538static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
539{
540 tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
541}
542
/* Emit a BPcc branch to LABEL.  Resolved labels get their displacement
   directly; unresolved ones keep whatever displacement is already in
   the instruction (so retranslation is stable) and queue a reloc. */
static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label)
{
    TCGLabel *l = &s->labels[label];
    int off19;

    if (l->has_value) {
        off19 = INSN_OFF19(l->u.value - (unsigned long)s->code_ptr);
    } else {
        /* Make sure to preserve destinations during retranslation. */
        off19 = *(uint32_t *)s->code_ptr & INSN_OFF19(-1);
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}
557
56f4927e
RH
558static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
559{
ba225198 560 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
56f4927e
RH
561}
562
/* 32-bit conditional branch: compare, branch on icc, and fill the
   delay slot with a nop. */
static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGArg arg1,
                               TCGArg arg2, int const_arg2, int label)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, label);
    tcg_out_nop(s);
}
570
/* MOVcc: conditionally move V1 (register or 11-bit immediate) into RET
   based on the condition codes selected by CC (MOVCC_ICC/MOVCC_XCC).
   Note the condition is encoded in the rs1 field for this instruction. */
static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGArg ret,
                          TCGArg v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}
578
/* 32-bit conditional move: compare C1 with C2 then MOVcc on icc. */
static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const,
                                TCGArg v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}
586
#if TCG_TARGET_REG_BITS == 64
/* 64-bit conditional branch.  Signed comparisons against zero use the
   BPr (branch-on-register) form and skip the compare entirely;
   everything else compares and branches on xcc. */
static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGArg arg1,
                               TCGArg arg2, int const_arg2, int label)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        TCGLabel *l = &s->labels[label];
        int off16;

        if (l->has_value) {
            off16 = INSN_OFF16(l->u.value - (unsigned long)s->code_ptr);
        } else {
            /* Make sure to preserve destinations during retranslation. */
            off16 = *(uint32_t *)s->code_ptr & INSN_OFF16(-1);
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label);
    }
    /* Delay slot.  */
    tcg_out_nop(s);
}
ded37f0d 611
/* MOVr: conditionally move V1 (register or 10-bit immediate) into RET
   based on comparing register C1 against zero. */
static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGArg ret, TCGArg c1,
                         TCGArg v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}
619
/* 64-bit conditional move: compare C1 with C2 then MOVcc on xcc, or a
   compare-free MOVr when comparing signed against zero. */
static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const,
                                TCGArg v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well. */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_tl(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}
#else
/* 32-bit host: double-word conditional branch on the pair (AH:AL)
   versus (BH:BL).  Branches to LABEL_DEST when the condition holds;
   LABEL_NEXT is the local fall-through join point. */
static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
                                TCGArg al, TCGArg ah,
                                TCGArg bl, int blconst,
                                TCGArg bh, int bhconst, int label_dest)
{
    int scond, label_next = gen_new_label();

    tcg_out_cmp(s, ah, bh, bhconst);

    /* Note that we fill one of the delay slots with the second compare. */
    switch (cond) {
    case TCG_COND_EQ:
        /* High parts differ: not equal; else decide on the low parts.  */
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_bpcc(s, COND_E, BPCC_ICC | BPCC_PT, label_dest);
        break;

    case TCG_COND_NE:
        /* Either half differing suffices.  */
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_dest);
        tcg_out_cmp(s, al, bl, blconst);
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_dest);
        break;

    default:
        /* Strict inequality on the high parts decides outright; equal
           high parts fall through to an unsigned test of the low parts. */
        scond = tcg_cond_to_bcond[tcg_high_cond(cond)];
        tcg_out_bpcc(s, scond, BPCC_ICC | BPCC_PT, label_dest);
        tcg_out_nop(s);
        tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_next);
        tcg_out_cmp(s, al, bl, blconst);
        scond = tcg_cond_to_bcond[tcg_unsigned_cond(cond)];
        tcg_out_bpcc(s, scond, BPCC_ICC | BPCC_PT, label_dest);
        break;
    }
    tcg_out_nop(s);

    tcg_out_label(s, label_next, s->code_ptr);
}
#endif
674
/* Set RET to the boolean result of (C1 cond C2) for 32-bit operands. */
static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDX/SUBX. */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit. */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero. */
        if (c2 != 0) {
            tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
        }
        c1 = TCG_REG_G0, c2 = ret, c2const = 0;
        /* cmp %g0, ret sets carry iff ret != 0, so EQ maps to GEU.  */
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
	break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register. */
        if (!c2const || c2 == 0) {
            TCGArg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        /* General case: clear RET, then conditionally move 1 into it. */
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    /* Carry-bit cases: addx 0 yields the carry, subx -1 its negation
       negated back -- i.e. 1 when the condition holds. */
    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
    }
}
724
#if TCG_TARGET_REG_BITS == 64
/* Set RET to the boolean result of (C1 cond C2) for 64-bit operands. */
static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
                                TCGArg c1, TCGArg c2, int c2const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output. */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}
#else
/* 32-bit host: setcond on the 64-bit pair (AH:AL) vs (BH:BL). */
static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
                                 TCGArg al, TCGArg ah,
                                 TCGArg bl, int blconst,
                                 TCGArg bh, int bhconst)
{
    int tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (ret != ah && (bhconst || ret != bh)) {
        tmp = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (bl == 0 && bh == 0) {
            /* Comparison against zero: or the halves together and test. */
            if (cond == TCG_COND_EQ) {
                tcg_out_arith(s, TCG_REG_G0, al, ah, ARITH_ORCC);
                tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
            } else {
                tcg_out_arith(s, ret, al, ah, ARITH_ORCC);
            }
        } else {
            tcg_out_setcond_i32(s, cond, tmp, al, bl, blconst);
            tcg_out_cmp(s, ah, bh, bhconst);
            tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
        }
        /* Override the low-part result when the high parts differ.  */
        tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, cond == TCG_COND_NE, 1);
        break;

    default:
        /* <= : ah < bh | (ah == bh && al <= bl) */
        tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), tmp, al, bl, blconst);
        tcg_out_cmp(s, ah, bh, bhconst);
        tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
        tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, 0, 1);
        tcg_out_movcc(s, tcg_high_cond(cond), MOVCC_ICC, ret, 1, 1);
        break;
    }
}
#endif
/* Double-word add/subtract: (RH:RL) = (AH:AL) op (BH:BL), where OPL is
   the carry-setting low-part opcode and OPH the carry-consuming
   high-part opcode (e.g. ADDCC/ADDX). */
static void tcg_out_addsub2(TCGContext *s, TCGArg rl, TCGArg rh,
                            TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                            TCGArg bh, int bhconst, int opl, int oph)
{
    TCGArg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set. */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}
dbfe80e1 798
/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer. */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees. */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    /* save %sp, -frame_size, %sp -- open a new register window.  */
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifdef CONFIG_USE_GUEST_BASE
    /* Pin GUEST_BASE in its dedicated register for the whole run.  */
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* Jump to the TB code address passed in %i1 (%o1 of our caller).  */
    tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I1) |
              INSN_RS2(TCG_REG_G0));
    /* delay slot */
    tcg_out_nop(s);

    /* No epilogue required.  We issue ret + restore directly in the TB. */
}
833
#if defined(CONFIG_SOFTMMU)

/* Slow-path MMU helpers, indexed by log2 of the access size.
   helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};
f5ef6aac 853
/* Perform the TLB load and compare.

   Inputs:
   ADDRLO_IDX contains the index into ARGS of the low part of the
   address; the high part of the address is at ADDR_LOW_IDX+1.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static int tcg_out_tlb_load(TCGContext *s, int addrlo_idx, int mem_index,
                            int s_bits, const TCGArg *args, int which)
{
    const int addrlo = args[addrlo_idx];
    const int r0 = TCG_REG_O0;
    const int r1 = TCG_REG_O1;
    const int r2 = TCG_REG_O2;
    int addr = addrlo;
    int tlb_ofs;

    if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
        /* Assemble the 64-bit address in R0. */
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        tcg_out_arithi(s, r1, args[addrlo_idx + 1], 32, SHIFT_SLLX);
        tcg_out_arith(s, r0, r0, r1, ARITH_OR);
    }

    /* Shift the page number down to tlb-entry. */
    tcg_out_arithi(s, r1, addrlo,
                   TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment. */
    tcg_out_andi(s, r0, addr, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Compute tlb index, modulo tlb size. */
    tcg_out_andi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* Relative to the current ENV. */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend. */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        tcg_out_addi(s, r1, tlb_ofs);
        tlb_ofs = 0;
    }

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now. */
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        return r0;
    }
    return addrlo;
}
#endif /* CONFIG_SOFTMMU */
920
/* Load opcodes indexed by sizeop: low two bits are log2 of the access
   size, bit 2 selects the sign-extending variants.  The _LE forms are
   used when the guest is the opposite endianness of the (big-endian)
   host. */
static const int qemu_ld_opc[8] = {
#ifdef TARGET_WORDS_BIGENDIAN
    LDUB, LDUH, LDUW, LDX, LDSB, LDSH, LDSW, LDX
#else
    LDUB, LDUH_LE, LDUW_LE, LDX_LE, LDSB, LDSH_LE, LDSW_LE, LDX_LE
#endif
};

/* Store opcodes indexed by log2 of the access size. */
static const int qemu_st_opc[4] = {
#ifdef TARGET_WORDS_BIGENDIAN
    STB, STH, STW, STX
#else
    STB, STH_LE, STW_LE, STX_LE
#endif
};
bffe1431 936
a0ce341a 937static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int sizeop)
f5ef6aac 938{
a0ce341a 939 int addrlo_idx = 1, datalo, datahi, addr_reg;
f5ef6aac 940#if defined(CONFIG_SOFTMMU)
a0ce341a
RH
941 int memi_idx, memi, s_bits, n;
942 uint32_t *label_ptr[2];
f5ef6aac
BS
943#endif
944
a0ce341a
RH
945 datahi = datalo = args[0];
946 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
947 datahi = args[1];
948 addrlo_idx = 2;
949 }
f5ef6aac 950
f5ef6aac 951#if defined(CONFIG_SOFTMMU)
a0ce341a
RH
952 memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
953 memi = args[memi_idx];
954 s_bits = sizeop & 3;
955
956 addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, s_bits, args,
957 offsetof(CPUTLBEntry, addr_read));
958
959 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
960 int reg64;
961
962 /* bne,pn %[xi]cc, label0 */
963 label_ptr[0] = (uint32_t *)s->code_ptr;
a115f3ea
RH
964 tcg_out_bpcc0(s, COND_NE, BPCC_PN
965 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
e7bc9004 966 tcg_out_nop(s);
a0ce341a
RH
967
968 /* TLB Hit. */
969 /* Load all 64-bits into an O/G register. */
970 reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
971 tcg_out_ldst_rr(s, reg64, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
972
973 /* Move the two 32-bit pieces into the destination registers. */
974 tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
975 if (reg64 != datalo) {
976 tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
977 }
f5ef6aac 978
a0ce341a
RH
979 /* b,a,pt label1 */
980 label_ptr[1] = (uint32_t *)s->code_ptr;
a115f3ea 981 tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
a0ce341a
RH
982 } else {
983 /* The fast path is exactly one insn. Thus we can perform the
984 entire TLB Hit in the (annulled) delay slot of the branch
985 over the TLB Miss case. */
986
987 /* beq,a,pt %[xi]cc, label0 */
988 label_ptr[0] = NULL;
989 label_ptr[1] = (uint32_t *)s->code_ptr;
a115f3ea
RH
990 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
991 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
a0ce341a
RH
992 /* delay slot */
993 tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_ld_opc[sizeop]);
994 }
53c37487 995
a0ce341a 996 /* TLB Miss. */
f5ef6aac 997
a0ce341a
RH
998 if (label_ptr[0]) {
999 *label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
1000 (unsigned long)label_ptr[0]);
1001 }
1002 n = 0;
1003 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
1004 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
1005 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
1006 args[addrlo_idx + 1]);
1007 }
1008 tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
1009 args[addrlo_idx]);
f5ef6aac 1010
53c37487 1011 /* qemu_ld_helper[s_bits](arg0, arg1) */
f5ef6aac
BS
1012 tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_ld_helpers[s_bits]
1013 - (tcg_target_ulong)s->code_ptr) >> 2)
1014 & 0x3fffffff));
a0ce341a
RH
1015 /* delay slot */
1016 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[n], memi);
1017
a0ce341a
RH
1018 n = tcg_target_call_oarg_regs[0];
1019 /* datalo = sign_extend(arg0) */
1020 switch (sizeop) {
f5ef6aac 1021 case 0 | 4:
a0ce341a
RH
1022 /* Recall that SRA sign extends from bit 31 through bit 63. */
1023 tcg_out_arithi(s, datalo, n, 24, SHIFT_SLL);
1024 tcg_out_arithi(s, datalo, datalo, 24, SHIFT_SRA);
f5ef6aac
BS
1025 break;
1026 case 1 | 4:
a0ce341a
RH
1027 tcg_out_arithi(s, datalo, n, 16, SHIFT_SLL);
1028 tcg_out_arithi(s, datalo, datalo, 16, SHIFT_SRA);
f5ef6aac
BS
1029 break;
1030 case 2 | 4:
a0ce341a 1031 tcg_out_arithi(s, datalo, n, 0, SHIFT_SRA);
f5ef6aac 1032 break;
a0ce341a
RH
1033 case 3:
1034 if (TCG_TARGET_REG_BITS == 32) {
1035 tcg_out_mov(s, TCG_TYPE_REG, datahi, n);
1036 tcg_out_mov(s, TCG_TYPE_REG, datalo, n + 1);
1037 break;
1038 }
1039 /* FALLTHRU */
f5ef6aac
BS
1040 case 0:
1041 case 1:
1042 case 2:
f5ef6aac
BS
1043 default:
1044 /* mov */
a0ce341a 1045 tcg_out_mov(s, TCG_TYPE_REG, datalo, n);
f5ef6aac
BS
1046 break;
1047 }
1048
a0ce341a
RH
1049 *label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
1050 (unsigned long)label_ptr[1]);
90cbed46 1051#else
a0ce341a
RH
1052 addr_reg = args[addrlo_idx];
1053 if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
375816f8
RH
1054 tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
1055 addr_reg = TCG_REG_T1;
a0ce341a
RH
1056 }
1057 if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
1058 int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
90cbed46 1059
c6f7e4fb
RH
1060 tcg_out_ldst_rr(s, reg64, addr_reg,
1061 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
1062 qemu_ld_opc[sizeop]);
f5ef6aac 1063
a0ce341a
RH
1064 tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
1065 if (reg64 != datalo) {
1066 tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
1067 }
1068 } else {
c6f7e4fb
RH
1069 tcg_out_ldst_rr(s, datalo, addr_reg,
1070 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
1071 qemu_ld_opc[sizeop]);
f5ef6aac 1072 }
a0ce341a 1073#endif /* CONFIG_SOFTMMU */
f5ef6aac
BS
1074}
1075
/* Emit code for a guest memory store.
 *
 * args layout mirrors tcg_out_qemu_ld: data register(s) first, then the
 * address register(s), then (softmmu only) the memory index.  "sizeop" is
 * log2 of the access size; stores never sign-extend, so bit 2 is unused.
 */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int sizeop)
{
    int addrlo_idx = 1, datalo, datahi, addr_reg;
#if defined(CONFIG_SOFTMMU)
    int memi_idx, memi, n, datafull;
    uint32_t *label_ptr;
#endif

    /* A 64-bit store on a 32-bit host consumes a register pair, which
       shifts the address operand(s) one slot to the right.  */
    datahi = datalo = args[0];
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        datahi = args[1];
        addrlo_idx = 2;
    }

#if defined(CONFIG_SOFTMMU)
    memi_idx = addrlo_idx + 1 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);
    memi = args[memi_idx];

    /* Emit the TLB comparison; sets condition codes and leaves the host
       address addend in TCG_REG_O1.  */
    addr_reg = tcg_out_tlb_load(s, addrlo_idx, memi, sizeop, args,
                                offsetof(CPUTLBEntry, addr_write));

    datafull = datalo;
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        /* Reconstruct the full 64-bit value.  */
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datafull = TCG_REG_O2;
    }

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = (uint32_t *)s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, datafull, addr_reg, TCG_REG_O1, qemu_st_opc[sizeop]);

    /* TLB Miss.  */

    /* Marshal env, guest address (one or two regs), then the data
       (two regs for a 64-bit value on a 32-bit host).  */
    n = 0;
    tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[n++], TCG_AREG0);
    if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                    args[addrlo_idx + 1]);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++],
                args[addrlo_idx]);
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datahi);
    }
    tcg_out_mov(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n++], datalo);

    /* qemu_st_helper[s_bits](arg0, arg1, arg2) */
    tcg_out32(s, CALL | ((((tcg_target_ulong)qemu_st_helpers[sizeop]
                           - (tcg_target_ulong)s->code_ptr) >> 2)
                         & 0x3fffffff));
    /* delay slot -- the mem index is the last helper argument.  */
    tcg_out_movi(s, TCG_TYPE_REG, tcg_target_call_iarg_regs[n], memi);

    /* Back-patch the fast-path branch to land past the miss code.  */
    *label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
                             (unsigned long)label_ptr);
#else
    /* User-only: access the address directly (plus GUEST_BASE if set).  */
    addr_reg = args[addrlo_idx];
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        /* Zero-extend the 32-bit guest address via a zero-count SRL.  */
        tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
        addr_reg = TCG_REG_T1;
    }
    if (TCG_TARGET_REG_BITS == 32 && sizeop == 3) {
        /* Merge the 32-bit halves into one register for a single STX.  */
        tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
        tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
        tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
        datalo = TCG_REG_O2;
    }
    tcg_out_ldst_rr(s, datalo, addr_reg,
                    (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[sizeop]);
#endif /* CONFIG_SOFTMMU */
}
1156
a9751609 1157static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
8289b279
BS
1158 const int *const_args)
1159{
1160 int c;
1161
1162 switch (opc) {
1163 case INDEX_op_exit_tb:
b3db8758
BS
1164 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
1165 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_I7) |
8289b279 1166 INSN_IMM13(8));
b3db8758
BS
1167 tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
1168 INSN_RS2(TCG_REG_G0));
8289b279
BS
1169 break;
1170 case INDEX_op_goto_tb:
1171 if (s->tb_jmp_offset) {
1172 /* direct jump method */
5bbd2cae 1173 uint32_t old_insn = *(uint32_t *)s->code_ptr;
8289b279 1174 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
5bbd2cae
RH
1175 /* Make sure to preserve links during retranslation. */
1176 tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
8289b279
BS
1177 } else {
1178 /* indirect jump method */
375816f8
RH
1179 tcg_out_ld_ptr(s, TCG_REG_T1,
1180 (tcg_target_long)(s->tb_next + args[0]));
1181 tcg_out32(s, JMPL | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_T1) |
b3db8758 1182 INSN_RS2(TCG_REG_G0));
8289b279 1183 }
53cd9273 1184 tcg_out_nop(s);
8289b279
BS
1185 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1186 break;
1187 case INDEX_op_call:
375816f8 1188 if (const_args[0]) {
bffe1431
BS
1189 tcg_out32(s, CALL | ((((tcg_target_ulong)args[0]
1190 - (tcg_target_ulong)s->code_ptr) >> 2)
1191 & 0x3fffffff));
375816f8
RH
1192 } else {
1193 tcg_out_ld_ptr(s, TCG_REG_T1,
bffe1431 1194 (tcg_target_long)(s->tb_next + args[0]));
375816f8 1195 tcg_out32(s, JMPL | INSN_RD(TCG_REG_O7) | INSN_RS1(TCG_REG_T1) |
bffe1431 1196 INSN_RS2(TCG_REG_G0));
8289b279 1197 }
4c3204cb
RH
1198 /* delay slot */
1199 tcg_out_nop(s);
8289b279 1200 break;
8289b279 1201 case INDEX_op_br:
a115f3ea 1202 tcg_out_bpcc(s, COND_A, BPCC_PT, args[0]);
f5ef6aac 1203 tcg_out_nop(s);
8289b279
BS
1204 break;
1205 case INDEX_op_movi_i32:
1206 tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
1207 break;
1208
a212ea75 1209#if TCG_TARGET_REG_BITS == 64
8289b279 1210#define OP_32_64(x) \
ba225198
RH
1211 glue(glue(case INDEX_op_, x), _i32): \
1212 glue(glue(case INDEX_op_, x), _i64)
8289b279
BS
1213#else
1214#define OP_32_64(x) \
ba225198 1215 glue(glue(case INDEX_op_, x), _i32)
8289b279 1216#endif
ba225198 1217 OP_32_64(ld8u):
8289b279
BS
1218 tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
1219 break;
ba225198 1220 OP_32_64(ld8s):
8289b279
BS
1221 tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
1222 break;
ba225198 1223 OP_32_64(ld16u):
8289b279
BS
1224 tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
1225 break;
ba225198 1226 OP_32_64(ld16s):
8289b279
BS
1227 tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
1228 break;
1229 case INDEX_op_ld_i32:
a212ea75 1230#if TCG_TARGET_REG_BITS == 64
53cd9273 1231 case INDEX_op_ld32u_i64:
8289b279
BS
1232#endif
1233 tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
1234 break;
ba225198 1235 OP_32_64(st8):
8289b279
BS
1236 tcg_out_ldst(s, args[0], args[1], args[2], STB);
1237 break;
ba225198 1238 OP_32_64(st16):
8289b279
BS
1239 tcg_out_ldst(s, args[0], args[1], args[2], STH);
1240 break;
1241 case INDEX_op_st_i32:
a212ea75 1242#if TCG_TARGET_REG_BITS == 64
53cd9273 1243 case INDEX_op_st32_i64:
8289b279
BS
1244#endif
1245 tcg_out_ldst(s, args[0], args[1], args[2], STW);
1246 break;
ba225198 1247 OP_32_64(add):
53cd9273 1248 c = ARITH_ADD;
ba225198
RH
1249 goto gen_arith;
1250 OP_32_64(sub):
8289b279 1251 c = ARITH_SUB;
ba225198
RH
1252 goto gen_arith;
1253 OP_32_64(and):
8289b279 1254 c = ARITH_AND;
ba225198 1255 goto gen_arith;
dc69960d
RH
1256 OP_32_64(andc):
1257 c = ARITH_ANDN;
1258 goto gen_arith;
ba225198 1259 OP_32_64(or):
8289b279 1260 c = ARITH_OR;
ba225198 1261 goto gen_arith;
18c8f7a3
RH
1262 OP_32_64(orc):
1263 c = ARITH_ORN;
1264 goto gen_arith;
ba225198 1265 OP_32_64(xor):
8289b279 1266 c = ARITH_XOR;
ba225198 1267 goto gen_arith;
8289b279
BS
1268 case INDEX_op_shl_i32:
1269 c = SHIFT_SLL;
1fd95946
RH
1270 do_shift32:
1271 /* Limit immediate shift count lest we create an illegal insn. */
1272 tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c);
1273 break;
8289b279
BS
1274 case INDEX_op_shr_i32:
1275 c = SHIFT_SRL;
1fd95946 1276 goto do_shift32;
8289b279
BS
1277 case INDEX_op_sar_i32:
1278 c = SHIFT_SRA;
1fd95946 1279 goto do_shift32;
8289b279
BS
1280 case INDEX_op_mul_i32:
1281 c = ARITH_UMUL;
ba225198 1282 goto gen_arith;
583d1215 1283
4b5a85c1
RH
1284 OP_32_64(neg):
1285 c = ARITH_SUB;
1286 goto gen_arith1;
be6551b1
RH
1287 OP_32_64(not):
1288 c = ARITH_ORN;
1289 goto gen_arith1;
4b5a85c1 1290
583d1215
RH
1291 case INDEX_op_div_i32:
1292 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
1293 break;
1294 case INDEX_op_divu_i32:
1295 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
1296 break;
1297
1298 case INDEX_op_rem_i32:
1299 case INDEX_op_remu_i32:
375816f8 1300 tcg_out_div32(s, TCG_REG_T1, args[1], args[2], const_args[2],
583d1215 1301 opc == INDEX_op_remu_i32);
375816f8 1302 tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
583d1215 1303 ARITH_UMUL);
375816f8 1304 tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
583d1215 1305 break;
8289b279
BS
1306
1307 case INDEX_op_brcond_i32:
1da92db2
BS
1308 tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
1309 args[3]);
8289b279 1310 break;
dbfe80e1
RH
1311 case INDEX_op_setcond_i32:
1312 tcg_out_setcond_i32(s, args[3], args[0], args[1],
1313 args[2], const_args[2]);
1314 break;
ded37f0d
RH
1315 case INDEX_op_movcond_i32:
1316 tcg_out_movcond_i32(s, args[5], args[0], args[1],
1317 args[2], const_args[2], args[3], const_args[3]);
1318 break;
dbfe80e1 1319
56f4927e
RH
1320#if TCG_TARGET_REG_BITS == 32
1321 case INDEX_op_brcond2_i32:
1322 tcg_out_brcond2_i32(s, args[4], args[0], args[1],
1323 args[2], const_args[2],
1324 args[3], const_args[3], args[5]);
1325 break;
dbfe80e1
RH
1326 case INDEX_op_setcond2_i32:
1327 tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
1328 args[3], const_args[3],
1329 args[4], const_args[4]);
1330 break;
803d805b
RH
1331#endif
1332
7a3766f3 1333 case INDEX_op_add2_i32:
4ec28e25
RH
1334 tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
1335 args[4], const_args[4], args[5], const_args[5],
1336 ARITH_ADDCC, ARITH_ADDX);
7a3766f3
RH
1337 break;
1338 case INDEX_op_sub2_i32:
4ec28e25
RH
1339 tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
1340 args[4], const_args[4], args[5], const_args[5],
1341 ARITH_SUBCC, ARITH_SUBX);
7a3766f3
RH
1342 break;
1343 case INDEX_op_mulu2_i32:
1344 tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
1345 ARITH_UMUL);
1346 tcg_out_rdy(s, args[1]);
1347 break;
8289b279
BS
1348
1349 case INDEX_op_qemu_ld8u:
f5ef6aac 1350 tcg_out_qemu_ld(s, args, 0);
8289b279
BS
1351 break;
1352 case INDEX_op_qemu_ld8s:
f5ef6aac 1353 tcg_out_qemu_ld(s, args, 0 | 4);
8289b279
BS
1354 break;
1355 case INDEX_op_qemu_ld16u:
f5ef6aac 1356 tcg_out_qemu_ld(s, args, 1);
8289b279
BS
1357 break;
1358 case INDEX_op_qemu_ld16s:
f5ef6aac 1359 tcg_out_qemu_ld(s, args, 1 | 4);
8289b279 1360 break;
86feb1c8
RH
1361 case INDEX_op_qemu_ld32:
1362#if TCG_TARGET_REG_BITS == 64
8289b279 1363 case INDEX_op_qemu_ld32u:
86feb1c8 1364#endif
f5ef6aac 1365 tcg_out_qemu_ld(s, args, 2);
8289b279 1366 break;
30c0c76c 1367#if TCG_TARGET_REG_BITS == 64
8289b279 1368 case INDEX_op_qemu_ld32s:
f5ef6aac 1369 tcg_out_qemu_ld(s, args, 2 | 4);
8289b279 1370 break;
30c0c76c 1371#endif
a0ce341a
RH
1372 case INDEX_op_qemu_ld64:
1373 tcg_out_qemu_ld(s, args, 3);
1374 break;
8289b279 1375 case INDEX_op_qemu_st8:
f5ef6aac 1376 tcg_out_qemu_st(s, args, 0);
8289b279
BS
1377 break;
1378 case INDEX_op_qemu_st16:
f5ef6aac 1379 tcg_out_qemu_st(s, args, 1);
8289b279
BS
1380 break;
1381 case INDEX_op_qemu_st32:
f5ef6aac 1382 tcg_out_qemu_st(s, args, 2);
8289b279 1383 break;
a0ce341a
RH
1384 case INDEX_op_qemu_st64:
1385 tcg_out_qemu_st(s, args, 3);
1386 break;
8289b279 1387
a212ea75 1388#if TCG_TARGET_REG_BITS == 64
8289b279
BS
1389 case INDEX_op_movi_i64:
1390 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1391 break;
53cd9273
BS
1392 case INDEX_op_ld32s_i64:
1393 tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
1394 break;
8289b279
BS
1395 case INDEX_op_ld_i64:
1396 tcg_out_ldst(s, args[0], args[1], args[2], LDX);
1397 break;
1398 case INDEX_op_st_i64:
1399 tcg_out_ldst(s, args[0], args[1], args[2], STX);
1400 break;
1401 case INDEX_op_shl_i64:
1402 c = SHIFT_SLLX;
1fd95946
RH
1403 do_shift64:
1404 /* Limit immediate shift count lest we create an illegal insn. */
1405 tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c);
1406 break;
8289b279
BS
1407 case INDEX_op_shr_i64:
1408 c = SHIFT_SRLX;
1fd95946 1409 goto do_shift64;
8289b279
BS
1410 case INDEX_op_sar_i64:
1411 c = SHIFT_SRAX;
1fd95946 1412 goto do_shift64;
8289b279
BS
1413 case INDEX_op_mul_i64:
1414 c = ARITH_MULX;
ba225198 1415 goto gen_arith;
583d1215 1416 case INDEX_op_div_i64:
53cd9273 1417 c = ARITH_SDIVX;
ba225198 1418 goto gen_arith;
583d1215 1419 case INDEX_op_divu_i64:
8289b279 1420 c = ARITH_UDIVX;
ba225198 1421 goto gen_arith;
583d1215
RH
1422 case INDEX_op_rem_i64:
1423 case INDEX_op_remu_i64:
375816f8 1424 tcg_out_arithc(s, TCG_REG_T1, args[1], args[2], const_args[2],
583d1215 1425 opc == INDEX_op_rem_i64 ? ARITH_SDIVX : ARITH_UDIVX);
375816f8 1426 tcg_out_arithc(s, TCG_REG_T1, TCG_REG_T1, args[2], const_args[2],
583d1215 1427 ARITH_MULX);
375816f8 1428 tcg_out_arith(s, args[0], args[1], TCG_REG_T1, ARITH_SUB);
583d1215 1429 break;
cc6dfecf
RH
1430 case INDEX_op_ext32s_i64:
1431 if (const_args[1]) {
1432 tcg_out_movi(s, TCG_TYPE_I64, args[0], (int32_t)args[1]);
1433 } else {
1434 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
1435 }
1436 break;
1437 case INDEX_op_ext32u_i64:
1438 if (const_args[1]) {
1439 tcg_out_movi_imm32(s, args[0], args[1]);
1440 } else {
1441 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
1442 }
1443 break;
8289b279
BS
1444
1445 case INDEX_op_brcond_i64:
1da92db2
BS
1446 tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
1447 args[3]);
8289b279 1448 break;
dbfe80e1
RH
1449 case INDEX_op_setcond_i64:
1450 tcg_out_setcond_i64(s, args[3], args[0], args[1],
1451 args[2], const_args[2]);
1452 break;
ded37f0d
RH
1453 case INDEX_op_movcond_i64:
1454 tcg_out_movcond_i64(s, args[5], args[0], args[1],
1455 args[2], const_args[2], args[3], const_args[3]);
1456 break;
8289b279 1457#endif
ba225198
RH
1458 gen_arith:
1459 tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
53cd9273
BS
1460 break;
1461
4b5a85c1
RH
1462 gen_arith1:
1463 tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
1464 break;
1465
8289b279
BS
1466 default:
1467 fprintf(stderr, "unknown opcode 0x%x\n", opc);
1468 tcg_abort();
1469 }
1470}
1471
/* Operand constraint table for the register allocator.
   Constraint letters: "r" any register, "L" qemu_ld/st address/data
   register, "Z"/"J"/"I" immediate classes, "0" match operand 0.
   Terminated by the { -1 } sentinel.  */
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_rem_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_remu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },

#if TCG_TARGET_REG_BITS == 32
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rJ", "rJ" } },
#endif

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i64, { "r" } },
    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i64, { "rZ", "r" } },
    { INDEX_op_st16_i64, { "rZ", "r" } },
    { INDEX_op_st32_i64, { "rZ", "r" } },
    { INDEX_op_st_i64, { "rZ", "r" } },

    { INDEX_op_add_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_rem_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_remu_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i64, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i64, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i64, { "r", "rJ" } },
    { INDEX_op_not_i64, { "r", "rJ" } },

    { INDEX_op_ext32s_i64, { "r", "ri" } },
    { INDEX_op_ext32u_i64, { "r", "ri" } },

    { INDEX_op_brcond_i64, { "rZ", "rJ" } },
    { INDEX_op_setcond_i64, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i64, { "r", "rZ", "rJ", "rI", "0" } },
#endif

/* qemu_ld/st operand counts vary with host word size vs guest address
   size: wide guest addresses and 64-bit data need register pairs.  */
#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L" } },
#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },

    { INDEX_op_qemu_st8, { "L", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif

    { -1 },
};
1607
e4d58b41 1608static void tcg_target_init(TCGContext *s)
8289b279
BS
1609{
1610 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
a212ea75 1611#if TCG_TARGET_REG_BITS == 64
8289b279
BS
1612 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
1613#endif
1614 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
b3db8758
BS
1615 (1 << TCG_REG_G1) |
1616 (1 << TCG_REG_G2) |
1617 (1 << TCG_REG_G3) |
1618 (1 << TCG_REG_G4) |
1619 (1 << TCG_REG_G5) |
1620 (1 << TCG_REG_G6) |
1621 (1 << TCG_REG_G7) |
8289b279
BS
1622 (1 << TCG_REG_O0) |
1623 (1 << TCG_REG_O1) |
1624 (1 << TCG_REG_O2) |
1625 (1 << TCG_REG_O3) |
1626 (1 << TCG_REG_O4) |
1627 (1 << TCG_REG_O5) |
8289b279
BS
1628 (1 << TCG_REG_O7));
1629
1630 tcg_regset_clear(s->reserved_regs);
375816f8
RH
1631 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1632 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1633 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1634 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1635 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1636 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1637 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1638 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1639
8289b279
BS
1640 tcg_add_target_add_op_defs(sparc_op_defs);
1641}
cb1977d3
RH
1642
/* ELF machine type reported for the generated code (used by the
   GDB JIT registration below).  */
#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE  EM_SPARCV9
#else
# define ELF_HOST_MACHINE  EM_SPARC32PLUS
# define ELF_HOST_FLAGS    EF_SPARC_32PLUS
#endif
1649
cb1977d3
RH
/* Layout of the DWARF .debug_frame image handed to the JIT debug
   interface: a CIE followed by one FDE plus its CFA instructions.  */
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;
1657
/* Unwind info template for the code buffer; func_start/func_len are
   patched in by tcg_register_jit below.  */
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = -sizeof(void *) & 0x7f,
    .cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
#if TCG_TARGET_REG_BITS == 64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};
1680
1681void tcg_register_jit(void *buf, size_t buf_size)
1682{
1683 debug_frame.fde.func_start = (tcg_target_long) buf;
1684 debug_frame.fde.func_len = buf_size;
1685
1686 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1687}
5bbd2cae
RH
1688
1689void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
1690{
1691 uint32_t *ptr = (uint32_t *)jmp_addr;
1692 tcg_target_long disp = (tcg_target_long)(addr - jmp_addr) >> 2;
1693
1694 /* We can reach the entire address space for 32-bit. For 64-bit
1695 the code_gen_buffer can't be larger than 2GB. */
1696 if (TCG_TARGET_REG_BITS == 64 && !check_fit_tl(disp, 30)) {
1697 tcg_abort();
1698 }
1699
1700 *ptr = CALL | (disp & 0x3fffffff);
1701 flush_icache_range(jmp_addr, jmp_addr + 4);
1702}