]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/sparc/tcg-target.c
tcg-sparc: Remove most uses of TCG_TARGET_REG_BITS
[mirror_qemu.git] / tcg / sparc / tcg-target.c
CommitLineData
8289b279
BS
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
3cf246f0
RH
25#include "tcg-be-null.h"
26
d4a9eb1f 27#ifndef NDEBUG
8289b279
BS
28static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
29 "%g0",
30 "%g1",
31 "%g2",
32 "%g3",
33 "%g4",
34 "%g5",
35 "%g6",
36 "%g7",
37 "%o0",
38 "%o1",
39 "%o2",
40 "%o3",
41 "%o4",
42 "%o5",
43 "%o6",
44 "%o7",
45 "%l0",
46 "%l1",
47 "%l2",
48 "%l3",
49 "%l4",
50 "%l5",
51 "%l6",
52 "%l7",
53 "%i0",
54 "%i1",
55 "%i2",
56 "%i3",
57 "%i4",
58 "%i5",
59 "%i6",
60 "%i7",
61};
d4a9eb1f 62#endif
8289b279 63
/* SPARC64 is 1 when building for a 64-bit host, 0 otherwise, so it can
   be used in ordinary C expressions as well as in #if.  */
#ifdef __arch64__
# define SPARC64 1
#else
# define SPARC64 0
#endif

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

/* Register holding GUEST_BASE, when in use; %g0 (always zero) otherwise.  */
#ifdef CONFIG_USE_GUEST_BASE
# define TCG_GUEST_BASE_REG  TCG_REG_I5
#else
# define TCG_GUEST_BASE_REG  TCG_REG_G0
#endif
e141ab52 79
0954d0d9 80static const int tcg_target_reg_alloc_order[] = {
8289b279
BS
81 TCG_REG_L0,
82 TCG_REG_L1,
83 TCG_REG_L2,
84 TCG_REG_L3,
85 TCG_REG_L4,
86 TCG_REG_L5,
87 TCG_REG_L6,
88 TCG_REG_L7,
26adfb75 89
8289b279
BS
90 TCG_REG_I0,
91 TCG_REG_I1,
92 TCG_REG_I2,
93 TCG_REG_I3,
94 TCG_REG_I4,
375816f8 95 TCG_REG_I5,
26adfb75
RH
96
97 TCG_REG_G2,
98 TCG_REG_G3,
99 TCG_REG_G4,
100 TCG_REG_G5,
101
102 TCG_REG_O0,
103 TCG_REG_O1,
104 TCG_REG_O2,
105 TCG_REG_O3,
106 TCG_REG_O4,
107 TCG_REG_O5,
8289b279
BS
108};
109
110static const int tcg_target_call_iarg_regs[6] = {
111 TCG_REG_O0,
112 TCG_REG_O1,
113 TCG_REG_O2,
114 TCG_REG_O3,
115 TCG_REG_O4,
116 TCG_REG_O5,
117};
118
26a74ae3 119static const int tcg_target_call_oarg_regs[] = {
8289b279 120 TCG_REG_O0,
e141ab52
BS
121 TCG_REG_O1,
122 TCG_REG_O2,
123 TCG_REG_O3,
8289b279
BS
124};
125
/* SPARC instruction field encodings (see the SPARC V9 manual).  */
#define INSN_OP(x)   ((x) << 30)
#define INSN_OP2(x)  ((x) << 22)
#define INSN_OP3(x)  ((x) << 19)
#define INSN_OPF(x)  ((x) << 5)
#define INSN_RD(x)   ((x) << 25)
#define INSN_RS1(x)  ((x) << 14)
#define INSN_RS2(x)  (x)
#define INSN_ASI(x)  ((x) << 5)

/* Immediates carry the i bit (bit 13); offsets are word-scaled.  */
#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x)  ((x) << 25)

/* Integer condition codes.  */
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

/* Register condition codes for BPr/MOVr.  */
#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

/* Condition-code selectors for MOVcc.  */
#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

/* Branch-on-condition-code flags.  */
#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

/* Branch-on-register flags.  */
#define BPR_PT     BPCC_PT

/* Arithmetic/logical opcodes.  */
#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

/* Shift opcodes; bit 12 selects the 64-bit (X) forms.  */
#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

/* Misc control and memory opcodes.  */
#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))

/* Alternate-space forms, used with ASI_PRIMARY_LITTLE for byteswapped
   accesses.  */
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
250
a115f3ea
RH
251static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
252{
253 return (val << ((sizeof(tcg_target_long) * 8 - bits))
254 >> (sizeof(tcg_target_long) * 8 - bits)) == val;
255}
256
/* Return non-zero if VAL fits in BITS bits as an unsigned 32-bit
   quantity, i.e. if masking VAL to its low BITS bits leaves it
   unchanged.  BITS must be in 1..32.  */
static inline int check_fit_i32(uint32_t val, unsigned int bits)
{
    uint32_t mask = bits < 32 ? (UINT32_C(1) << bits) - 1 : UINT32_MAX;
    return (val & mask) == val;
}
261
262static void patch_reloc(uint8_t *code_ptr, int type,
2ba7fae2 263 intptr_t value, intptr_t addend)
a115f3ea
RH
264{
265 uint32_t insn;
266 value += addend;
267 switch (type) {
268 case R_SPARC_32:
269 if (value != (uint32_t)value) {
270 tcg_abort();
271 }
272 *(uint32_t *)code_ptr = value;
273 break;
ab1339b9 274 case R_SPARC_WDISP16:
2ba7fae2 275 value -= (intptr_t)code_ptr;
ab1339b9
RH
276 if (!check_fit_tl(value >> 2, 16)) {
277 tcg_abort();
278 }
279 insn = *(uint32_t *)code_ptr;
280 insn &= ~INSN_OFF16(-1);
281 insn |= INSN_OFF16(value);
282 *(uint32_t *)code_ptr = insn;
283 break;
a115f3ea 284 case R_SPARC_WDISP19:
2ba7fae2 285 value -= (intptr_t)code_ptr;
a115f3ea
RH
286 if (!check_fit_tl(value >> 2, 19)) {
287 tcg_abort();
288 }
289 insn = *(uint32_t *)code_ptr;
290 insn &= ~INSN_OFF19(-1);
291 insn |= INSN_OFF19(value);
292 *(uint32_t *)code_ptr = insn;
293 break;
294 default:
295 tcg_abort();
296 }
297}
298
299/* parse target specific constraints */
300static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
301{
302 const char *ct_str;
303
304 ct_str = *pct_str;
305 switch (ct_str[0]) {
306 case 'r':
307 ct->ct |= TCG_CT_REG;
308 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
309 break;
310 case 'L': /* qemu_ld/st constraint */
311 ct->ct |= TCG_CT_REG;
312 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
313 // Helper args
314 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
315 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
316 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
317 break;
318 case 'I':
319 ct->ct |= TCG_CT_CONST_S11;
320 break;
321 case 'J':
322 ct->ct |= TCG_CT_CONST_S13;
323 break;
324 case 'Z':
325 ct->ct |= TCG_CT_CONST_ZERO;
326 break;
327 default:
328 return -1;
329 }
330 ct_str++;
331 *pct_str = ct_str;
332 return 0;
333}
334
335/* test if a constant matches the constraint */
f6c6afc1 336static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
a115f3ea
RH
337 const TCGArgConstraint *arg_ct)
338{
339 int ct = arg_ct->ct;
340
341 if (ct & TCG_CT_CONST) {
342 return 1;
4b304cfa
RH
343 }
344
345 if (type == TCG_TYPE_I32) {
346 val = (int32_t)val;
347 }
348
349 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
a115f3ea
RH
350 return 1;
351 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
352 return 1;
353 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
354 return 1;
355 } else {
356 return 0;
357 }
358}
359
26cc915c
BS
360static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
361 int op)
362{
363 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
364 INSN_RS2(rs2));
365}
366
6f41b777
BS
367static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
368 uint32_t offset, int op)
26cc915c
BS
369{
370 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
371 INSN_IMM13(offset));
372}
373
ba225198
RH
374static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
375 int val2, int val2const, int op)
376{
377 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
378 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
379}
380
2a534aff
RH
381static inline void tcg_out_mov(TCGContext *s, TCGType type,
382 TCGReg ret, TCGReg arg)
8289b279 383{
dda73c78
RH
384 if (ret != arg) {
385 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
386 }
26cc915c
BS
387}
388
389static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
390{
391 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
8289b279
BS
392}
393
b101234a
BS
394static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
395{
396 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
397}
398
a9c7d27b
RH
399static void tcg_out_movi(TCGContext *s, TCGType type,
400 TCGReg ret, tcg_target_long arg)
8289b279 401{
a9c7d27b
RH
402 tcg_target_long hi, lo;
403
404 /* A 13-bit constant sign-extended to 64-bits. */
405 if (check_fit_tl(arg, 13)) {
b101234a 406 tcg_out_movi_imm13(s, ret, arg);
a9c7d27b 407 return;
8289b279 408 }
8289b279 409
a9c7d27b 410 /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
9f44adc5 411 if (type == TCG_TYPE_I32 || (arg & ~0xffffffffu) == 0) {
a9c7d27b
RH
412 tcg_out_sethi(s, ret, arg);
413 if (arg & 0x3ff) {
414 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
415 }
416 return;
417 }
418
419 /* A 32-bit constant sign-extended to 64-bits. */
420 if (check_fit_tl(arg, 32)) {
43172207
RH
421 tcg_out_sethi(s, ret, ~arg);
422 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
a9c7d27b
RH
423 return;
424 }
425
426 /* A 64-bit constant decomposed into 2 32-bit pieces. */
427 lo = (int32_t)arg;
428 if (check_fit_tl(lo, 13)) {
429 hi = (arg - lo) >> 31 >> 1;
430 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
431 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
432 tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
43172207 433 } else {
a9c7d27b
RH
434 hi = arg >> 31 >> 1;
435 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
436 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
375816f8 437 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
375816f8 438 tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
6f41b777 439 }
b101234a
BS
440}
441
a0ce341a
RH
442static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
443 int a2, int op)
8289b279 444{
a0ce341a 445 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
8289b279
BS
446}
447
a0ce341a
RH
448static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
449 int offset, int op)
8289b279 450{
a0ce341a 451 if (check_fit_tl(offset, 13)) {
8289b279
BS
452 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
453 INSN_IMM13(offset));
a0ce341a 454 } else {
375816f8
RH
455 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
456 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
cf7c2ca5 457 }
8289b279
BS
458}
459
2a534aff 460static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
a05b5b9b 461 TCGReg arg1, intptr_t arg2)
8289b279 462{
a0ce341a 463 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
8289b279
BS
464}
465
2a534aff 466static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
a05b5b9b 467 TCGReg arg1, intptr_t arg2)
8289b279 468{
a0ce341a
RH
469 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
470}
471
c8fc56ce 472static inline void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
a0ce341a 473{
c8fc56ce 474 TCGReg base = TCG_REG_G0;
a0ce341a
RH
475 if (!check_fit_tl(arg, 10)) {
476 tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
c8fc56ce 477 base = ret;
a0ce341a 478 }
c8fc56ce 479 tcg_out_ld(s, TCG_TYPE_PTR, ret, base, arg & 0x3ff);
8289b279
BS
480}
481
583d1215 482static inline void tcg_out_sety(TCGContext *s, int rs)
8289b279 483{
583d1215 484 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
8289b279
BS
485}
486
7a3766f3
RH
487static inline void tcg_out_rdy(TCGContext *s, int rd)
488{
489 tcg_out32(s, RDY | INSN_RD(rd));
490}
491
8289b279
BS
492static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
493{
494 if (val != 0) {
57e49b40 495 if (check_fit_tl(val, 13))
8289b279 496 tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
f5ef6aac 497 else {
375816f8
RH
498 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
499 tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);
f5ef6aac 500 }
8289b279
BS
501 }
502}
503
583d1215
RH
504static void tcg_out_div32(TCGContext *s, int rd, int rs1,
505 int val2, int val2const, int uns)
506{
507 /* Load Y with the sign/zero extension of RS1 to 64-bits. */
508 if (uns) {
509 tcg_out_sety(s, TCG_REG_G0);
510 } else {
375816f8
RH
511 tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
512 tcg_out_sety(s, TCG_REG_T1);
583d1215
RH
513 }
514
515 tcg_out_arithc(s, rd, rs1, val2, val2const,
516 uns ? ARITH_UDIV : ARITH_SDIV);
517}
518
8289b279
BS
519static inline void tcg_out_nop(TCGContext *s)
520{
26cc915c 521 tcg_out_sethi(s, TCG_REG_G0, 0);
8289b279
BS
522}
523
0aed257f 524static const uint8_t tcg_cond_to_bcond[] = {
cf7c2ca5
BS
525 [TCG_COND_EQ] = COND_E,
526 [TCG_COND_NE] = COND_NE,
527 [TCG_COND_LT] = COND_L,
528 [TCG_COND_GE] = COND_GE,
529 [TCG_COND_LE] = COND_LE,
530 [TCG_COND_GT] = COND_G,
531 [TCG_COND_LTU] = COND_CS,
532 [TCG_COND_GEU] = COND_CC,
533 [TCG_COND_LEU] = COND_LEU,
534 [TCG_COND_GTU] = COND_GU,
535};
536
ab1339b9
RH
537static const uint8_t tcg_cond_to_rcond[] = {
538 [TCG_COND_EQ] = RCOND_Z,
539 [TCG_COND_NE] = RCOND_NZ,
540 [TCG_COND_LT] = RCOND_LZ,
541 [TCG_COND_GT] = RCOND_GZ,
542 [TCG_COND_LE] = RCOND_LEZ,
543 [TCG_COND_GE] = RCOND_GEZ
544};
545
a115f3ea
RH
546static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
547{
548 tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
549}
550
551static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label)
552{
553 TCGLabel *l = &s->labels[label];
554 int off19;
555
556 if (l->has_value) {
557 off19 = INSN_OFF19(l->u.value - (unsigned long)s->code_ptr);
558 } else {
559 /* Make sure to preserve destinations during retranslation. */
560 off19 = *(uint32_t *)s->code_ptr & INSN_OFF19(-1);
561 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0);
562 }
563 tcg_out_bpcc0(s, scond, flags, off19);
564}
565
56f4927e
RH
566static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
567{
ba225198 568 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
56f4927e
RH
569}
570
a115f3ea
RH
571static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGArg arg1,
572 TCGArg arg2, int const_arg2, int label)
cf7c2ca5 573{
56f4927e 574 tcg_out_cmp(s, arg1, arg2, const_arg2);
a115f3ea 575 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, label);
cf7c2ca5
BS
576 tcg_out_nop(s);
577}
578
ded37f0d
RH
579static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGArg ret,
580 TCGArg v1, int v1const)
581{
582 tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
583 | INSN_RS1(tcg_cond_to_bcond[cond])
584 | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
585}
586
587static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
588 TCGArg c1, TCGArg c2, int c2const,
589 TCGArg v1, int v1const)
590{
591 tcg_out_cmp(s, c1, c2, c2const);
592 tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
593}
594
9f44adc5 595#if SPARC64
a115f3ea
RH
596static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGArg arg1,
597 TCGArg arg2, int const_arg2, int label)
1da92db2 598{
ab1339b9
RH
599 /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
600 if (arg2 == 0 && !is_unsigned_cond(cond)) {
601 TCGLabel *l = &s->labels[label];
602 int off16;
603
604 if (l->has_value) {
605 off16 = INSN_OFF16(l->u.value - (unsigned long)s->code_ptr);
606 } else {
607 /* Make sure to preserve destinations during retranslation. */
608 off16 = *(uint32_t *)s->code_ptr & INSN_OFF16(-1);
609 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0);
610 }
611 tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
612 | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
613 } else {
614 tcg_out_cmp(s, arg1, arg2, const_arg2);
615 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label);
616 }
1da92db2
BS
617 tcg_out_nop(s);
618}
ded37f0d 619
203342d8
RH
620static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGArg ret, TCGArg c1,
621 TCGArg v1, int v1const)
622{
623 tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
624 | (tcg_cond_to_rcond[cond] << 10)
625 | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
626}
627
ded37f0d
RH
628static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
629 TCGArg c1, TCGArg c2, int c2const,
630 TCGArg v1, int v1const)
631{
203342d8
RH
632 /* For 64-bit signed comparisons vs zero, we can avoid the compare.
633 Note that the immediate range is one bit smaller, so we must check
634 for that as well. */
635 if (c2 == 0 && !is_unsigned_cond(cond)
636 && (!v1const || check_fit_tl(v1, 10))) {
637 tcg_out_movr(s, cond, ret, c1, v1, v1const);
638 } else {
639 tcg_out_cmp(s, c1, c2, c2const);
640 tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
641 }
ded37f0d 642}
56f4927e 643#else
8a56e840 644static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
56f4927e
RH
645 TCGArg al, TCGArg ah,
646 TCGArg bl, int blconst,
647 TCGArg bh, int bhconst, int label_dest)
648{
a115f3ea 649 int scond, label_next = gen_new_label();
56f4927e
RH
650
651 tcg_out_cmp(s, ah, bh, bhconst);
652
653 /* Note that we fill one of the delay slots with the second compare. */
654 switch (cond) {
655 case TCG_COND_EQ:
a115f3ea 656 tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_next);
56f4927e 657 tcg_out_cmp(s, al, bl, blconst);
a115f3ea 658 tcg_out_bpcc(s, COND_E, BPCC_ICC | BPCC_PT, label_dest);
56f4927e
RH
659 break;
660
661 case TCG_COND_NE:
a115f3ea 662 tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_dest);
56f4927e 663 tcg_out_cmp(s, al, bl, blconst);
a115f3ea 664 tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_dest);
56f4927e
RH
665 break;
666
667 default:
a115f3ea
RH
668 scond = tcg_cond_to_bcond[tcg_high_cond(cond)];
669 tcg_out_bpcc(s, scond, BPCC_ICC | BPCC_PT, label_dest);
56f4927e 670 tcg_out_nop(s);
a115f3ea 671 tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_next);
56f4927e 672 tcg_out_cmp(s, al, bl, blconst);
a115f3ea
RH
673 scond = tcg_cond_to_bcond[tcg_unsigned_cond(cond)];
674 tcg_out_bpcc(s, scond, BPCC_ICC | BPCC_PT, label_dest);
56f4927e
RH
675 break;
676 }
677 tcg_out_nop(s);
678
9d6fca70 679 tcg_out_label(s, label_next, s->code_ptr);
56f4927e 680}
1da92db2
BS
681#endif
682
8a56e840 683static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
dbfe80e1
RH
684 TCGArg c1, TCGArg c2, int c2const)
685{
dbfe80e1
RH
686 /* For 32-bit comparisons, we can play games with ADDX/SUBX. */
687 switch (cond) {
7d458a75
RH
688 case TCG_COND_LTU:
689 case TCG_COND_GEU:
690 /* The result of the comparison is in the carry bit. */
691 break;
692
dbfe80e1
RH
693 case TCG_COND_EQ:
694 case TCG_COND_NE:
7d458a75 695 /* For equality, we can transform to inequality vs zero. */
dbfe80e1
RH
696 if (c2 != 0) {
697 tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
698 }
699 c1 = TCG_REG_G0, c2 = ret, c2const = 0;
7d458a75 700 cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
dbfe80e1
RH
701 break;
702
703 case TCG_COND_GTU:
dbfe80e1 704 case TCG_COND_LEU:
7d458a75
RH
705 /* If we don't need to load a constant into a register, we can
706 swap the operands on GTU/LEU. There's no benefit to loading
707 the constant into a temporary register. */
708 if (!c2const || c2 == 0) {
709 TCGArg t = c1;
710 c1 = c2;
711 c2 = t;
712 c2const = 0;
713 cond = tcg_swap_cond(cond);
714 break;
715 }
716 /* FALLTHRU */
dbfe80e1
RH
717
718 default:
719 tcg_out_cmp(s, c1, c2, c2const);
dbfe80e1 720 tcg_out_movi_imm13(s, ret, 0);
ded37f0d 721 tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
dbfe80e1
RH
722 return;
723 }
724
725 tcg_out_cmp(s, c1, c2, c2const);
726 if (cond == TCG_COND_LTU) {
727 tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
728 } else {
729 tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
730 }
731}
732
9f44adc5 733#if SPARC64
8a56e840 734static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
dbfe80e1
RH
735 TCGArg c1, TCGArg c2, int c2const)
736{
203342d8
RH
737 /* For 64-bit signed comparisons vs zero, we can avoid the compare
738 if the input does not overlap the output. */
739 if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
740 tcg_out_movi_imm13(s, ret, 0);
741 tcg_out_movr(s, cond, ret, c1, 1, 1);
742 } else {
743 tcg_out_cmp(s, c1, c2, c2const);
744 tcg_out_movi_imm13(s, ret, 0);
745 tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
746 }
dbfe80e1
RH
747}
748#else
8a56e840 749static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
dbfe80e1
RH
750 TCGArg al, TCGArg ah,
751 TCGArg bl, int blconst,
752 TCGArg bh, int bhconst)
753{
dda73c78
RH
754 int tmp = TCG_REG_T1;
755
756 /* Note that the low parts are fully consumed before tmp is set. */
757 if (ret != ah && (bhconst || ret != bh)) {
758 tmp = ret;
759 }
dbfe80e1
RH
760
761 switch (cond) {
762 case TCG_COND_EQ:
dbfe80e1 763 case TCG_COND_NE:
fd84ea23
RH
764 if (bl == 0 && bh == 0) {
765 if (cond == TCG_COND_EQ) {
766 tcg_out_arith(s, TCG_REG_G0, al, ah, ARITH_ORCC);
767 tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
768 } else {
769 tcg_out_arith(s, ret, al, ah, ARITH_ORCC);
770 }
771 } else {
772 tcg_out_setcond_i32(s, cond, tmp, al, bl, blconst);
773 tcg_out_cmp(s, ah, bh, bhconst);
774 tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
775 }
dda73c78 776 tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, cond == TCG_COND_NE, 1);
dbfe80e1
RH
777 break;
778
779 default:
dda73c78
RH
780 /* <= : ah < bh | (ah == bh && al <= bl) */
781 tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), tmp, al, bl, blconst);
dbfe80e1 782 tcg_out_cmp(s, ah, bh, bhconst);
dda73c78
RH
783 tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
784 tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, 0, 1);
785 tcg_out_movcc(s, tcg_high_cond(cond), MOVCC_ICC, ret, 1, 1);
dbfe80e1
RH
786 break;
787 }
788}
07ca08ba 789#endif
4ec28e25
RH
790
791static void tcg_out_addsub2(TCGContext *s, TCGArg rl, TCGArg rh,
792 TCGArg al, TCGArg ah, TCGArg bl, int blconst,
793 TCGArg bh, int bhconst, int opl, int oph)
794{
795 TCGArg tmp = TCG_REG_T1;
796
797 /* Note that the low parts are fully consumed before tmp is set. */
798 if (rl != ah && (bhconst || rl != bh)) {
799 tmp = rl;
800 }
801
802 tcg_out_arithc(s, tmp, al, bl, blconst, opl);
803 tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
804 tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
805}
dbfe80e1 806
aad2f06a
RH
807static inline void tcg_out_calli(TCGContext *s, uintptr_t dest)
808{
809 intptr_t disp = dest - (uintptr_t)s->code_ptr;
810
811 if (disp == (int32_t)disp) {
812 tcg_out32(s, CALL | (uint32_t)disp >> 2);
813 } else {
814 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, dest & ~0xfff);
815 tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, dest & 0xfff, JMPL);
816 }
817}
818
#ifdef CONFIG_SOFTMMU
/* Per-MemOp trampolines that marshal the retaddr/env arguments and
   tail-call the generic softmmu load/store helpers.  */
static uintptr_t qemu_ld_trampoline[16];
static uintptr_t qemu_st_trampoline[16];

static void build_trampolines(TCGContext *s)
{
    static uintptr_t const qemu_ld_helpers[16] = {
        [MO_UB]   = (uintptr_t)helper_ret_ldub_mmu,
        [MO_SB]   = (uintptr_t)helper_ret_ldsb_mmu,
        [MO_LEUW] = (uintptr_t)helper_le_lduw_mmu,
        [MO_LESW] = (uintptr_t)helper_le_ldsw_mmu,
        [MO_LEUL] = (uintptr_t)helper_le_ldul_mmu,
        [MO_LEQ]  = (uintptr_t)helper_le_ldq_mmu,
        [MO_BEUW] = (uintptr_t)helper_be_lduw_mmu,
        [MO_BESW] = (uintptr_t)helper_be_ldsw_mmu,
        [MO_BEUL] = (uintptr_t)helper_be_ldul_mmu,
        [MO_BEQ]  = (uintptr_t)helper_be_ldq_mmu,
    };
    static uintptr_t const qemu_st_helpers[16] = {
        [MO_UB]   = (uintptr_t)helper_ret_stb_mmu,
        [MO_LEUW] = (uintptr_t)helper_le_stw_mmu,
        [MO_LEUL] = (uintptr_t)helper_le_stl_mmu,
        [MO_LEQ]  = (uintptr_t)helper_le_stq_mmu,
        [MO_BEUW] = (uintptr_t)helper_be_stw_mmu,
        [MO_BEUL] = (uintptr_t)helper_be_stl_mmu,
        [MO_BEQ]  = (uintptr_t)helper_be_stq_mmu,
    };

    int i;
    TCGReg ra;
    uintptr_t tramp;

    for (i = 0; i < 16; ++i) {
        if (qemu_ld_helpers[i] == 0) {
            continue;
        }

        /* May as well align the trampoline.  */
        tramp = (uintptr_t)s->code_ptr;
        while (tramp & 15) {
            tcg_out_nop(s);
            tramp += 4;
        }
        qemu_ld_trampoline[i] = tramp;

        /* Find the retaddr argument register.  On a 32-bit host a
           64-bit guest address occupies an extra register.  */
        ra = TCG_REG_O3 + (!SPARC64 && TARGET_LONG_BITS == 64);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_calli(s, qemu_ld_helpers[i]);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }

    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == 0) {
            continue;
        }

        /* May as well align the trampoline.  */
        tramp = (uintptr_t)s->code_ptr;
        while (tramp & 15) {
            tcg_out_nop(s);
            tramp += 4;
        }
        qemu_st_trampoline[i] = tramp;

        /* Find the retaddr argument.  For 32-bit, this may be past the
           last argument register, and need passing on the stack.  */
        ra = (TCG_REG_O4
              + (!SPARC64 && TARGET_LONG_BITS == 64)
              + (!SPARC64 && (i & MO_SIZE) == MO_64));

        /* Set the retaddr operand.  */
        if (ra >= TCG_REG_O6) {
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
            ra = TCG_REG_G1;
        }
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_calli(s, qemu_st_helpers[i]);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }
}
#endif
910
7d551702 911/* Generate global QEMU prologue and epilogue code */
e4d58b41 912static void tcg_target_qemu_prologue(TCGContext *s)
b3db8758 913{
4c3204cb
RH
914 int tmp_buf_size, frame_size;
915
916 /* The TCG temp buffer is at the top of the frame, immediately
917 below the frame pointer. */
918 tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
919 tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
920 tmp_buf_size);
921
922 /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
923 otherwise the minimal frame usable by callees. */
924 frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
925 frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
926 frame_size += TCG_TARGET_STACK_ALIGN - 1;
927 frame_size &= -TCG_TARGET_STACK_ALIGN;
b3db8758 928 tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
4c3204cb 929 INSN_IMM13(-frame_size));
c6f7e4fb
RH
930
931#ifdef CONFIG_USE_GUEST_BASE
932 if (GUEST_BASE != 0) {
933 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
934 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
935 }
936#endif
937
aad2f06a 938 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
0c554161
RH
939 /* delay slot */
940 tcg_out_nop(s);
4c3204cb
RH
941
942 /* No epilogue required. We issue ret + restore directly in the TB. */
7ea5d725
RH
943
944#ifdef CONFIG_SOFTMMU
945 build_trampolines(s);
946#endif
b3db8758
BS
947}
948
#if defined(CONFIG_SOFTMMU)
/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               int mem_index, TCGMemOp s_bits, int which)
{
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    TCGReg addr = addrlo;
    int tlb_ofs;

    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Assemble the 64-bit address in R0.  */
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        tcg_out_arithi(s, r1, addrhi, 32, SHIFT_SLLX);
        tcg_out_arith(s, r0, r0, r1, ARITH_OR);
        addr = r0;
    }

    /* Shift the page number down.  */
    tcg_out_arithi(s, r1, addrlo, TARGET_PAGE_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.  */
    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Mask the tlb index.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND);

    /* Mask page, part 2.  */
    tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND);

    /* Shift the tlb index into place.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        tcg_out_addi(s, r1, tlb_ofs & ~0x3ff);
        tlb_ofs &= 0x3ff;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        return r0;
    }
    return addrlo;
}
#endif /* CONFIG_SOFTMMU */
1021
eef0d9e7
RH
1022static const int qemu_ld_opc[16] = {
1023 [MO_UB] = LDUB,
1024 [MO_SB] = LDSB,
1025
1026 [MO_BEUW] = LDUH,
1027 [MO_BESW] = LDSH,
1028 [MO_BEUL] = LDUW,
1029 [MO_BESL] = LDSW,
1030 [MO_BEQ] = LDX,
1031
1032 [MO_LEUW] = LDUH_LE,
1033 [MO_LESW] = LDSH_LE,
1034 [MO_LEUL] = LDUW_LE,
1035 [MO_LESL] = LDSW_LE,
1036 [MO_LEQ] = LDX_LE,
a0ce341a 1037};
9d0efc88 1038
eef0d9e7
RH
1039static const int qemu_st_opc[16] = {
1040 [MO_UB] = STB,
1041
1042 [MO_BEUW] = STH,
1043 [MO_BEUL] = STW,
1044 [MO_BEQ] = STX,
1045
1046 [MO_LEUW] = STH_LE,
1047 [MO_LEUL] = STW_LE,
1048 [MO_LEQ] = STX_LE,
a0ce341a 1049};
bffe1431 1050
cab0a7ea 1051static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is64)
f5ef6aac 1052{
cab0a7ea
RH
1053 TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
1054 TCGMemOp memop, s_bits;
f5ef6aac 1055#if defined(CONFIG_SOFTMMU)
cab0a7ea 1056 TCGReg addrz, param;
7ea5d725
RH
1057 uintptr_t func;
1058 int memi;
a0ce341a 1059 uint32_t *label_ptr[2];
f5ef6aac
BS
1060#endif
1061
a8b12c10 1062 datalo = *args++;
9f44adc5 1063 datahi = (!SPARC64 && is64 ? *args++ : 0);
cab0a7ea 1064 addrlo = *args++;
9f44adc5 1065 addrhi = (!SPARC64 && TARGET_LONG_BITS == 64 ? *args++ : 0);
cab0a7ea
RH
1066 memop = *args++;
1067 s_bits = memop & MO_SIZE;
f5ef6aac 1068
f5ef6aac 1069#if defined(CONFIG_SOFTMMU)
a8b12c10 1070 memi = *args++;
cab0a7ea
RH
1071 addrz = tcg_out_tlb_load(s, addrlo, addrhi, memi, s_bits,
1072 offsetof(CPUTLBEntry, addr_read));
a0ce341a 1073
9f44adc5 1074 if (!SPARC64 && s_bits == MO_64) {
a0ce341a
RH
1075 int reg64;
1076
1077 /* bne,pn %[xi]cc, label0 */
1078 label_ptr[0] = (uint32_t *)s->code_ptr;
a115f3ea
RH
1079 tcg_out_bpcc0(s, COND_NE, BPCC_PN
1080 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
e7bc9004 1081 tcg_out_nop(s);
a0ce341a
RH
1082
1083 /* TLB Hit. */
1084 /* Load all 64-bits into an O/G register. */
1085 reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
cab0a7ea 1086 tcg_out_ldst_rr(s, reg64, addrz, TCG_REG_O1, qemu_ld_opc[memop]);
a0ce341a
RH
1087
1088 /* Move the two 32-bit pieces into the destination registers. */
1089 tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
1090 if (reg64 != datalo) {
1091 tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
1092 }
f5ef6aac 1093
a0ce341a
RH
1094 /* b,a,pt label1 */
1095 label_ptr[1] = (uint32_t *)s->code_ptr;
a115f3ea 1096 tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
a0ce341a
RH
1097 } else {
1098 /* The fast path is exactly one insn. Thus we can perform the
1099 entire TLB Hit in the (annulled) delay slot of the branch
1100 over the TLB Miss case. */
1101
1102 /* beq,a,pt %[xi]cc, label0 */
1103 label_ptr[0] = NULL;
1104 label_ptr[1] = (uint32_t *)s->code_ptr;
a115f3ea
RH
1105 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1106 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
a0ce341a 1107 /* delay slot */
cab0a7ea 1108 tcg_out_ldst_rr(s, datalo, addrz, TCG_REG_O1, qemu_ld_opc[memop]);
a0ce341a 1109 }
53c37487 1110
a0ce341a 1111 /* TLB Miss. */
f5ef6aac 1112
a0ce341a
RH
1113 if (label_ptr[0]) {
1114 *label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
1115 (unsigned long)label_ptr[0]);
1116 }
7ea5d725
RH
1117
1118 param = TCG_REG_O1;
a0ce341a 1119 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
7ea5d725 1120 tcg_out_mov(s, TCG_TYPE_REG, param++, addrhi);
a0ce341a 1121 }
7ea5d725 1122 tcg_out_mov(s, TCG_TYPE_REG, param++, addrlo);
f5ef6aac 1123
7ea5d725
RH
1124 /* We use the helpers to extend SB and SW data, leaving the case
1125 of SL needing explicit extending below. */
1126 if ((memop & ~MO_BSWAP) == MO_SL) {
1127 func = qemu_ld_trampoline[memop & ~MO_SIGN];
1128 } else {
1129 func = qemu_ld_trampoline[memop];
1130 }
1131 assert(func != 0);
1132 tcg_out_calli(s, func);
a0ce341a 1133 /* delay slot */
7ea5d725
RH
1134 tcg_out_movi(s, TCG_TYPE_I32, param, memi);
1135
1136 switch (memop & ~MO_BSWAP) {
eef0d9e7 1137 case MO_SL:
7ea5d725 1138 tcg_out_arithi(s, datalo, TCG_REG_O0, 0, SHIFT_SRA);
f5ef6aac 1139 break;
eef0d9e7 1140 case MO_Q:
a0ce341a 1141 if (TCG_TARGET_REG_BITS == 32) {
7ea5d725
RH
1142 tcg_out_mov(s, TCG_TYPE_REG, datahi, TCG_REG_O0);
1143 tcg_out_mov(s, TCG_TYPE_REG, datalo, TCG_REG_O1);
a0ce341a
RH
1144 break;
1145 }
1146 /* FALLTHRU */
f5ef6aac
BS
1147 default:
1148 /* mov */
7ea5d725 1149 tcg_out_mov(s, TCG_TYPE_REG, datalo, TCG_REG_O0);
f5ef6aac
BS
1150 break;
1151 }
1152
a0ce341a
RH
1153 *label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
1154 (unsigned long)label_ptr[1]);
90cbed46 1155#else
9f44adc5 1156 if (SPARC64 && TARGET_LONG_BITS == 32) {
cab0a7ea
RH
1157 tcg_out_arithi(s, TCG_REG_T1, addrlo, 0, SHIFT_SRL);
1158 addrlo = TCG_REG_T1;
a0ce341a 1159 }
9f44adc5 1160 if (!SPARC64 && s_bits == MO_64) {
a0ce341a 1161 int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
90cbed46 1162
cab0a7ea 1163 tcg_out_ldst_rr(s, reg64, addrlo,
c6f7e4fb 1164 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
eef0d9e7 1165 qemu_ld_opc[memop]);
f5ef6aac 1166
a0ce341a
RH
1167 tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
1168 if (reg64 != datalo) {
1169 tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
1170 }
1171 } else {
cab0a7ea 1172 tcg_out_ldst_rr(s, datalo, addrlo,
c6f7e4fb 1173 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
eef0d9e7 1174 qemu_ld_opc[memop]);
f5ef6aac 1175 }
a0ce341a 1176#endif /* CONFIG_SOFTMMU */
f5ef6aac
BS
1177}
1178
cab0a7ea 1179static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is64)
f5ef6aac 1180{
cab0a7ea
RH
1181 TCGReg addrlo, datalo, datahi, addrhi __attribute__((unused));
1182 TCGMemOp memop, s_bits;
f5ef6aac 1183#if defined(CONFIG_SOFTMMU)
cab0a7ea 1184 TCGReg addrz, datafull, param;
7ea5d725
RH
1185 uintptr_t func;
1186 int memi;
a0ce341a 1187 uint32_t *label_ptr;
f5ef6aac
BS
1188#endif
1189
a8b12c10 1190 datalo = *args++;
9f44adc5 1191 datahi = (!SPARC64 && is64 ? *args++ : 0);
cab0a7ea 1192 addrlo = *args++;
9f44adc5 1193 addrhi = (!SPARC64 && TARGET_LONG_BITS == 64 ? *args++ : 0);
cab0a7ea
RH
1194 memop = *args++;
1195 s_bits = memop & MO_SIZE;
f5ef6aac 1196
f5ef6aac 1197#if defined(CONFIG_SOFTMMU)
a8b12c10 1198 memi = *args++;
cab0a7ea
RH
1199 addrz = tcg_out_tlb_load(s, addrlo, addrhi, memi, s_bits,
1200 offsetof(CPUTLBEntry, addr_write));
a0ce341a 1201
a7a49843 1202 datafull = datalo;
9f44adc5 1203 if (!SPARC64 && s_bits == MO_64) {
375816f8
RH
1204 /* Reconstruct the full 64-bit value. */
1205 tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
a0ce341a 1206 tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
375816f8 1207 tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
a7a49843 1208 datafull = TCG_REG_O2;
a0ce341a 1209 }
f5ef6aac 1210
a0ce341a
RH
1211 /* The fast path is exactly one insn. Thus we can perform the entire
1212 TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
1213 /* beq,a,pt %[xi]cc, label0 */
1214 label_ptr = (uint32_t *)s->code_ptr;
a115f3ea
RH
1215 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1216 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
a0ce341a 1217 /* delay slot */
cab0a7ea 1218 tcg_out_ldst_rr(s, datafull, addrz, TCG_REG_O1, qemu_st_opc[memop]);
a0ce341a
RH
1219
1220 /* TLB Miss. */
1221
7ea5d725 1222 param = TCG_REG_O1;
a0ce341a 1223 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
7ea5d725 1224 tcg_out_mov(s, TCG_TYPE_REG, param++, addrhi);
a0ce341a 1225 }
7ea5d725 1226 tcg_out_mov(s, TCG_TYPE_REG, param++, addrlo);
eef0d9e7 1227 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
7ea5d725 1228 tcg_out_mov(s, TCG_TYPE_REG, param++, datahi);
a0ce341a 1229 }
7ea5d725 1230 tcg_out_mov(s, TCG_TYPE_REG, param++, datalo);
53c37487 1231
7ea5d725
RH
1232 func = qemu_st_trampoline[memop];
1233 assert(func != 0);
1234 tcg_out_calli(s, func);
a0ce341a 1235 /* delay slot */
7ea5d725 1236 tcg_out_movi(s, TCG_TYPE_REG, param, memi);
f5ef6aac 1237
a0ce341a
RH
1238 *label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
1239 (unsigned long)label_ptr);
8384dd67 1240#else
9f44adc5 1241 if (SPARC64 && TARGET_LONG_BITS == 32) {
cab0a7ea
RH
1242 tcg_out_arithi(s, TCG_REG_T1, addrlo, 0, SHIFT_SRL);
1243 addrlo = TCG_REG_T1;
f5ef6aac 1244 }
9f44adc5 1245 if (!SPARC64 && s_bits == MO_64) {
375816f8 1246 tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
a0ce341a 1247 tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
375816f8
RH
1248 tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
1249 datalo = TCG_REG_O2;
a0ce341a 1250 }
cab0a7ea 1251 tcg_out_ldst_rr(s, datalo, addrlo,
c6f7e4fb 1252 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
eef0d9e7 1253 qemu_st_opc[memop]);
a0ce341a 1254#endif /* CONFIG_SOFTMMU */
f5ef6aac
BS
1255}
1256
a9751609 1257static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
8289b279
BS
1258 const int *const_args)
1259{
1260 int c;
1261
1262 switch (opc) {
1263 case INDEX_op_exit_tb:
b3db8758 1264 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
aad2f06a 1265 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, JMPL);
b3db8758
BS
1266 tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
1267 INSN_RS2(TCG_REG_G0));
8289b279
BS
1268 break;
1269 case INDEX_op_goto_tb:
1270 if (s->tb_jmp_offset) {
1271 /* direct jump method */
5bbd2cae 1272 uint32_t old_insn = *(uint32_t *)s->code_ptr;
8289b279 1273 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
5bbd2cae
RH
1274 /* Make sure to preserve links during retranslation. */
1275 tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
8289b279
BS
1276 } else {
1277 /* indirect jump method */
c8fc56ce 1278 tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + args[0]));
aad2f06a 1279 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
8289b279 1280 }
53cd9273 1281 tcg_out_nop(s);
8289b279
BS
1282 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1283 break;
1284 case INDEX_op_call:
375816f8 1285 if (const_args[0]) {
aad2f06a 1286 tcg_out_calli(s, args[0]);
375816f8 1287 } else {
aad2f06a 1288 tcg_out_arithi(s, TCG_REG_O7, args[0], 0, JMPL);
8289b279 1289 }
4c3204cb
RH
1290 /* delay slot */
1291 tcg_out_nop(s);
8289b279 1292 break;
8289b279 1293 case INDEX_op_br:
a115f3ea 1294 tcg_out_bpcc(s, COND_A, BPCC_PT, args[0]);
f5ef6aac 1295 tcg_out_nop(s);
8289b279
BS
1296 break;
1297 case INDEX_op_movi_i32:
1298 tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
1299 break;
1300
9f44adc5 1301#if SPARC64
8289b279 1302#define OP_32_64(x) \
ba225198
RH
1303 glue(glue(case INDEX_op_, x), _i32): \
1304 glue(glue(case INDEX_op_, x), _i64)
8289b279
BS
1305#else
1306#define OP_32_64(x) \
ba225198 1307 glue(glue(case INDEX_op_, x), _i32)
8289b279 1308#endif
ba225198 1309 OP_32_64(ld8u):
8289b279
BS
1310 tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
1311 break;
ba225198 1312 OP_32_64(ld8s):
8289b279
BS
1313 tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
1314 break;
ba225198 1315 OP_32_64(ld16u):
8289b279
BS
1316 tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
1317 break;
ba225198 1318 OP_32_64(ld16s):
8289b279
BS
1319 tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
1320 break;
1321 case INDEX_op_ld_i32:
9f44adc5 1322#if SPARC64
53cd9273 1323 case INDEX_op_ld32u_i64:
8289b279
BS
1324#endif
1325 tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
1326 break;
ba225198 1327 OP_32_64(st8):
8289b279
BS
1328 tcg_out_ldst(s, args[0], args[1], args[2], STB);
1329 break;
ba225198 1330 OP_32_64(st16):
8289b279
BS
1331 tcg_out_ldst(s, args[0], args[1], args[2], STH);
1332 break;
1333 case INDEX_op_st_i32:
9f44adc5 1334#if SPARC64
53cd9273 1335 case INDEX_op_st32_i64:
8289b279
BS
1336#endif
1337 tcg_out_ldst(s, args[0], args[1], args[2], STW);
1338 break;
ba225198 1339 OP_32_64(add):
53cd9273 1340 c = ARITH_ADD;
ba225198
RH
1341 goto gen_arith;
1342 OP_32_64(sub):
8289b279 1343 c = ARITH_SUB;
ba225198
RH
1344 goto gen_arith;
1345 OP_32_64(and):
8289b279 1346 c = ARITH_AND;
ba225198 1347 goto gen_arith;
dc69960d
RH
1348 OP_32_64(andc):
1349 c = ARITH_ANDN;
1350 goto gen_arith;
ba225198 1351 OP_32_64(or):
8289b279 1352 c = ARITH_OR;
ba225198 1353 goto gen_arith;
18c8f7a3
RH
1354 OP_32_64(orc):
1355 c = ARITH_ORN;
1356 goto gen_arith;
ba225198 1357 OP_32_64(xor):
8289b279 1358 c = ARITH_XOR;
ba225198 1359 goto gen_arith;
8289b279
BS
1360 case INDEX_op_shl_i32:
1361 c = SHIFT_SLL;
1fd95946
RH
1362 do_shift32:
1363 /* Limit immediate shift count lest we create an illegal insn. */
1364 tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c);
1365 break;
8289b279
BS
1366 case INDEX_op_shr_i32:
1367 c = SHIFT_SRL;
1fd95946 1368 goto do_shift32;
8289b279
BS
1369 case INDEX_op_sar_i32:
1370 c = SHIFT_SRA;
1fd95946 1371 goto do_shift32;
8289b279
BS
1372 case INDEX_op_mul_i32:
1373 c = ARITH_UMUL;
ba225198 1374 goto gen_arith;
583d1215 1375
4b5a85c1
RH
1376 OP_32_64(neg):
1377 c = ARITH_SUB;
1378 goto gen_arith1;
be6551b1
RH
1379 OP_32_64(not):
1380 c = ARITH_ORN;
1381 goto gen_arith1;
4b5a85c1 1382
583d1215
RH
1383 case INDEX_op_div_i32:
1384 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
1385 break;
1386 case INDEX_op_divu_i32:
1387 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
1388 break;
1389
8289b279 1390 case INDEX_op_brcond_i32:
1da92db2
BS
1391 tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
1392 args[3]);
8289b279 1393 break;
dbfe80e1
RH
1394 case INDEX_op_setcond_i32:
1395 tcg_out_setcond_i32(s, args[3], args[0], args[1],
1396 args[2], const_args[2]);
1397 break;
ded37f0d
RH
1398 case INDEX_op_movcond_i32:
1399 tcg_out_movcond_i32(s, args[5], args[0], args[1],
1400 args[2], const_args[2], args[3], const_args[3]);
1401 break;
dbfe80e1 1402
9f44adc5 1403#if !SPARC64
56f4927e
RH
1404 case INDEX_op_brcond2_i32:
1405 tcg_out_brcond2_i32(s, args[4], args[0], args[1],
1406 args[2], const_args[2],
1407 args[3], const_args[3], args[5]);
1408 break;
dbfe80e1
RH
1409 case INDEX_op_setcond2_i32:
1410 tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
1411 args[3], const_args[3],
1412 args[4], const_args[4]);
1413 break;
803d805b
RH
1414#endif
1415
7a3766f3 1416 case INDEX_op_add2_i32:
4ec28e25
RH
1417 tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
1418 args[4], const_args[4], args[5], const_args[5],
1419 ARITH_ADDCC, ARITH_ADDX);
7a3766f3
RH
1420 break;
1421 case INDEX_op_sub2_i32:
4ec28e25
RH
1422 tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
1423 args[4], const_args[4], args[5], const_args[5],
1424 ARITH_SUBCC, ARITH_SUBX);
7a3766f3
RH
1425 break;
1426 case INDEX_op_mulu2_i32:
1427 tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
1428 ARITH_UMUL);
1429 tcg_out_rdy(s, args[1]);
1430 break;
8289b279 1431
cab0a7ea
RH
1432 case INDEX_op_qemu_ld_i32:
1433 tcg_out_qemu_ld(s, args, 0);
8289b279 1434 break;
cab0a7ea
RH
1435 case INDEX_op_qemu_ld_i64:
1436 tcg_out_qemu_ld(s, args, 1);
8289b279 1437 break;
cab0a7ea
RH
1438 case INDEX_op_qemu_st_i32:
1439 tcg_out_qemu_st(s, args, 0);
8289b279 1440 break;
cab0a7ea
RH
1441 case INDEX_op_qemu_st_i64:
1442 tcg_out_qemu_st(s, args, 1);
a0ce341a 1443 break;
8289b279 1444
9f44adc5 1445#if SPARC64
8289b279
BS
1446 case INDEX_op_movi_i64:
1447 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1448 break;
53cd9273
BS
1449 case INDEX_op_ld32s_i64:
1450 tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
1451 break;
8289b279
BS
1452 case INDEX_op_ld_i64:
1453 tcg_out_ldst(s, args[0], args[1], args[2], LDX);
1454 break;
1455 case INDEX_op_st_i64:
1456 tcg_out_ldst(s, args[0], args[1], args[2], STX);
1457 break;
1458 case INDEX_op_shl_i64:
1459 c = SHIFT_SLLX;
1fd95946
RH
1460 do_shift64:
1461 /* Limit immediate shift count lest we create an illegal insn. */
1462 tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c);
1463 break;
8289b279
BS
1464 case INDEX_op_shr_i64:
1465 c = SHIFT_SRLX;
1fd95946 1466 goto do_shift64;
8289b279
BS
1467 case INDEX_op_sar_i64:
1468 c = SHIFT_SRAX;
1fd95946 1469 goto do_shift64;
8289b279
BS
1470 case INDEX_op_mul_i64:
1471 c = ARITH_MULX;
ba225198 1472 goto gen_arith;
583d1215 1473 case INDEX_op_div_i64:
53cd9273 1474 c = ARITH_SDIVX;
ba225198 1475 goto gen_arith;
583d1215 1476 case INDEX_op_divu_i64:
8289b279 1477 c = ARITH_UDIVX;
ba225198 1478 goto gen_arith;
cc6dfecf 1479 case INDEX_op_ext32s_i64:
1d0a6068 1480 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
cc6dfecf
RH
1481 break;
1482 case INDEX_op_ext32u_i64:
1d0a6068 1483 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
cc6dfecf 1484 break;
8289b279
BS
1485
1486 case INDEX_op_brcond_i64:
1da92db2
BS
1487 tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
1488 args[3]);
8289b279 1489 break;
dbfe80e1
RH
1490 case INDEX_op_setcond_i64:
1491 tcg_out_setcond_i64(s, args[3], args[0], args[1],
1492 args[2], const_args[2]);
1493 break;
ded37f0d
RH
1494 case INDEX_op_movcond_i64:
1495 tcg_out_movcond_i64(s, args[5], args[0], args[1],
1496 args[2], const_args[2], args[3], const_args[3]);
1497 break;
8289b279 1498#endif
ba225198
RH
1499 gen_arith:
1500 tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
53cd9273
BS
1501 break;
1502
4b5a85c1
RH
1503 gen_arith1:
1504 tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
1505 break;
1506
8289b279
BS
1507 default:
1508 fprintf(stderr, "unknown opcode 0x%x\n", opc);
1509 tcg_abort();
1510 }
1511}
1512
1513static const TCGTargetOpDef sparc_op_defs[] = {
1514 { INDEX_op_exit_tb, { } },
b3db8758 1515 { INDEX_op_goto_tb, { } },
8289b279 1516 { INDEX_op_call, { "ri" } },
8289b279
BS
1517 { INDEX_op_br, { } },
1518
1519 { INDEX_op_mov_i32, { "r", "r" } },
1520 { INDEX_op_movi_i32, { "r" } },
1521 { INDEX_op_ld8u_i32, { "r", "r" } },
1522 { INDEX_op_ld8s_i32, { "r", "r" } },
1523 { INDEX_op_ld16u_i32, { "r", "r" } },
1524 { INDEX_op_ld16s_i32, { "r", "r" } },
1525 { INDEX_op_ld_i32, { "r", "r" } },
89269f6c
RH
1526 { INDEX_op_st8_i32, { "rZ", "r" } },
1527 { INDEX_op_st16_i32, { "rZ", "r" } },
1528 { INDEX_op_st_i32, { "rZ", "r" } },
1529
1530 { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
1531 { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
1532 { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
1533 { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
89269f6c
RH
1534 { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
1535 { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
1536 { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
1537 { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
1538 { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
1539 { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },
1540
1541 { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
1542 { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
1543 { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },
8289b279 1544
4b5a85c1 1545 { INDEX_op_neg_i32, { "r", "rJ" } },
be6551b1 1546 { INDEX_op_not_i32, { "r", "rJ" } },
4b5a85c1 1547
89269f6c
RH
1548 { INDEX_op_brcond_i32, { "rZ", "rJ" } },
1549 { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
1550 { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },
dbfe80e1 1551
9f44adc5 1552#if !SPARC64
89269f6c
RH
1553 { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },
1554 { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rJ", "rJ" } },
803d805b
RH
1555#endif
1556
89269f6c
RH
1557 { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1558 { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1559 { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },
8289b279 1560
9f44adc5 1561#if SPARC64
8289b279
BS
1562 { INDEX_op_mov_i64, { "r", "r" } },
1563 { INDEX_op_movi_i64, { "r" } },
1564 { INDEX_op_ld8u_i64, { "r", "r" } },
1565 { INDEX_op_ld8s_i64, { "r", "r" } },
1566 { INDEX_op_ld16u_i64, { "r", "r" } },
1567 { INDEX_op_ld16s_i64, { "r", "r" } },
1568 { INDEX_op_ld32u_i64, { "r", "r" } },
1569 { INDEX_op_ld32s_i64, { "r", "r" } },
1570 { INDEX_op_ld_i64, { "r", "r" } },
89269f6c
RH
1571 { INDEX_op_st8_i64, { "rZ", "r" } },
1572 { INDEX_op_st16_i64, { "rZ", "r" } },
1573 { INDEX_op_st32_i64, { "rZ", "r" } },
1574 { INDEX_op_st_i64, { "rZ", "r" } },
1575
1576 { INDEX_op_add_i64, { "r", "rZ", "rJ" } },
1577 { INDEX_op_mul_i64, { "r", "rZ", "rJ" } },
1578 { INDEX_op_div_i64, { "r", "rZ", "rJ" } },
1579 { INDEX_op_divu_i64, { "r", "rZ", "rJ" } },
89269f6c
RH
1580 { INDEX_op_sub_i64, { "r", "rZ", "rJ" } },
1581 { INDEX_op_and_i64, { "r", "rZ", "rJ" } },
1582 { INDEX_op_andc_i64, { "r", "rZ", "rJ" } },
1583 { INDEX_op_or_i64, { "r", "rZ", "rJ" } },
1584 { INDEX_op_orc_i64, { "r", "rZ", "rJ" } },
1585 { INDEX_op_xor_i64, { "r", "rZ", "rJ" } },
1586
1587 { INDEX_op_shl_i64, { "r", "rZ", "rJ" } },
1588 { INDEX_op_shr_i64, { "r", "rZ", "rJ" } },
1589 { INDEX_op_sar_i64, { "r", "rZ", "rJ" } },
4b5a85c1
RH
1590
1591 { INDEX_op_neg_i64, { "r", "rJ" } },
be6551b1 1592 { INDEX_op_not_i64, { "r", "rJ" } },
4b5a85c1 1593
1d0a6068
RH
1594 { INDEX_op_ext32s_i64, { "r", "r" } },
1595 { INDEX_op_ext32u_i64, { "r", "r" } },
8289b279 1596
89269f6c
RH
1597 { INDEX_op_brcond_i64, { "rZ", "rJ" } },
1598 { INDEX_op_setcond_i64, { "r", "rZ", "rJ" } },
1599 { INDEX_op_movcond_i64, { "r", "rZ", "rJ", "rI", "0" } },
8289b279 1600#endif
a0ce341a 1601
9f44adc5 1602#if SPARC64
cab0a7ea
RH
1603 { INDEX_op_qemu_ld_i32, { "r", "L" } },
1604 { INDEX_op_qemu_ld_i64, { "r", "L" } },
1605 { INDEX_op_qemu_st_i32, { "L", "L" } },
1606 { INDEX_op_qemu_st_i64, { "L", "L" } },
9f44adc5 1607#elif TARGET_LONG_BITS == 32
cab0a7ea
RH
1608 { INDEX_op_qemu_ld_i32, { "r", "L" } },
1609 { INDEX_op_qemu_ld_i64, { "r", "r", "L" } },
1610 { INDEX_op_qemu_st_i32, { "L", "L" } },
1611 { INDEX_op_qemu_st_i64, { "L", "L", "L" } },
a0ce341a 1612#else
cab0a7ea
RH
1613 { INDEX_op_qemu_ld_i32, { "r", "L", "L" } },
1614 { INDEX_op_qemu_ld_i64, { "L", "L", "L", "L" } },
1615 { INDEX_op_qemu_st_i32, { "L", "L", "L" } },
1616 { INDEX_op_qemu_st_i64, { "L", "L", "L", "L" } },
8289b279 1617#endif
a0ce341a 1618
8289b279
BS
1619 { -1 },
1620};
1621
e4d58b41 1622static void tcg_target_init(TCGContext *s)
8289b279
BS
1623{
1624 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
9f44adc5 1625#if SPARC64
8289b279
BS
1626 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
1627#endif
1628 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
b3db8758
BS
1629 (1 << TCG_REG_G1) |
1630 (1 << TCG_REG_G2) |
1631 (1 << TCG_REG_G3) |
1632 (1 << TCG_REG_G4) |
1633 (1 << TCG_REG_G5) |
1634 (1 << TCG_REG_G6) |
1635 (1 << TCG_REG_G7) |
8289b279
BS
1636 (1 << TCG_REG_O0) |
1637 (1 << TCG_REG_O1) |
1638 (1 << TCG_REG_O2) |
1639 (1 << TCG_REG_O3) |
1640 (1 << TCG_REG_O4) |
1641 (1 << TCG_REG_O5) |
8289b279
BS
1642 (1 << TCG_REG_O7));
1643
1644 tcg_regset_clear(s->reserved_regs);
375816f8
RH
1645 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1646 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1647 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1648 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1649 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1650 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1651 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1652 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1653
8289b279
BS
1654 tcg_add_target_add_op_defs(sparc_op_defs);
1655}
cb1977d3 1656
9f44adc5 1657#if SPARC64
cb1977d3 1658# define ELF_HOST_MACHINE EM_SPARCV9
9b9c37c3 1659#else
cb1977d3
RH
1660# define ELF_HOST_MACHINE EM_SPARC32PLUS
1661# define ELF_HOST_FLAGS EF_SPARC_32PLUS
cb1977d3
RH
1662#endif
1663
cb1977d3
RH
1664typedef struct {
1665 DebugFrameCIE cie;
497a22eb 1666 DebugFrameFDEHeader fde;
9f44adc5 1667 uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
497a22eb
RH
1668 uint8_t fde_win_save;
1669 uint8_t fde_ret_save[3];
cb1977d3
RH
1670} DebugFrame;
1671
1672static DebugFrame debug_frame = {
1673 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1674 .cie.id = -1,
1675 .cie.version = 1,
1676 .cie.code_align = 1,
1677 .cie.data_align = -sizeof(void *) & 0x7f,
1678 .cie.return_column = 15, /* o7 */
1679
497a22eb
RH
1680 /* Total FDE size does not include the "len" member. */
1681 .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
1682
1683 .fde_def_cfa = {
9f44adc5 1684#if SPARC64
cb1977d3
RH
1685 12, 30, /* DW_CFA_def_cfa i6, 2047 */
1686 (2047 & 0x7f) | 0x80, (2047 >> 7)
1687#else
1688 13, 30 /* DW_CFA_def_cfa_register i6 */
1689#endif
1690 },
497a22eb
RH
1691 .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */
1692 .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
cb1977d3
RH
1693};
1694
1695void tcg_register_jit(void *buf, size_t buf_size)
1696{
c8fc56ce 1697 debug_frame.fde.func_start = (uintptr_t)buf;
cb1977d3
RH
1698 debug_frame.fde.func_len = buf_size;
1699
1700 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1701}
5bbd2cae
RH
1702
1703void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
1704{
1705 uint32_t *ptr = (uint32_t *)jmp_addr;
c8fc56ce 1706 uintptr_t disp = addr - jmp_addr;
5bbd2cae
RH
1707
1708 /* We can reach the entire address space for 32-bit. For 64-bit
1709 the code_gen_buffer can't be larger than 2GB. */
c8fc56ce 1710 assert(disp == (int32_t)disp);
5bbd2cae 1711
c8fc56ce 1712 *ptr = CALL | (uint32_t)disp >> 2;
5bbd2cae
RH
1713 flush_icache_range(jmp_addr, jmp_addr + 4);
1714}