]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/sparc/tcg-target.c
tcg-sparc: Convert to new ldst helpers
[mirror_qemu.git] / tcg / sparc / tcg-target.c
CommitLineData
8289b279
BS
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
3cf246f0
RH
25#include "tcg-be-null.h"
26
d4a9eb1f 27#ifndef NDEBUG
8289b279
BS
28static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
29 "%g0",
30 "%g1",
31 "%g2",
32 "%g3",
33 "%g4",
34 "%g5",
35 "%g6",
36 "%g7",
37 "%o0",
38 "%o1",
39 "%o2",
40 "%o3",
41 "%o4",
42 "%o5",
43 "%o6",
44 "%o7",
45 "%l0",
46 "%l1",
47 "%l2",
48 "%l3",
49 "%l4",
50 "%l5",
51 "%l6",
52 "%l7",
53 "%i0",
54 "%i1",
55 "%i2",
56 "%i3",
57 "%i4",
58 "%i5",
59 "%i6",
60 "%i7",
61};
d4a9eb1f 62#endif
8289b279 63
375816f8
RH
64/* Define some temporary registers. T2 is used for constant generation. */
65#define TCG_REG_T1 TCG_REG_G1
66#define TCG_REG_T2 TCG_REG_O7
67
c6f7e4fb 68#ifdef CONFIG_USE_GUEST_BASE
375816f8 69# define TCG_GUEST_BASE_REG TCG_REG_I5
c6f7e4fb
RH
70#else
71# define TCG_GUEST_BASE_REG TCG_REG_G0
72#endif
e141ab52 73
0954d0d9 74static const int tcg_target_reg_alloc_order[] = {
8289b279
BS
75 TCG_REG_L0,
76 TCG_REG_L1,
77 TCG_REG_L2,
78 TCG_REG_L3,
79 TCG_REG_L4,
80 TCG_REG_L5,
81 TCG_REG_L6,
82 TCG_REG_L7,
26adfb75 83
8289b279
BS
84 TCG_REG_I0,
85 TCG_REG_I1,
86 TCG_REG_I2,
87 TCG_REG_I3,
88 TCG_REG_I4,
375816f8 89 TCG_REG_I5,
26adfb75
RH
90
91 TCG_REG_G2,
92 TCG_REG_G3,
93 TCG_REG_G4,
94 TCG_REG_G5,
95
96 TCG_REG_O0,
97 TCG_REG_O1,
98 TCG_REG_O2,
99 TCG_REG_O3,
100 TCG_REG_O4,
101 TCG_REG_O5,
8289b279
BS
102};
103
104static const int tcg_target_call_iarg_regs[6] = {
105 TCG_REG_O0,
106 TCG_REG_O1,
107 TCG_REG_O2,
108 TCG_REG_O3,
109 TCG_REG_O4,
110 TCG_REG_O5,
111};
112
26a74ae3 113static const int tcg_target_call_oarg_regs[] = {
8289b279 114 TCG_REG_O0,
e141ab52
BS
115 TCG_REG_O1,
116 TCG_REG_O2,
117 TCG_REG_O3,
8289b279
BS
118};
119
8289b279
BS
/* Instruction field encodings, per the SPARC architecture manual.  */
#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

/* Immediate and branch-offset field encodings; bit 13 selects imm form.  */
#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)

/* Integer condition codes.  */
#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

/* Register conditions for BPr / MOVr (compare a register against zero).  */
#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

/* Condition-code selector fields for MOVcc.  */
#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

/* Fields for the BPcc branch-with-prediction instruction.  */
#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

/* The predict-taken bit is in the same position for BPr.  */
#define BPR_PT     BPCC_PT

/* Arithmetic and logical opcodes.  */
#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

/* 32-bit shifts.  */
#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

/* 64-bit shifts (X bit set).  */
#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

/* Misc control instructions.  */
#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)

/* Loads and stores.  */
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))

/* Alternate-space loads and stores, used for byte-swapped access.  */
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

/* Little-endian accesses via the primary-little ASI.  */
#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))
244
a115f3ea
RH
245static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
246{
247 return (val << ((sizeof(tcg_target_long) * 8 - bits))
248 >> (sizeof(tcg_target_long) * 8 - bits)) == val;
249}
250
/* Return true if VAL fits in BITS bits unsigned, i.e. all higher bits of
   VAL are clear (the shifts are logical since VAL is unsigned).  */
static inline int check_fit_i32(uint32_t val, unsigned int bits)
{
    unsigned int shift = 32 - bits;
    return ((val << shift) >> shift) == val;
}
255
256static void patch_reloc(uint8_t *code_ptr, int type,
2ba7fae2 257 intptr_t value, intptr_t addend)
a115f3ea
RH
258{
259 uint32_t insn;
260 value += addend;
261 switch (type) {
262 case R_SPARC_32:
263 if (value != (uint32_t)value) {
264 tcg_abort();
265 }
266 *(uint32_t *)code_ptr = value;
267 break;
ab1339b9 268 case R_SPARC_WDISP16:
2ba7fae2 269 value -= (intptr_t)code_ptr;
ab1339b9
RH
270 if (!check_fit_tl(value >> 2, 16)) {
271 tcg_abort();
272 }
273 insn = *(uint32_t *)code_ptr;
274 insn &= ~INSN_OFF16(-1);
275 insn |= INSN_OFF16(value);
276 *(uint32_t *)code_ptr = insn;
277 break;
a115f3ea 278 case R_SPARC_WDISP19:
2ba7fae2 279 value -= (intptr_t)code_ptr;
a115f3ea
RH
280 if (!check_fit_tl(value >> 2, 19)) {
281 tcg_abort();
282 }
283 insn = *(uint32_t *)code_ptr;
284 insn &= ~INSN_OFF19(-1);
285 insn |= INSN_OFF19(value);
286 *(uint32_t *)code_ptr = insn;
287 break;
288 default:
289 tcg_abort();
290 }
291}
292
293/* parse target specific constraints */
294static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
295{
296 const char *ct_str;
297
298 ct_str = *pct_str;
299 switch (ct_str[0]) {
300 case 'r':
301 ct->ct |= TCG_CT_REG;
302 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
303 break;
304 case 'L': /* qemu_ld/st constraint */
305 ct->ct |= TCG_CT_REG;
306 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
307 // Helper args
308 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
309 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
310 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
311 break;
312 case 'I':
313 ct->ct |= TCG_CT_CONST_S11;
314 break;
315 case 'J':
316 ct->ct |= TCG_CT_CONST_S13;
317 break;
318 case 'Z':
319 ct->ct |= TCG_CT_CONST_ZERO;
320 break;
321 default:
322 return -1;
323 }
324 ct_str++;
325 *pct_str = ct_str;
326 return 0;
327}
328
329/* test if a constant matches the constraint */
330static inline int tcg_target_const_match(tcg_target_long val,
331 const TCGArgConstraint *arg_ct)
332{
333 int ct = arg_ct->ct;
334
335 if (ct & TCG_CT_CONST) {
336 return 1;
337 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
338 return 1;
339 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
340 return 1;
341 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
342 return 1;
343 } else {
344 return 0;
345 }
346}
347
26cc915c
BS
348static inline void tcg_out_arith(TCGContext *s, int rd, int rs1, int rs2,
349 int op)
350{
351 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
352 INSN_RS2(rs2));
353}
354
6f41b777
BS
355static inline void tcg_out_arithi(TCGContext *s, int rd, int rs1,
356 uint32_t offset, int op)
26cc915c
BS
357{
358 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) |
359 INSN_IMM13(offset));
360}
361
ba225198
RH
362static void tcg_out_arithc(TCGContext *s, int rd, int rs1,
363 int val2, int val2const, int op)
364{
365 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
366 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
367}
368
2a534aff
RH
369static inline void tcg_out_mov(TCGContext *s, TCGType type,
370 TCGReg ret, TCGReg arg)
8289b279 371{
dda73c78
RH
372 if (ret != arg) {
373 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
374 }
26cc915c
BS
375}
376
377static inline void tcg_out_sethi(TCGContext *s, int ret, uint32_t arg)
378{
379 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
8289b279
BS
380}
381
b101234a
BS
382static inline void tcg_out_movi_imm13(TCGContext *s, int ret, uint32_t arg)
383{
384 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
385}
386
a9c7d27b
RH
387static void tcg_out_movi(TCGContext *s, TCGType type,
388 TCGReg ret, tcg_target_long arg)
8289b279 389{
a9c7d27b
RH
390 tcg_target_long hi, lo;
391
392 /* A 13-bit constant sign-extended to 64-bits. */
393 if (check_fit_tl(arg, 13)) {
b101234a 394 tcg_out_movi_imm13(s, ret, arg);
a9c7d27b 395 return;
8289b279 396 }
8289b279 397
a9c7d27b 398 /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
43172207
RH
399 if (TCG_TARGET_REG_BITS == 32
400 || type == TCG_TYPE_I32
a9c7d27b
RH
401 || (arg & ~0xffffffffu) == 0) {
402 tcg_out_sethi(s, ret, arg);
403 if (arg & 0x3ff) {
404 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
405 }
406 return;
407 }
408
409 /* A 32-bit constant sign-extended to 64-bits. */
410 if (check_fit_tl(arg, 32)) {
43172207
RH
411 tcg_out_sethi(s, ret, ~arg);
412 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
a9c7d27b
RH
413 return;
414 }
415
416 /* A 64-bit constant decomposed into 2 32-bit pieces. */
417 lo = (int32_t)arg;
418 if (check_fit_tl(lo, 13)) {
419 hi = (arg - lo) >> 31 >> 1;
420 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
421 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
422 tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
43172207 423 } else {
a9c7d27b
RH
424 hi = arg >> 31 >> 1;
425 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
426 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
375816f8 427 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
375816f8 428 tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
6f41b777 429 }
b101234a
BS
430}
431
a0ce341a
RH
432static inline void tcg_out_ldst_rr(TCGContext *s, int data, int a1,
433 int a2, int op)
8289b279 434{
a0ce341a 435 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
8289b279
BS
436}
437
a0ce341a
RH
438static inline void tcg_out_ldst(TCGContext *s, int ret, int addr,
439 int offset, int op)
8289b279 440{
a0ce341a 441 if (check_fit_tl(offset, 13)) {
8289b279
BS
442 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
443 INSN_IMM13(offset));
a0ce341a 444 } else {
375816f8
RH
445 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
446 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
cf7c2ca5 447 }
8289b279
BS
448}
449
2a534aff 450static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
a05b5b9b 451 TCGReg arg1, intptr_t arg2)
8289b279 452{
a0ce341a 453 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
8289b279
BS
454}
455
2a534aff 456static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
a05b5b9b 457 TCGReg arg1, intptr_t arg2)
8289b279 458{
a0ce341a
RH
459 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
460}
461
c8fc56ce 462static inline void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
a0ce341a 463{
c8fc56ce 464 TCGReg base = TCG_REG_G0;
a0ce341a
RH
465 if (!check_fit_tl(arg, 10)) {
466 tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
c8fc56ce 467 base = ret;
a0ce341a 468 }
c8fc56ce 469 tcg_out_ld(s, TCG_TYPE_PTR, ret, base, arg & 0x3ff);
8289b279
BS
470}
471
583d1215 472static inline void tcg_out_sety(TCGContext *s, int rs)
8289b279 473{
583d1215 474 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
8289b279
BS
475}
476
7a3766f3
RH
477static inline void tcg_out_rdy(TCGContext *s, int rd)
478{
479 tcg_out32(s, RDY | INSN_RD(rd));
480}
481
8289b279
BS
482static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
483{
484 if (val != 0) {
57e49b40 485 if (check_fit_tl(val, 13))
8289b279 486 tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
f5ef6aac 487 else {
375816f8
RH
488 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, val);
489 tcg_out_arith(s, reg, reg, TCG_REG_T1, ARITH_ADD);
f5ef6aac 490 }
8289b279
BS
491 }
492}
493
583d1215
RH
494static void tcg_out_div32(TCGContext *s, int rd, int rs1,
495 int val2, int val2const, int uns)
496{
497 /* Load Y with the sign/zero extension of RS1 to 64-bits. */
498 if (uns) {
499 tcg_out_sety(s, TCG_REG_G0);
500 } else {
375816f8
RH
501 tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
502 tcg_out_sety(s, TCG_REG_T1);
583d1215
RH
503 }
504
505 tcg_out_arithc(s, rd, rs1, val2, val2const,
506 uns ? ARITH_UDIV : ARITH_SDIV);
507}
508
8289b279
BS
509static inline void tcg_out_nop(TCGContext *s)
510{
26cc915c 511 tcg_out_sethi(s, TCG_REG_G0, 0);
8289b279
BS
512}
513
0aed257f 514static const uint8_t tcg_cond_to_bcond[] = {
cf7c2ca5
BS
515 [TCG_COND_EQ] = COND_E,
516 [TCG_COND_NE] = COND_NE,
517 [TCG_COND_LT] = COND_L,
518 [TCG_COND_GE] = COND_GE,
519 [TCG_COND_LE] = COND_LE,
520 [TCG_COND_GT] = COND_G,
521 [TCG_COND_LTU] = COND_CS,
522 [TCG_COND_GEU] = COND_CC,
523 [TCG_COND_LEU] = COND_LEU,
524 [TCG_COND_GTU] = COND_GU,
525};
526
ab1339b9
RH
527static const uint8_t tcg_cond_to_rcond[] = {
528 [TCG_COND_EQ] = RCOND_Z,
529 [TCG_COND_NE] = RCOND_NZ,
530 [TCG_COND_LT] = RCOND_LZ,
531 [TCG_COND_GT] = RCOND_GZ,
532 [TCG_COND_LE] = RCOND_LEZ,
533 [TCG_COND_GE] = RCOND_GEZ
534};
535
a115f3ea
RH
536static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
537{
538 tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
539}
540
541static void tcg_out_bpcc(TCGContext *s, int scond, int flags, int label)
542{
543 TCGLabel *l = &s->labels[label];
544 int off19;
545
546 if (l->has_value) {
547 off19 = INSN_OFF19(l->u.value - (unsigned long)s->code_ptr);
548 } else {
549 /* Make sure to preserve destinations during retranslation. */
550 off19 = *(uint32_t *)s->code_ptr & INSN_OFF19(-1);
551 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, label, 0);
552 }
553 tcg_out_bpcc0(s, scond, flags, off19);
554}
555
56f4927e
RH
556static void tcg_out_cmp(TCGContext *s, TCGArg c1, TCGArg c2, int c2const)
557{
ba225198 558 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
56f4927e
RH
559}
560
a115f3ea
RH
561static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGArg arg1,
562 TCGArg arg2, int const_arg2, int label)
cf7c2ca5 563{
56f4927e 564 tcg_out_cmp(s, arg1, arg2, const_arg2);
a115f3ea 565 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, label);
cf7c2ca5
BS
566 tcg_out_nop(s);
567}
568
ded37f0d
RH
569static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGArg ret,
570 TCGArg v1, int v1const)
571{
572 tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
573 | INSN_RS1(tcg_cond_to_bcond[cond])
574 | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
575}
576
577static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
578 TCGArg c1, TCGArg c2, int c2const,
579 TCGArg v1, int v1const)
580{
581 tcg_out_cmp(s, c1, c2, c2const);
582 tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
583}
584
a212ea75 585#if TCG_TARGET_REG_BITS == 64
a115f3ea
RH
586static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGArg arg1,
587 TCGArg arg2, int const_arg2, int label)
1da92db2 588{
ab1339b9
RH
589 /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
590 if (arg2 == 0 && !is_unsigned_cond(cond)) {
591 TCGLabel *l = &s->labels[label];
592 int off16;
593
594 if (l->has_value) {
595 off16 = INSN_OFF16(l->u.value - (unsigned long)s->code_ptr);
596 } else {
597 /* Make sure to preserve destinations during retranslation. */
598 off16 = *(uint32_t *)s->code_ptr & INSN_OFF16(-1);
599 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, label, 0);
600 }
601 tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
602 | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
603 } else {
604 tcg_out_cmp(s, arg1, arg2, const_arg2);
605 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, label);
606 }
1da92db2
BS
607 tcg_out_nop(s);
608}
ded37f0d 609
203342d8
RH
610static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGArg ret, TCGArg c1,
611 TCGArg v1, int v1const)
612{
613 tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
614 | (tcg_cond_to_rcond[cond] << 10)
615 | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
616}
617
ded37f0d
RH
618static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
619 TCGArg c1, TCGArg c2, int c2const,
620 TCGArg v1, int v1const)
621{
203342d8
RH
622 /* For 64-bit signed comparisons vs zero, we can avoid the compare.
623 Note that the immediate range is one bit smaller, so we must check
624 for that as well. */
625 if (c2 == 0 && !is_unsigned_cond(cond)
626 && (!v1const || check_fit_tl(v1, 10))) {
627 tcg_out_movr(s, cond, ret, c1, v1, v1const);
628 } else {
629 tcg_out_cmp(s, c1, c2, c2const);
630 tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
631 }
ded37f0d 632}
56f4927e 633#else
8a56e840 634static void tcg_out_brcond2_i32(TCGContext *s, TCGCond cond,
56f4927e
RH
635 TCGArg al, TCGArg ah,
636 TCGArg bl, int blconst,
637 TCGArg bh, int bhconst, int label_dest)
638{
a115f3ea 639 int scond, label_next = gen_new_label();
56f4927e
RH
640
641 tcg_out_cmp(s, ah, bh, bhconst);
642
643 /* Note that we fill one of the delay slots with the second compare. */
644 switch (cond) {
645 case TCG_COND_EQ:
a115f3ea 646 tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_next);
56f4927e 647 tcg_out_cmp(s, al, bl, blconst);
a115f3ea 648 tcg_out_bpcc(s, COND_E, BPCC_ICC | BPCC_PT, label_dest);
56f4927e
RH
649 break;
650
651 case TCG_COND_NE:
a115f3ea 652 tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_dest);
56f4927e 653 tcg_out_cmp(s, al, bl, blconst);
a115f3ea 654 tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_dest);
56f4927e
RH
655 break;
656
657 default:
a115f3ea
RH
658 scond = tcg_cond_to_bcond[tcg_high_cond(cond)];
659 tcg_out_bpcc(s, scond, BPCC_ICC | BPCC_PT, label_dest);
56f4927e 660 tcg_out_nop(s);
a115f3ea 661 tcg_out_bpcc(s, COND_NE, BPCC_ICC | BPCC_PT, label_next);
56f4927e 662 tcg_out_cmp(s, al, bl, blconst);
a115f3ea
RH
663 scond = tcg_cond_to_bcond[tcg_unsigned_cond(cond)];
664 tcg_out_bpcc(s, scond, BPCC_ICC | BPCC_PT, label_dest);
56f4927e
RH
665 break;
666 }
667 tcg_out_nop(s);
668
9d6fca70 669 tcg_out_label(s, label_next, s->code_ptr);
56f4927e 670}
1da92db2
BS
671#endif
672
8a56e840 673static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGArg ret,
dbfe80e1
RH
674 TCGArg c1, TCGArg c2, int c2const)
675{
dbfe80e1
RH
676 /* For 32-bit comparisons, we can play games with ADDX/SUBX. */
677 switch (cond) {
7d458a75
RH
678 case TCG_COND_LTU:
679 case TCG_COND_GEU:
680 /* The result of the comparison is in the carry bit. */
681 break;
682
dbfe80e1
RH
683 case TCG_COND_EQ:
684 case TCG_COND_NE:
7d458a75 685 /* For equality, we can transform to inequality vs zero. */
dbfe80e1
RH
686 if (c2 != 0) {
687 tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
688 }
689 c1 = TCG_REG_G0, c2 = ret, c2const = 0;
7d458a75 690 cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
dbfe80e1
RH
691 break;
692
693 case TCG_COND_GTU:
dbfe80e1 694 case TCG_COND_LEU:
7d458a75
RH
695 /* If we don't need to load a constant into a register, we can
696 swap the operands on GTU/LEU. There's no benefit to loading
697 the constant into a temporary register. */
698 if (!c2const || c2 == 0) {
699 TCGArg t = c1;
700 c1 = c2;
701 c2 = t;
702 c2const = 0;
703 cond = tcg_swap_cond(cond);
704 break;
705 }
706 /* FALLTHRU */
dbfe80e1
RH
707
708 default:
709 tcg_out_cmp(s, c1, c2, c2const);
dbfe80e1 710 tcg_out_movi_imm13(s, ret, 0);
ded37f0d 711 tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
dbfe80e1
RH
712 return;
713 }
714
715 tcg_out_cmp(s, c1, c2, c2const);
716 if (cond == TCG_COND_LTU) {
717 tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
718 } else {
719 tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
720 }
721}
722
723#if TCG_TARGET_REG_BITS == 64
8a56e840 724static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGArg ret,
dbfe80e1
RH
725 TCGArg c1, TCGArg c2, int c2const)
726{
203342d8
RH
727 /* For 64-bit signed comparisons vs zero, we can avoid the compare
728 if the input does not overlap the output. */
729 if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
730 tcg_out_movi_imm13(s, ret, 0);
731 tcg_out_movr(s, cond, ret, c1, 1, 1);
732 } else {
733 tcg_out_cmp(s, c1, c2, c2const);
734 tcg_out_movi_imm13(s, ret, 0);
735 tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
736 }
dbfe80e1
RH
737}
738#else
8a56e840 739static void tcg_out_setcond2_i32(TCGContext *s, TCGCond cond, TCGArg ret,
dbfe80e1
RH
740 TCGArg al, TCGArg ah,
741 TCGArg bl, int blconst,
742 TCGArg bh, int bhconst)
743{
dda73c78
RH
744 int tmp = TCG_REG_T1;
745
746 /* Note that the low parts are fully consumed before tmp is set. */
747 if (ret != ah && (bhconst || ret != bh)) {
748 tmp = ret;
749 }
dbfe80e1
RH
750
751 switch (cond) {
752 case TCG_COND_EQ:
dbfe80e1 753 case TCG_COND_NE:
fd84ea23
RH
754 if (bl == 0 && bh == 0) {
755 if (cond == TCG_COND_EQ) {
756 tcg_out_arith(s, TCG_REG_G0, al, ah, ARITH_ORCC);
757 tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
758 } else {
759 tcg_out_arith(s, ret, al, ah, ARITH_ORCC);
760 }
761 } else {
762 tcg_out_setcond_i32(s, cond, tmp, al, bl, blconst);
763 tcg_out_cmp(s, ah, bh, bhconst);
764 tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
765 }
dda73c78 766 tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, cond == TCG_COND_NE, 1);
dbfe80e1
RH
767 break;
768
769 default:
dda73c78
RH
770 /* <= : ah < bh | (ah == bh && al <= bl) */
771 tcg_out_setcond_i32(s, tcg_unsigned_cond(cond), tmp, al, bl, blconst);
dbfe80e1 772 tcg_out_cmp(s, ah, bh, bhconst);
dda73c78
RH
773 tcg_out_mov(s, TCG_TYPE_I32, ret, tmp);
774 tcg_out_movcc(s, TCG_COND_NE, MOVCC_ICC, ret, 0, 1);
775 tcg_out_movcc(s, tcg_high_cond(cond), MOVCC_ICC, ret, 1, 1);
dbfe80e1
RH
776 break;
777 }
778}
07ca08ba 779#endif
4ec28e25
RH
780
781static void tcg_out_addsub2(TCGContext *s, TCGArg rl, TCGArg rh,
782 TCGArg al, TCGArg ah, TCGArg bl, int blconst,
783 TCGArg bh, int bhconst, int opl, int oph)
784{
785 TCGArg tmp = TCG_REG_T1;
786
787 /* Note that the low parts are fully consumed before tmp is set. */
788 if (rl != ah && (bhconst || rl != bh)) {
789 tmp = rl;
790 }
791
792 tcg_out_arithc(s, tmp, al, bl, blconst, opl);
793 tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
794 tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
795}
dbfe80e1 796
aad2f06a
RH
797static inline void tcg_out_calli(TCGContext *s, uintptr_t dest)
798{
799 intptr_t disp = dest - (uintptr_t)s->code_ptr;
800
801 if (disp == (int32_t)disp) {
802 tcg_out32(s, CALL | (uint32_t)disp >> 2);
803 } else {
804 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, dest & ~0xfff);
805 tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, dest & 0xfff, JMPL);
806 }
807}
808
7ea5d725
RH
#ifdef CONFIG_SOFTMMU
/* Entry points of the generated ld/st helper thunks, indexed by TCGMemOp.  */
static uintptr_t qemu_ld_trampoline[16];
static uintptr_t qemu_st_trampoline[16];

/* Emit one trampoline per ld/st helper that fills in the env and return
   address operands before tail-calling the C helper.  */
static void build_trampolines(TCGContext *s)
{
    static uintptr_t const qemu_ld_helpers[16] = {
        [MO_UB]   = (uintptr_t)helper_ret_ldub_mmu,
        [MO_SB]   = (uintptr_t)helper_ret_ldsb_mmu,
        [MO_LEUW] = (uintptr_t)helper_le_lduw_mmu,
        [MO_LESW] = (uintptr_t)helper_le_ldsw_mmu,
        [MO_LEUL] = (uintptr_t)helper_le_ldul_mmu,
        [MO_LEQ]  = (uintptr_t)helper_le_ldq_mmu,
        [MO_BEUW] = (uintptr_t)helper_be_lduw_mmu,
        [MO_BESW] = (uintptr_t)helper_be_ldsw_mmu,
        [MO_BEUL] = (uintptr_t)helper_be_ldul_mmu,
        [MO_BEQ]  = (uintptr_t)helper_be_ldq_mmu,
    };
    static uintptr_t const qemu_st_helpers[16] = {
        [MO_UB]   = (uintptr_t)helper_ret_stb_mmu,
        [MO_LEUW] = (uintptr_t)helper_le_stw_mmu,
        [MO_LEUL] = (uintptr_t)helper_le_stl_mmu,
        [MO_LEQ]  = (uintptr_t)helper_le_stq_mmu,
        [MO_BEUW] = (uintptr_t)helper_be_stw_mmu,
        [MO_BEUL] = (uintptr_t)helper_be_stl_mmu,
        [MO_BEQ]  = (uintptr_t)helper_be_stq_mmu,
    };

    int i;
    TCGReg ra;
    uintptr_t tramp;

    for (i = 0; i < 16; ++i) {
        if (qemu_ld_helpers[i] == 0) {
            continue;
        }

        /* May as well align the trampoline.  */
        tramp = (uintptr_t)s->code_ptr;
        while (tramp & 15) {
            tcg_out_nop(s);
            tramp += 4;
        }
        qemu_ld_trampoline[i] = tramp;

        /* Find the retaddr argument register.  */
        ra = TCG_REG_O3 + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_calli(s, qemu_ld_helpers[i]);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }

    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == 0) {
            continue;
        }

        /* May as well align the trampoline.  */
        tramp = (uintptr_t)s->code_ptr;
        while (tramp & 15) {
            tcg_out_nop(s);
            tramp += 4;
        }
        qemu_st_trampoline[i] = tramp;

        /* Find the retaddr argument.  For 32-bit, this may be past the
           last argument register, and need passing on the stack.  */
        ra = (TCG_REG_O4
              + (TARGET_LONG_BITS > TCG_TARGET_REG_BITS)
              + (TCG_TARGET_REG_BITS == 32 && (i & MO_SIZE) == MO_64));

        /* Set the retaddr operand.  */
        if (ra >= TCG_REG_O6) {
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
            ra = TCG_REG_G1;
        }
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_calli(s, qemu_st_helpers[i]);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }
}
#endif
900
7d551702 901/* Generate global QEMU prologue and epilogue code */
e4d58b41 902static void tcg_target_qemu_prologue(TCGContext *s)
b3db8758 903{
4c3204cb
RH
904 int tmp_buf_size, frame_size;
905
906 /* The TCG temp buffer is at the top of the frame, immediately
907 below the frame pointer. */
908 tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
909 tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
910 tmp_buf_size);
911
912 /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
913 otherwise the minimal frame usable by callees. */
914 frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
915 frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
916 frame_size += TCG_TARGET_STACK_ALIGN - 1;
917 frame_size &= -TCG_TARGET_STACK_ALIGN;
b3db8758 918 tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
4c3204cb 919 INSN_IMM13(-frame_size));
c6f7e4fb
RH
920
921#ifdef CONFIG_USE_GUEST_BASE
922 if (GUEST_BASE != 0) {
923 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
924 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
925 }
926#endif
927
aad2f06a 928 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
0c554161
RH
929 /* delay slot */
930 tcg_out_nop(s);
4c3204cb
RH
931
932 /* No epilogue required. We issue ret + restore directly in the TB. */
7ea5d725
RH
933
934#ifdef CONFIG_SOFTMMU
935 build_trampolines(s);
936#endif
b3db8758
BS
937}
938
#if defined(CONFIG_SOFTMMU)
/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addrlo, TCGReg addrhi,
                               int mem_index, TCGMemOp s_bits, int which)
{
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    TCGReg addr = addrlo;
    int tlb_ofs;

    if (TCG_TARGET_REG_BITS == 32 && TARGET_LONG_BITS == 64) {
        /* Assemble the 64-bit address in R0.  */
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        tcg_out_arithi(s, r1, addrhi, 32, SHIFT_SLLX);
        tcg_out_arith(s, r0, r0, r1, ARITH_OR);
        addr = r0;
    }

    /* Shift the page number down.  */
    tcg_out_arithi(s, r1, addrlo, TARGET_PAGE_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.  */
    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1,
                 TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* Mask the tlb index.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND);

    /* Mask page, part 2.  */
    tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND);

    /* Shift the tlb index into place.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_tl(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        tcg_out_addi(s, r1, tlb_ofs & ~0x3ff);
        tlb_ofs &= 0x3ff;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs + offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addrlo, 0, SHIFT_SRL);
        return r0;
    }
    return addrlo;
}
#endif /* CONFIG_SOFTMMU */
1011
eef0d9e7
RH
/* Map a TCGMemOp (size | sign | endianness) to the SPARC load opcode.
   The _LE variants presumably select the little-endian ASI forms defined
   earlier in this file -- not visible in this chunk, verify there. */
1012static const int qemu_ld_opc[16] = {
1013 [MO_UB] = LDUB,
1014 [MO_SB] = LDSB,
1015
1016 [MO_BEUW] = LDUH,
1017 [MO_BESW] = LDSH,
1018 [MO_BEUL] = LDUW,
1019 [MO_BESL] = LDSW,
1020 [MO_BEQ] = LDX,
1021
1022 [MO_LEUW] = LDUH_LE,
1023 [MO_LESW] = LDSH_LE,
1024 [MO_LEUL] = LDUW_LE,
1025 [MO_LESL] = LDSW_LE,
1026 [MO_LEQ] = LDX_LE,
a0ce341a 1027};
9d0efc88 1028
eef0d9e7
RH
/* Map a TCGMemOp to the SPARC store opcode.  Stores carry no sign
   distinction, so only the unsigned entries are populated. */
1029static const int qemu_st_opc[16] = {
1030 [MO_UB] = STB,
1031
1032 [MO_BEUW] = STH,
1033 [MO_BEUL] = STW,
1034 [MO_BEQ] = STX,
1035
1036 [MO_LEUW] = STH_LE,
1037 [MO_LEUL] = STW_LE,
1038 [MO_LEQ] = STX_LE,
a0ce341a 1039};
bffe1431 1040
/* Emit code for a qemu guest load.
   ARGS carries datalo[, datahi], addrlo[, addrhi][, mem_index] depending
   on host/guest word sizes; MEMOP selects size/sign/endianness.
   Softmmu build: fast path through tcg_out_tlb_load with a trampoline
   call on TLB miss; user-only build: direct access relative to
   TCG_GUEST_BASE_REG (or %g0 when GUEST_BASE is zero). */
eef0d9e7 1041static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGMemOp memop)
f5ef6aac 1042{
a8b12c10 1043 TCGReg addrlo, datalo, datahi, addr_reg;
eef0d9e7 1044 TCGMemOp s_bits = memop & MO_SIZE;
f5ef6aac 1045#if defined(CONFIG_SOFTMMU)
7ea5d725
RH
1046 TCGReg addrhi, param;
1047 uintptr_t func;
1048 int memi;
a0ce341a 1049 uint32_t *label_ptr[2];
f5ef6aac
BS
1050#endif
1051
a8b12c10
RH
1052 datalo = *args++;
1053 datahi = (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64 ? *args++ : 0);
1054 addr_reg = addrlo = *args++;
f5ef6aac 1055
f5ef6aac 1056#if defined(CONFIG_SOFTMMU)
a8b12c10
RH
1057 addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
1058 memi = *args++;
a0ce341a 1059
a8b12c10 1060 addr_reg = tcg_out_tlb_load(s, addrlo, addrhi, memi, s_bits,
a0ce341a
RH
1061 offsetof(CPUTLBEntry, addr_read));
1062
eef0d9e7 1063 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
a0ce341a
RH
1064 int reg64;
1065
1066 /* bne,pn %[xi]cc, label0 */
1067 label_ptr[0] = (uint32_t *)s->code_ptr;
a115f3ea
RH
1068 tcg_out_bpcc0(s, COND_NE, BPCC_PN
1069 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
e7bc9004 1070 tcg_out_nop(s);
a0ce341a
RH
1071
1072 /* TLB Hit. */
1073 /* Load all 64-bits into an O/G register. */
1074 reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
eef0d9e7 1075 tcg_out_ldst_rr(s, reg64, addr_reg, TCG_REG_O1, qemu_ld_opc[memop]);
a0ce341a
RH
1076
1077 /* Move the two 32-bit pieces into the destination registers. */
1078 tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
1079 if (reg64 != datalo) {
1080 tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
1081 }
f5ef6aac 1082
a0ce341a
RH
1083 /* b,a,pt label1 */
1084 label_ptr[1] = (uint32_t *)s->code_ptr;
a115f3ea 1085 tcg_out_bpcc0(s, COND_A, BPCC_A | BPCC_PT, 0);
a0ce341a
RH
1086 } else {
1087 /* The fast path is exactly one insn. Thus we can perform the
1088 entire TLB Hit in the (annulled) delay slot of the branch
1089 over the TLB Miss case. */
1090
1091 /* beq,a,pt %[xi]cc, label0 */
1092 label_ptr[0] = NULL;
1093 label_ptr[1] = (uint32_t *)s->code_ptr;
a115f3ea
RH
1094 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1095 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
a0ce341a 1096 /* delay slot */
eef0d9e7 1097 tcg_out_ldst_rr(s, datalo, addr_reg, TCG_REG_O1, qemu_ld_opc[memop]);
a0ce341a 1098 }
53c37487 1099
a0ce341a 1100 /* TLB Miss. */
f5ef6aac 1101
a0ce341a
RH
/* Back-patch the 19-bit branch displacement now that the miss-path
   address is known. */
1102 if (label_ptr[0]) {
1103 *label_ptr[0] |= INSN_OFF19((unsigned long)s->code_ptr -
1104 (unsigned long)label_ptr[0]);
1105 }
7ea5d725
RH
1106
1107 param = TCG_REG_O1;
a0ce341a 1108 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
7ea5d725 1109 tcg_out_mov(s, TCG_TYPE_REG, param++, addrhi);
a0ce341a 1110 }
7ea5d725 1111 tcg_out_mov(s, TCG_TYPE_REG, param++, addrlo);
f5ef6aac 1112
7ea5d725
RH
1113 /* We use the helpers to extend SB and SW data, leaving the case
1114 of SL needing explicit extending below. */
1115 if ((memop & ~MO_BSWAP) == MO_SL) {
1116 func = qemu_ld_trampoline[memop & ~MO_SIGN];
1117 } else {
1118 func = qemu_ld_trampoline[memop];
1119 }
1120 assert(func != 0);
1121 tcg_out_calli(s, func);
a0ce341a 1122 /* delay slot */
7ea5d725
RH
1123 tcg_out_movi(s, TCG_TYPE_I32, param, memi);
1124
1125 switch (memop & ~MO_BSWAP) {
eef0d9e7 1126 case MO_SL:
7ea5d725 1127 tcg_out_arithi(s, datalo, TCG_REG_O0, 0, SHIFT_SRA);
f5ef6aac 1128 break;
eef0d9e7 1129 case MO_Q:
a0ce341a 1130 if (TCG_TARGET_REG_BITS == 32) {
7ea5d725
RH
1131 tcg_out_mov(s, TCG_TYPE_REG, datahi, TCG_REG_O0);
1132 tcg_out_mov(s, TCG_TYPE_REG, datalo, TCG_REG_O1);
a0ce341a
RH
1133 break;
1134 }
1135 /* FALLTHRU */
f5ef6aac
BS
1136 default:
1137 /* mov */
7ea5d725 1138 tcg_out_mov(s, TCG_TYPE_REG, datalo, TCG_REG_O0);
f5ef6aac
BS
1139 break;
1140 }
1141
a0ce341a
RH
/* Patch the hit-path branch to land here, past the slow path. */
1142 *label_ptr[1] |= INSN_OFF19((unsigned long)s->code_ptr -
1143 (unsigned long)label_ptr[1]);
90cbed46 1144#else
a0ce341a 1145 if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
375816f8
RH
1146 tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
1147 addr_reg = TCG_REG_T1;
a0ce341a 1148 }
eef0d9e7 1149 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
a0ce341a 1150 int reg64 = (datalo < 16 ? datalo : TCG_REG_O0);
90cbed46 1151
c6f7e4fb
RH
1152 tcg_out_ldst_rr(s, reg64, addr_reg,
1153 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
eef0d9e7 1154 qemu_ld_opc[memop]);
f5ef6aac 1155
a0ce341a
RH
1156 tcg_out_arithi(s, datahi, reg64, 32, SHIFT_SRLX);
1157 if (reg64 != datalo) {
1158 tcg_out_mov(s, TCG_TYPE_I32, datalo, reg64);
1159 }
1160 } else {
c6f7e4fb
RH
1161 tcg_out_ldst_rr(s, datalo, addr_reg,
1162 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
eef0d9e7 1163 qemu_ld_opc[memop]);
f5ef6aac 1164 }
a0ce341a 1165#endif /* CONFIG_SOFTMMU */
f5ef6aac
BS
1166}
1167
/* Emit code for a qemu guest store.  Mirrors tcg_out_qemu_ld: softmmu
   builds do the TLB check with the store performed in the annulled
   delay slot of the hit branch, then fall through to a trampoline call
   on miss; user-only builds store directly via TCG_GUEST_BASE_REG. */
eef0d9e7 1168static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, TCGMemOp memop)
f5ef6aac 1169{
a8b12c10 1170 TCGReg addrlo, datalo, datahi, addr_reg;
eef0d9e7 1171 TCGMemOp s_bits = memop & MO_SIZE;
f5ef6aac 1172#if defined(CONFIG_SOFTMMU)
7ea5d725
RH
1173 TCGReg addrhi, datafull, param;
1174 uintptr_t func;
1175 int memi;
a0ce341a 1176 uint32_t *label_ptr;
f5ef6aac
BS
1177#endif
1178
a8b12c10
RH
1179 datalo = *args++;
1180 datahi = (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64 ? *args++ : 0);
1181 addr_reg = addrlo = *args++;
f5ef6aac 1182
f5ef6aac 1183#if defined(CONFIG_SOFTMMU)
a8b12c10
RH
1184 addrhi = (TARGET_LONG_BITS > TCG_TARGET_REG_BITS ? *args++ : 0);
1185 memi = *args++;
a0ce341a 1186
a8b12c10 1187 addr_reg = tcg_out_tlb_load(s, addrlo, addrhi, memi, s_bits,
a0ce341a
RH
1188 offsetof(CPUTLBEntry, addr_write));
1189
a7a49843 1190 datafull = datalo;
eef0d9e7 1191 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
375816f8
RH
1192 /* Reconstruct the full 64-bit value. */
1193 tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
a0ce341a 1194 tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
375816f8 1195 tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
a7a49843 1196 datafull = TCG_REG_O2;
a0ce341a 1197 }
f5ef6aac 1198
a0ce341a
RH
1199 /* The fast path is exactly one insn. Thus we can perform the entire
1200 TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
1201 /* beq,a,pt %[xi]cc, label0 */
1202 label_ptr = (uint32_t *)s->code_ptr;
a115f3ea
RH
1203 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1204 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
a0ce341a 1205 /* delay slot */
eef0d9e7 1206 tcg_out_ldst_rr(s, datafull, addr_reg, TCG_REG_O1, qemu_st_opc[memop]);
a0ce341a
RH
1207
1208 /* TLB Miss. */
1209
/* Marshal the helper arguments into consecutive %o registers. */
7ea5d725 1210 param = TCG_REG_O1;
a0ce341a 1211 if (TARGET_LONG_BITS > TCG_TARGET_REG_BITS) {
7ea5d725 1212 tcg_out_mov(s, TCG_TYPE_REG, param++, addrhi);
a0ce341a 1213 }
7ea5d725 1214 tcg_out_mov(s, TCG_TYPE_REG, param++, addrlo);
eef0d9e7 1215 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
7ea5d725 1216 tcg_out_mov(s, TCG_TYPE_REG, param++, datahi);
a0ce341a 1217 }
7ea5d725 1218 tcg_out_mov(s, TCG_TYPE_REG, param++, datalo);
53c37487 1219
7ea5d725
RH
1220 func = qemu_st_trampoline[memop];
1221 assert(func != 0);
1222 tcg_out_calli(s, func);
a0ce341a 1223 /* delay slot */
7ea5d725 1224 tcg_out_movi(s, TCG_TYPE_REG, param, memi);
f5ef6aac 1225
a0ce341a
RH
/* Back-patch the hit branch to skip over the slow path. */
1226 *label_ptr |= INSN_OFF19((unsigned long)s->code_ptr -
1227 (unsigned long)label_ptr);
8384dd67 1228#else
a0ce341a 1229 if (TCG_TARGET_REG_BITS == 64 && TARGET_LONG_BITS == 32) {
375816f8
RH
1230 tcg_out_arithi(s, TCG_REG_T1, addr_reg, 0, SHIFT_SRL);
1231 addr_reg = TCG_REG_T1;
f5ef6aac 1232 }
eef0d9e7 1233 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
375816f8 1234 tcg_out_arithi(s, TCG_REG_T1, datalo, 0, SHIFT_SRL);
a0ce341a 1235 tcg_out_arithi(s, TCG_REG_O2, datahi, 32, SHIFT_SLLX);
375816f8
RH
1236 tcg_out_arith(s, TCG_REG_O2, TCG_REG_T1, TCG_REG_O2, ARITH_OR);
1237 datalo = TCG_REG_O2;
a0ce341a 1238 }
c6f7e4fb
RH
1239 tcg_out_ldst_rr(s, datalo, addr_reg,
1240 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
eef0d9e7 1241 qemu_st_opc[memop]);
a0ce341a 1242#endif /* CONFIG_SOFTMMU */
f5ef6aac
BS
1243}
1244
/* Central dispatcher: emit SPARC host code for one TCG opcode.
   Many arithmetic cases merely select an encoding in C and jump to the
   shared tails gen_arith (two-operand) / gen_arith1 (one-operand). */
a9751609 1245static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
8289b279
BS
1246 const int *const_args)
1247{
1248 int c;
1249
1250 switch (opc) {
1251 case INDEX_op_exit_tb:
b3db8758 1252 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, args[0]);
aad2f06a 1253 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, JMPL);
b3db8758
BS
1254 tcg_out32(s, RESTORE | INSN_RD(TCG_REG_G0) | INSN_RS1(TCG_REG_G0) |
1255 INSN_RS2(TCG_REG_G0));
8289b279
BS
1256 break;
1257 case INDEX_op_goto_tb:
1258 if (s->tb_jmp_offset) {
1259 /* direct jump method */
5bbd2cae 1260 uint32_t old_insn = *(uint32_t *)s->code_ptr;
8289b279 1261 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
5bbd2cae
RH
1262 /* Make sure to preserve links during retranslation. */
1263 tcg_out32(s, CALL | (old_insn & ~INSN_OP(-1)));
8289b279
BS
1264 } else {
1265 /* indirect jump method */
c8fc56ce 1266 tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + args[0]));
aad2f06a 1267 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
8289b279 1268 }
53cd9273 1269 tcg_out_nop(s);
8289b279
BS
1270 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1271 break;
1272 case INDEX_op_call:
375816f8 1273 if (const_args[0]) {
aad2f06a 1274 tcg_out_calli(s, args[0]);
375816f8 1275 } else {
aad2f06a 1276 tcg_out_arithi(s, TCG_REG_O7, args[0], 0, JMPL);
8289b279 1277 }
4c3204cb
RH
1278 /* delay slot */
1279 tcg_out_nop(s);
8289b279 1280 break;
8289b279 1281 case INDEX_op_br:
a115f3ea 1282 tcg_out_bpcc(s, COND_A, BPCC_PT, args[0]);
f5ef6aac 1283 tcg_out_nop(s);
8289b279
BS
1284 break;
1285 case INDEX_op_movi_i32:
1286 tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
1287 break;
1288
a212ea75 1289#if TCG_TARGET_REG_BITS == 64
8289b279 1290#define OP_32_64(x) \
ba225198
RH
1291 glue(glue(case INDEX_op_, x), _i32): \
1292 glue(glue(case INDEX_op_, x), _i64)
8289b279
BS
1293#else
1294#define OP_32_64(x) \
ba225198 1295 glue(glue(case INDEX_op_, x), _i32)
8289b279 1296#endif
ba225198 1297 OP_32_64(ld8u):
8289b279
BS
1298 tcg_out_ldst(s, args[0], args[1], args[2], LDUB);
1299 break;
ba225198 1300 OP_32_64(ld8s):
8289b279
BS
1301 tcg_out_ldst(s, args[0], args[1], args[2], LDSB);
1302 break;
ba225198 1303 OP_32_64(ld16u):
8289b279
BS
1304 tcg_out_ldst(s, args[0], args[1], args[2], LDUH);
1305 break;
ba225198 1306 OP_32_64(ld16s):
8289b279
BS
1307 tcg_out_ldst(s, args[0], args[1], args[2], LDSH);
1308 break;
1309 case INDEX_op_ld_i32:
a212ea75 1310#if TCG_TARGET_REG_BITS == 64
53cd9273 1311 case INDEX_op_ld32u_i64:
8289b279
BS
1312#endif
1313 tcg_out_ldst(s, args[0], args[1], args[2], LDUW);
1314 break;
ba225198 1315 OP_32_64(st8):
8289b279
BS
1316 tcg_out_ldst(s, args[0], args[1], args[2], STB);
1317 break;
ba225198 1318 OP_32_64(st16):
8289b279
BS
1319 tcg_out_ldst(s, args[0], args[1], args[2], STH);
1320 break;
1321 case INDEX_op_st_i32:
a212ea75 1322#if TCG_TARGET_REG_BITS == 64
53cd9273 1323 case INDEX_op_st32_i64:
8289b279
BS
1324#endif
1325 tcg_out_ldst(s, args[0], args[1], args[2], STW);
1326 break;
ba225198 1327 OP_32_64(add):
53cd9273 1328 c = ARITH_ADD;
ba225198
RH
1329 goto gen_arith;
1330 OP_32_64(sub):
8289b279 1331 c = ARITH_SUB;
ba225198
RH
1332 goto gen_arith;
1333 OP_32_64(and):
8289b279 1334 c = ARITH_AND;
ba225198 1335 goto gen_arith;
dc69960d
RH
1336 OP_32_64(andc):
1337 c = ARITH_ANDN;
1338 goto gen_arith;
ba225198 1339 OP_32_64(or):
8289b279 1340 c = ARITH_OR;
ba225198 1341 goto gen_arith;
18c8f7a3
RH
1342 OP_32_64(orc):
1343 c = ARITH_ORN;
1344 goto gen_arith;
ba225198 1345 OP_32_64(xor):
8289b279 1346 c = ARITH_XOR;
ba225198 1347 goto gen_arith;
8289b279
BS
1348 case INDEX_op_shl_i32:
1349 c = SHIFT_SLL;
1fd95946
RH
1350 do_shift32:
1351 /* Limit immediate shift count lest we create an illegal insn. */
1352 tcg_out_arithc(s, args[0], args[1], args[2] & 31, const_args[2], c);
1353 break;
8289b279
BS
1354 case INDEX_op_shr_i32:
1355 c = SHIFT_SRL;
1fd95946 1356 goto do_shift32;
8289b279
BS
1357 case INDEX_op_sar_i32:
1358 c = SHIFT_SRA;
1fd95946 1359 goto do_shift32;
8289b279
BS
1360 case INDEX_op_mul_i32:
1361 c = ARITH_UMUL;
ba225198 1362 goto gen_arith;
583d1215 1363
4b5a85c1
RH
1364 OP_32_64(neg):
1365 c = ARITH_SUB;
1366 goto gen_arith1;
be6551b1
RH
1367 OP_32_64(not):
1368 c = ARITH_ORN;
1369 goto gen_arith1;
4b5a85c1 1370
583d1215
RH
1371 case INDEX_op_div_i32:
1372 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 0);
1373 break;
1374 case INDEX_op_divu_i32:
1375 tcg_out_div32(s, args[0], args[1], args[2], const_args[2], 1);
1376 break;
1377
8289b279 1378 case INDEX_op_brcond_i32:
1da92db2
BS
1379 tcg_out_brcond_i32(s, args[2], args[0], args[1], const_args[1],
1380 args[3]);
8289b279 1381 break;
dbfe80e1
RH
1382 case INDEX_op_setcond_i32:
1383 tcg_out_setcond_i32(s, args[3], args[0], args[1],
1384 args[2], const_args[2]);
1385 break;
ded37f0d
RH
1386 case INDEX_op_movcond_i32:
1387 tcg_out_movcond_i32(s, args[5], args[0], args[1],
1388 args[2], const_args[2], args[3], const_args[3]);
1389 break;
dbfe80e1 1390
56f4927e
RH
1391#if TCG_TARGET_REG_BITS == 32
1392 case INDEX_op_brcond2_i32:
1393 tcg_out_brcond2_i32(s, args[4], args[0], args[1],
1394 args[2], const_args[2],
1395 args[3], const_args[3], args[5]);
1396 break;
dbfe80e1
RH
1397 case INDEX_op_setcond2_i32:
1398 tcg_out_setcond2_i32(s, args[5], args[0], args[1], args[2],
1399 args[3], const_args[3],
1400 args[4], const_args[4]);
1401 break;
803d805b
RH
1402#endif
1403
7a3766f3 1404 case INDEX_op_add2_i32:
4ec28e25
RH
1405 tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
1406 args[4], const_args[4], args[5], const_args[5],
1407 ARITH_ADDCC, ARITH_ADDX);
7a3766f3
RH
1408 break;
1409 case INDEX_op_sub2_i32:
4ec28e25
RH
1410 tcg_out_addsub2(s, args[0], args[1], args[2], args[3],
1411 args[4], const_args[4], args[5], const_args[5],
1412 ARITH_SUBCC, ARITH_SUBX);
7a3766f3
RH
1413 break;
1414 case INDEX_op_mulu2_i32:
1415 tcg_out_arithc(s, args[0], args[2], args[3], const_args[3],
1416 ARITH_UMUL);
1417 tcg_out_rdy(s, args[1]);
1418 break;
8289b279
BS
1419
1420 case INDEX_op_qemu_ld8u:
eef0d9e7 1421 tcg_out_qemu_ld(s, args, MO_UB);
8289b279
BS
1422 break;
1423 case INDEX_op_qemu_ld8s:
eef0d9e7 1424 tcg_out_qemu_ld(s, args, MO_SB);
8289b279
BS
1425 break;
1426 case INDEX_op_qemu_ld16u:
eef0d9e7 1427 tcg_out_qemu_ld(s, args, MO_TEUW);
8289b279
BS
1428 break;
1429 case INDEX_op_qemu_ld16s:
eef0d9e7 1430 tcg_out_qemu_ld(s, args, MO_TESW);
8289b279 1431 break;
86feb1c8
RH
1432 case INDEX_op_qemu_ld32:
1433#if TCG_TARGET_REG_BITS == 64
8289b279 1434 case INDEX_op_qemu_ld32u:
86feb1c8 1435#endif
eef0d9e7 1436 tcg_out_qemu_ld(s, args, MO_TEUL);
8289b279 1437 break;
30c0c76c 1438#if TCG_TARGET_REG_BITS == 64
8289b279 1439 case INDEX_op_qemu_ld32s:
eef0d9e7 1440 tcg_out_qemu_ld(s, args, MO_TESL);
8289b279 1441 break;
30c0c76c 1442#endif
a0ce341a 1443 case INDEX_op_qemu_ld64:
eef0d9e7 1444 tcg_out_qemu_ld(s, args, MO_TEQ);
a0ce341a 1445 break;
8289b279 1446 case INDEX_op_qemu_st8:
eef0d9e7 1447 tcg_out_qemu_st(s, args, MO_UB);
8289b279
BS
1448 break;
1449 case INDEX_op_qemu_st16:
eef0d9e7 1450 tcg_out_qemu_st(s, args, MO_TEUW);
8289b279
BS
1451 break;
1452 case INDEX_op_qemu_st32:
eef0d9e7 1453 tcg_out_qemu_st(s, args, MO_TEUL);
8289b279 1454 break;
a0ce341a 1455 case INDEX_op_qemu_st64:
eef0d9e7 1456 tcg_out_qemu_st(s, args, MO_TEQ);
a0ce341a 1457 break;
8289b279 1458
a212ea75 1459#if TCG_TARGET_REG_BITS == 64
8289b279
BS
1460 case INDEX_op_movi_i64:
1461 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
1462 break;
53cd9273
BS
1463 case INDEX_op_ld32s_i64:
1464 tcg_out_ldst(s, args[0], args[1], args[2], LDSW);
1465 break;
8289b279
BS
1466 case INDEX_op_ld_i64:
1467 tcg_out_ldst(s, args[0], args[1], args[2], LDX);
1468 break;
1469 case INDEX_op_st_i64:
1470 tcg_out_ldst(s, args[0], args[1], args[2], STX);
1471 break;
1472 case INDEX_op_shl_i64:
1473 c = SHIFT_SLLX;
1fd95946
RH
1474 do_shift64:
1475 /* Limit immediate shift count lest we create an illegal insn. */
1476 tcg_out_arithc(s, args[0], args[1], args[2] & 63, const_args[2], c);
1477 break;
8289b279
BS
1478 case INDEX_op_shr_i64:
1479 c = SHIFT_SRLX;
1fd95946 1480 goto do_shift64;
8289b279
BS
1481 case INDEX_op_sar_i64:
1482 c = SHIFT_SRAX;
1fd95946 1483 goto do_shift64;
8289b279
BS
1484 case INDEX_op_mul_i64:
1485 c = ARITH_MULX;
ba225198 1486 goto gen_arith;
583d1215 1487 case INDEX_op_div_i64:
53cd9273 1488 c = ARITH_SDIVX;
ba225198 1489 goto gen_arith;
583d1215 1490 case INDEX_op_divu_i64:
8289b279 1491 c = ARITH_UDIVX;
ba225198 1492 goto gen_arith;
cc6dfecf 1493 case INDEX_op_ext32s_i64:
1d0a6068 1494 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRA);
cc6dfecf
RH
1495 break;
1496 case INDEX_op_ext32u_i64:
1d0a6068 1497 tcg_out_arithi(s, args[0], args[1], 0, SHIFT_SRL);
cc6dfecf 1498 break;
8289b279
BS
1499
1500 case INDEX_op_brcond_i64:
1da92db2
BS
1501 tcg_out_brcond_i64(s, args[2], args[0], args[1], const_args[1],
1502 args[3]);
8289b279 1503 break;
dbfe80e1
RH
1504 case INDEX_op_setcond_i64:
1505 tcg_out_setcond_i64(s, args[3], args[0], args[1],
1506 args[2], const_args[2]);
1507 break;
ded37f0d
RH
1508 case INDEX_op_movcond_i64:
1509 tcg_out_movcond_i64(s, args[5], args[0], args[1],
1510 args[2], const_args[2], args[3], const_args[3]);
1511 break;
8289b279 1512#endif
ba225198
RH
1513 gen_arith:
1514 tcg_out_arithc(s, args[0], args[1], args[2], const_args[2], c);
53cd9273
BS
1515 break;
1516
4b5a85c1
RH
1517 gen_arith1:
1518 tcg_out_arithc(s, args[0], TCG_REG_G0, args[1], const_args[1], c);
1519 break;
1520
8289b279
BS
1521 default:
1522 fprintf(stderr, "unknown opcode 0x%x\n", opc);
1523 tcg_abort();
1524 }
1525}
1526
/* Operand-constraint table for every opcode this backend implements.
   The letters are presumably decoded by this backend's
   target_parse_constraint (earlier in the file, not visible here):
   "r" any register, "L" a qemu_ld/st-constrained register, "Z"/"J"/"I"
   immediate-or-register classes, "0" matches output 0 -- verify there. */
1527static const TCGTargetOpDef sparc_op_defs[] = {
1528 { INDEX_op_exit_tb, { } },
b3db8758 1529 { INDEX_op_goto_tb, { } },
8289b279 1530 { INDEX_op_call, { "ri" } },
8289b279
BS
1531 { INDEX_op_br, { } },
1532
1533 { INDEX_op_mov_i32, { "r", "r" } },
1534 { INDEX_op_movi_i32, { "r" } },
1535 { INDEX_op_ld8u_i32, { "r", "r" } },
1536 { INDEX_op_ld8s_i32, { "r", "r" } },
1537 { INDEX_op_ld16u_i32, { "r", "r" } },
1538 { INDEX_op_ld16s_i32, { "r", "r" } },
1539 { INDEX_op_ld_i32, { "r", "r" } },
89269f6c
RH
1540 { INDEX_op_st8_i32, { "rZ", "r" } },
1541 { INDEX_op_st16_i32, { "rZ", "r" } },
1542 { INDEX_op_st_i32, { "rZ", "r" } },
1543
1544 { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
1545 { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
1546 { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
1547 { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
89269f6c
RH
1548 { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
1549 { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
1550 { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
1551 { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
1552 { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
1553 { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },
1554
1555 { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
1556 { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
1557 { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },
8289b279 1558
4b5a85c1 1559 { INDEX_op_neg_i32, { "r", "rJ" } },
be6551b1 1560 { INDEX_op_not_i32, { "r", "rJ" } },
4b5a85c1 1561
89269f6c
RH
1562 { INDEX_op_brcond_i32, { "rZ", "rJ" } },
1563 { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
1564 { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },
dbfe80e1 1565
56f4927e 1566#if TCG_TARGET_REG_BITS == 32
89269f6c
RH
1567 { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },
1568 { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rJ", "rJ" } },
803d805b
RH
1569#endif
1570
89269f6c
RH
1571 { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1572 { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1573 { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },
8289b279 1574
a212ea75 1575#if TCG_TARGET_REG_BITS == 64
8289b279
BS
1576 { INDEX_op_mov_i64, { "r", "r" } },
1577 { INDEX_op_movi_i64, { "r" } },
1578 { INDEX_op_ld8u_i64, { "r", "r" } },
1579 { INDEX_op_ld8s_i64, { "r", "r" } },
1580 { INDEX_op_ld16u_i64, { "r", "r" } },
1581 { INDEX_op_ld16s_i64, { "r", "r" } },
1582 { INDEX_op_ld32u_i64, { "r", "r" } },
1583 { INDEX_op_ld32s_i64, { "r", "r" } },
1584 { INDEX_op_ld_i64, { "r", "r" } },
89269f6c
RH
1585 { INDEX_op_st8_i64, { "rZ", "r" } },
1586 { INDEX_op_st16_i64, { "rZ", "r" } },
1587 { INDEX_op_st32_i64, { "rZ", "r" } },
1588 { INDEX_op_st_i64, { "rZ", "r" } },
1589
1590 { INDEX_op_add_i64, { "r", "rZ", "rJ" } },
1591 { INDEX_op_mul_i64, { "r", "rZ", "rJ" } },
1592 { INDEX_op_div_i64, { "r", "rZ", "rJ" } },
1593 { INDEX_op_divu_i64, { "r", "rZ", "rJ" } },
89269f6c
RH
1594 { INDEX_op_sub_i64, { "r", "rZ", "rJ" } },
1595 { INDEX_op_and_i64, { "r", "rZ", "rJ" } },
1596 { INDEX_op_andc_i64, { "r", "rZ", "rJ" } },
1597 { INDEX_op_or_i64, { "r", "rZ", "rJ" } },
1598 { INDEX_op_orc_i64, { "r", "rZ", "rJ" } },
1599 { INDEX_op_xor_i64, { "r", "rZ", "rJ" } },
1600
1601 { INDEX_op_shl_i64, { "r", "rZ", "rJ" } },
1602 { INDEX_op_shr_i64, { "r", "rZ", "rJ" } },
1603 { INDEX_op_sar_i64, { "r", "rZ", "rJ" } },
4b5a85c1
RH
1604
1605 { INDEX_op_neg_i64, { "r", "rJ" } },
be6551b1 1606 { INDEX_op_not_i64, { "r", "rJ" } },
4b5a85c1 1607
1d0a6068
RH
1608 { INDEX_op_ext32s_i64, { "r", "r" } },
1609 { INDEX_op_ext32u_i64, { "r", "r" } },
8289b279 1610
89269f6c
RH
1611 { INDEX_op_brcond_i64, { "rZ", "rJ" } },
1612 { INDEX_op_setcond_i64, { "r", "rZ", "rJ" } },
1613 { INDEX_op_movcond_i64, { "r", "rZ", "rJ", "rI", "0" } },
8289b279 1614#endif
a0ce341a
RH
1615
1616#if TCG_TARGET_REG_BITS == 64
1617 { INDEX_op_qemu_ld8u, { "r", "L" } },
1618 { INDEX_op_qemu_ld8s, { "r", "L" } },
1619 { INDEX_op_qemu_ld16u, { "r", "L" } },
1620 { INDEX_op_qemu_ld16s, { "r", "L" } },
1621 { INDEX_op_qemu_ld32, { "r", "L" } },
1622 { INDEX_op_qemu_ld32u, { "r", "L" } },
1623 { INDEX_op_qemu_ld32s, { "r", "L" } },
1624 { INDEX_op_qemu_ld64, { "r", "L" } },
1625
1626 { INDEX_op_qemu_st8, { "L", "L" } },
1627 { INDEX_op_qemu_st16, { "L", "L" } },
1628 { INDEX_op_qemu_st32, { "L", "L" } },
1629 { INDEX_op_qemu_st64, { "L", "L" } },
1630#elif TARGET_LONG_BITS <= TCG_TARGET_REG_BITS
1631 { INDEX_op_qemu_ld8u, { "r", "L" } },
1632 { INDEX_op_qemu_ld8s, { "r", "L" } },
1633 { INDEX_op_qemu_ld16u, { "r", "L" } },
1634 { INDEX_op_qemu_ld16s, { "r", "L" } },
1635 { INDEX_op_qemu_ld32, { "r", "L" } },
1636 { INDEX_op_qemu_ld64, { "r", "r", "L" } },
1637
1638 { INDEX_op_qemu_st8, { "L", "L" } },
1639 { INDEX_op_qemu_st16, { "L", "L" } },
1640 { INDEX_op_qemu_st32, { "L", "L" } },
3ee60ad4 1641 { INDEX_op_qemu_st64, { "L", "L", "L" } },
a0ce341a
RH
1642#else
1643 { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
1644 { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
1645 { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
1646 { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
1647 { INDEX_op_qemu_ld32, { "r", "L", "L" } },
1648 { INDEX_op_qemu_ld64, { "L", "L", "L", "L" } },
1649
1650 { INDEX_op_qemu_st8, { "L", "L", "L" } },
1651 { INDEX_op_qemu_st16, { "L", "L", "L" } },
1652 { INDEX_op_qemu_st32, { "L", "L", "L" } },
1653 { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
8289b279 1654#endif
a0ce341a 1655
8289b279
BS
1656 { -1 },
1657};
1658
/* One-time backend initialization: declare which host registers are
   allocatable, which are clobbered across calls (all %g and %o except
   the fixed ones below), and which are permanently reserved. */
e4d58b41 1659static void tcg_target_init(TCGContext *s)
8289b279
BS
1660{
1661 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
a212ea75 1662#if TCG_TARGET_REG_BITS == 64
8289b279
BS
1663 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
1664#endif
1665 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
b3db8758
BS
1666 (1 << TCG_REG_G1) |
1667 (1 << TCG_REG_G2) |
1668 (1 << TCG_REG_G3) |
1669 (1 << TCG_REG_G4) |
1670 (1 << TCG_REG_G5) |
1671 (1 << TCG_REG_G6) |
1672 (1 << TCG_REG_G7) |
8289b279
BS
1673 (1 << TCG_REG_O0) |
1674 (1 << TCG_REG_O1) |
1675 (1 << TCG_REG_O2) |
1676 (1 << TCG_REG_O3) |
1677 (1 << TCG_REG_O4) |
1678 (1 << TCG_REG_O5) |
8289b279
BS
1679 (1 << TCG_REG_O7));
1680
1681 tcg_regset_clear(s->reserved_regs);
375816f8
RH
1682 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1683 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1684 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1685 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1686 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1687 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1688 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1689 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1690
8289b279
BS
1691 tcg_add_target_add_op_defs(sparc_op_defs);
1692}
cb1977d3
RH
1693
1694#if TCG_TARGET_REG_BITS == 64
1695# define ELF_HOST_MACHINE EM_SPARCV9
9b9c37c3 1696#else
cb1977d3
RH
1697# define ELF_HOST_MACHINE EM_SPARC32PLUS
1698# define ELF_HOST_FLAGS EF_SPARC_32PLUS
cb1977d3
RH
1699#endif
1700
cb1977d3
RH
/* Layout of the DWARF .debug_frame blob handed to the JIT debug
   interface: one CIE followed by a single FDE describing generated code.
   DebugFrameCIE / DebugFrameFDEHeader come from common TCG headers --
   not visible in this chunk. */
1701typedef struct {
1702 DebugFrameCIE cie;
497a22eb
RH
1703 DebugFrameFDEHeader fde;
1704 uint8_t fde_def_cfa[TCG_TARGET_REG_BITS == 64 ? 4 : 2];
1705 uint8_t fde_win_save;
1706 uint8_t fde_ret_save[3];
cb1977d3
RH
1707} DebugFrame;
1708
/* Static template for the unwind info; func_start/func_len are filled
   in at runtime by tcg_register_jit().  The raw bytes below are DWARF
   call-frame instructions (DW_CFA_*) as annotated. */
1709static DebugFrame debug_frame = {
1710 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1711 .cie.id = -1,
1712 .cie.version = 1,
1713 .cie.code_align = 1,
1714 .cie.data_align = -sizeof(void *) & 0x7f,
1715 .cie.return_column = 15, /* o7 */
1716
497a22eb
RH
1717 /* Total FDE size does not include the "len" member. */
1718 .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
1719
1720 .fde_def_cfa = {
cb1977d3
RH
1721#if TCG_TARGET_REG_BITS == 64
1722 12, 30, /* DW_CFA_def_cfa i6, 2047 */
1723 (2047 & 0x7f) | 0x80, (2047 >> 7)
1724#else
1725 13, 30 /* DW_CFA_def_cfa_register i6 */
1726#endif
1727 },
497a22eb
RH
1728 .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */
1729 .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
cb1977d3
RH
1730};
1731
/* Register the generated-code region [buf, buf+buf_size) with the JIT
   debugging interface, after patching its address range into the
   file-global debug_frame FDE above. */
1732void tcg_register_jit(void *buf, size_t buf_size)
1733{
c8fc56ce 1734 debug_frame.fde.func_start = (uintptr_t)buf;
cb1977d3
RH
1735 debug_frame.fde.func_len = buf_size;
1736
1737 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1738}
5bbd2cae
RH
1739
1740void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
1741{
1742 uint32_t *ptr = (uint32_t *)jmp_addr;
c8fc56ce 1743 uintptr_t disp = addr - jmp_addr;
5bbd2cae
RH
1744
1745 /* We can reach the entire address space for 32-bit. For 64-bit
1746 the code_gen_buffer can't be larger than 2GB. */
c8fc56ce 1747 assert(disp == (int32_t)disp);
5bbd2cae 1748
c8fc56ce 1749 *ptr = CALL | (uint32_t)disp >> 2;
5bbd2cae
RH
1750 flush_icache_range(jmp_addr, jmp_addr + 4);
1751}