]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/sparc/tcg-target.inc.c
tcg: Add tcg_op_supported
[mirror_qemu.git] / tcg / sparc / tcg-target.inc.c
CommitLineData
8289b279
BS
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
e9823b4c
RH
25#include "tcg-pool.inc.c"
26
#ifdef CONFIG_DEBUG_TCG
/* Human-readable names for the 32 integer registers, for debug dumps. */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0", "%g1", "%g2", "%g3", "%g4", "%g5", "%g6", "%g7",
    "%o0", "%o1", "%o2", "%o3", "%o4", "%o5", "%o6", "%o7",
    "%l0", "%l1", "%l2", "%l3", "%l4", "%l5", "%l6", "%l7",
    "%i0", "%i1", "%i2", "%i3", "%i4", "%i5", "%i6", "%i7",
};
#endif
8289b279 63
9f44adc5
RH
64#ifdef __arch64__
65# define SPARC64 1
66#else
67# define SPARC64 0
68#endif
69
34b1a49c
RH
70/* Note that sparcv8plus can only hold 64 bit quantities in %g and %o
71 registers. These are saved manually by the kernel in full 64-bit
72 slots. The %i and %l registers are saved by the register window
73 mechanism, which only allocates space for 32 bits. Given that this
74 window spill/fill can happen on any signal, we must consider the
75 high bits of the %i and %l registers garbage at all times. */
76#if SPARC64
77# define ALL_64 0xffffffffu
78#else
79# define ALL_64 0xffffu
80#endif
81
375816f8
RH
82/* Define some temporary registers. T2 is used for constant generation. */
83#define TCG_REG_T1 TCG_REG_G1
84#define TCG_REG_T2 TCG_REG_O7
85
4cbea598 86#ifndef CONFIG_SOFTMMU
375816f8 87# define TCG_GUEST_BASE_REG TCG_REG_I5
c6f7e4fb 88#endif
e141ab52 89
ab20bdc1
RH
90#define TCG_REG_TB TCG_REG_I1
91#define USE_REG_TB (sizeof(void *) > 4)
92
0954d0d9 93static const int tcg_target_reg_alloc_order[] = {
8289b279
BS
94 TCG_REG_L0,
95 TCG_REG_L1,
96 TCG_REG_L2,
97 TCG_REG_L3,
98 TCG_REG_L4,
99 TCG_REG_L5,
100 TCG_REG_L6,
101 TCG_REG_L7,
26adfb75 102
8289b279
BS
103 TCG_REG_I0,
104 TCG_REG_I1,
105 TCG_REG_I2,
106 TCG_REG_I3,
107 TCG_REG_I4,
375816f8 108 TCG_REG_I5,
26adfb75
RH
109
110 TCG_REG_G2,
111 TCG_REG_G3,
112 TCG_REG_G4,
113 TCG_REG_G5,
114
115 TCG_REG_O0,
116 TCG_REG_O1,
117 TCG_REG_O2,
118 TCG_REG_O3,
119 TCG_REG_O4,
120 TCG_REG_O5,
8289b279
BS
121};
122
123static const int tcg_target_call_iarg_regs[6] = {
124 TCG_REG_O0,
125 TCG_REG_O1,
126 TCG_REG_O2,
127 TCG_REG_O3,
128 TCG_REG_O4,
129 TCG_REG_O5,
130};
131
26a74ae3 132static const int tcg_target_call_oarg_regs[] = {
8289b279 133 TCG_REG_O0,
e141ab52
BS
134 TCG_REG_O1,
135 TCG_REG_O2,
136 TCG_REG_O3,
8289b279
BS
137};
138
8289b279
BS
139#define INSN_OP(x) ((x) << 30)
140#define INSN_OP2(x) ((x) << 22)
141#define INSN_OP3(x) ((x) << 19)
142#define INSN_OPF(x) ((x) << 5)
143#define INSN_RD(x) ((x) << 25)
144#define INSN_RS1(x) ((x) << 14)
145#define INSN_RS2(x) (x)
8384dd67 146#define INSN_ASI(x) ((x) << 5)
8289b279 147
203342d8 148#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
dbfe80e1 149#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
8289b279 150#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
ab1339b9 151#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
1da92db2 152#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
a115f3ea 153#define INSN_COND(x) ((x) << 25)
8289b279 154
cf7c2ca5
BS
155#define COND_N 0x0
156#define COND_E 0x1
157#define COND_LE 0x2
158#define COND_L 0x3
159#define COND_LEU 0x4
160#define COND_CS 0x5
161#define COND_NEG 0x6
162#define COND_VS 0x7
b3db8758 163#define COND_A 0x8
cf7c2ca5
BS
164#define COND_NE 0x9
165#define COND_G 0xa
166#define COND_GE 0xb
167#define COND_GU 0xc
168#define COND_CC 0xd
169#define COND_POS 0xe
170#define COND_VC 0xf
a115f3ea 171#define BA (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))
8289b279 172
ab1339b9
RH
173#define RCOND_Z 1
174#define RCOND_LEZ 2
175#define RCOND_LZ 3
176#define RCOND_NZ 5
177#define RCOND_GZ 6
178#define RCOND_GEZ 7
179
dbfe80e1
RH
180#define MOVCC_ICC (1 << 18)
181#define MOVCC_XCC (1 << 18 | 1 << 12)
182
a115f3ea
RH
183#define BPCC_ICC 0
184#define BPCC_XCC (2 << 20)
185#define BPCC_PT (1 << 19)
186#define BPCC_PN 0
187#define BPCC_A (1 << 29)
188
ab1339b9
RH
189#define BPR_PT BPCC_PT
190
8289b279 191#define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
7a3766f3 192#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
8289b279 193#define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
dc69960d 194#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
8289b279 195#define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
9a7f3228 196#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
be6551b1 197#define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06))
8289b279 198#define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
f5ef6aac
BS
199#define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
200#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
c470b663
RH
201#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
202#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
8289b279 203#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
f4c16661 204#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
8289b279
BS
205#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
206#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
207#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
208#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
209#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
dbfe80e1 210#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
203342d8 211#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
8289b279 212
90379ca8 213#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
de8301e5 214#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))
90379ca8 215
8289b279
BS
216#define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
217#define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
218#define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))
219
220#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
221#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
222#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
223
7a3766f3 224#define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
583d1215 225#define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
8289b279 226#define JMPL (INSN_OP(2) | INSN_OP3(0x38))
8b66eefe 227#define RETURN (INSN_OP(2) | INSN_OP3(0x39))
8289b279
BS
228#define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
229#define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
230#define SETHI (INSN_OP(0) | INSN_OP2(0x4))
231#define CALL INSN_OP(1)
232#define LDUB (INSN_OP(3) | INSN_OP3(0x01))
233#define LDSB (INSN_OP(3) | INSN_OP3(0x09))
234#define LDUH (INSN_OP(3) | INSN_OP3(0x02))
235#define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
236#define LDUW (INSN_OP(3) | INSN_OP3(0x00))
237#define LDSW (INSN_OP(3) | INSN_OP3(0x08))
238#define LDX (INSN_OP(3) | INSN_OP3(0x0b))
239#define STB (INSN_OP(3) | INSN_OP3(0x05))
240#define STH (INSN_OP(3) | INSN_OP3(0x06))
241#define STW (INSN_OP(3) | INSN_OP3(0x04))
242#define STX (INSN_OP(3) | INSN_OP3(0x0e))
8384dd67
BS
243#define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
244#define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
245#define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
246#define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
247#define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
248#define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
249#define LDXA (INSN_OP(3) | INSN_OP3(0x1b))
250#define STBA (INSN_OP(3) | INSN_OP3(0x15))
251#define STHA (INSN_OP(3) | INSN_OP3(0x16))
252#define STWA (INSN_OP(3) | INSN_OP3(0x14))
253#define STXA (INSN_OP(3) | INSN_OP3(0x1e))
254
f8f03b37
PK
255#define MEMBAR (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))
256
ab20bdc1
RH
257#define NOP (SETHI | INSN_RD(TCG_REG_G0) | 0)
258
8384dd67
BS
259#ifndef ASI_PRIMARY_LITTLE
260#define ASI_PRIMARY_LITTLE 0x88
261#endif
8289b279 262
a0ce341a
RH
263#define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
264#define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
265#define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
266#define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
267#define LDX_LE (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE))
268
269#define STH_LE (STHA | INSN_ASI(ASI_PRIMARY_LITTLE))
270#define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
271#define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))
272
90379ca8
RH
273#ifndef use_vis3_instructions
274bool use_vis3_instructions;
275#endif
276
/* Return true iff VAL is representable as a BITS-wide signed immediate,
   i.e. sign-extending its low BITS bits reproduces the full value. */
static inline int check_fit_i64(int64_t val, unsigned int bits)
{
    return sextract64(val, 0, bits) == val;
}
281
/* 32-bit variant of check_fit_i64: true iff VAL fits in a BITS-wide
   signed immediate. */
static inline int check_fit_i32(int32_t val, unsigned int bits)
{
    return sextract32(val, 0, bits) == val;
}
286
425532d7
RH
287#define check_fit_tl check_fit_i64
288#if SPARC64
289# define check_fit_ptr check_fit_i64
290#else
291# define check_fit_ptr check_fit_i32
292#endif
293
abce5964 294static void patch_reloc(tcg_insn_unit *code_ptr, int type,
2ba7fae2 295 intptr_t value, intptr_t addend)
a115f3ea 296{
e9823b4c
RH
297 uint32_t insn = *code_ptr;
298 intptr_t pcrel;
abce5964 299
e9823b4c
RH
300 value += addend;
301 pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr);
abce5964 302
a115f3ea 303 switch (type) {
ab1339b9 304 case R_SPARC_WDISP16:
e9823b4c 305 assert(check_fit_ptr(pcrel >> 2, 16));
ab1339b9 306 insn &= ~INSN_OFF16(-1);
e9823b4c 307 insn |= INSN_OFF16(pcrel);
ab1339b9 308 break;
a115f3ea 309 case R_SPARC_WDISP19:
e9823b4c 310 assert(check_fit_ptr(pcrel >> 2, 19));
a115f3ea 311 insn &= ~INSN_OFF19(-1);
e9823b4c
RH
312 insn |= INSN_OFF19(pcrel);
313 break;
314 case R_SPARC_13:
315 /* Note that we're abusing this reloc type for our own needs. */
316 if (!check_fit_ptr(value, 13)) {
317 int adj = (value > 0 ? 0xff8 : -0x1000);
318 value -= adj;
319 assert(check_fit_ptr(value, 13));
320 *code_ptr++ = (ARITH_ADD | INSN_RD(TCG_REG_T2)
321 | INSN_RS1(TCG_REG_TB) | INSN_IMM13(adj));
322 insn ^= INSN_RS1(TCG_REG_TB) ^ INSN_RS1(TCG_REG_T2);
323 }
324 insn &= ~INSN_IMM13(-1);
325 insn |= INSN_IMM13(value);
a115f3ea 326 break;
e9823b4c
RH
327 case R_SPARC_32:
328 /* Note that we're abusing this reloc type for our own needs. */
329 code_ptr[0] = deposit32(code_ptr[0], 0, 22, value >> 10);
330 code_ptr[1] = deposit32(code_ptr[1], 0, 10, value);
331 return;
a115f3ea 332 default:
e9823b4c 333 g_assert_not_reached();
a115f3ea 334 }
e9823b4c
RH
335
336 *code_ptr = insn;
a115f3ea
RH
337}
338
339/* parse target specific constraints */
069ea736
RH
340static const char *target_parse_constraint(TCGArgConstraint *ct,
341 const char *ct_str, TCGType type)
a115f3ea 342{
069ea736 343 switch (*ct_str++) {
a115f3ea
RH
344 case 'r':
345 ct->ct |= TCG_CT_REG;
346 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
347 break;
34b1a49c 348 case 'R':
a115f3ea 349 ct->ct |= TCG_CT_REG;
34b1a49c
RH
350 tcg_regset_set32(ct->u.regs, 0, ALL_64);
351 break;
352 case 'A': /* qemu_ld/st address constraint */
353 ct->ct |= TCG_CT_REG;
354 tcg_regset_set32(ct->u.regs, 0,
355 TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff);
356 reserve_helpers:
a115f3ea
RH
357 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
358 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
359 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
360 break;
34b1a49c
RH
361 case 's': /* qemu_st data 32-bit constraint */
362 ct->ct |= TCG_CT_REG;
363 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
364 goto reserve_helpers;
365 case 'S': /* qemu_st data 64-bit constraint */
366 ct->ct |= TCG_CT_REG;
367 tcg_regset_set32(ct->u.regs, 0, ALL_64);
368 goto reserve_helpers;
a115f3ea
RH
369 case 'I':
370 ct->ct |= TCG_CT_CONST_S11;
371 break;
372 case 'J':
373 ct->ct |= TCG_CT_CONST_S13;
374 break;
375 case 'Z':
376 ct->ct |= TCG_CT_CONST_ZERO;
377 break;
378 default:
069ea736 379 return NULL;
a115f3ea 380 }
069ea736 381 return ct_str;
a115f3ea
RH
382}
383
384/* test if a constant matches the constraint */
f6c6afc1 385static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
a115f3ea
RH
386 const TCGArgConstraint *arg_ct)
387{
388 int ct = arg_ct->ct;
389
390 if (ct & TCG_CT_CONST) {
391 return 1;
4b304cfa
RH
392 }
393
394 if (type == TCG_TYPE_I32) {
395 val = (int32_t)val;
396 }
397
398 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
a115f3ea
RH
399 return 1;
400 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
401 return 1;
402 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
403 return 1;
404 } else {
405 return 0;
406 }
407}
408
35e2da15
RH
409static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
410 TCGReg rs2, int op)
26cc915c 411{
35e2da15 412 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
26cc915c
BS
413}
414
35e2da15
RH
415static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
416 int32_t offset, int op)
26cc915c 417{
35e2da15 418 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
26cc915c
BS
419}
420
35e2da15
RH
421static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
422 int32_t val2, int val2const, int op)
ba225198
RH
423{
424 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
425 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
426}
427
2a534aff
RH
428static inline void tcg_out_mov(TCGContext *s, TCGType type,
429 TCGReg ret, TCGReg arg)
8289b279 430{
dda73c78
RH
431 if (ret != arg) {
432 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
433 }
26cc915c
BS
434}
435
35e2da15 436static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
26cc915c
BS
437{
438 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
8289b279
BS
439}
440
35e2da15 441static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
b101234a
BS
442{
443 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
444}
445
ab20bdc1
RH
446static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
447 tcg_target_long arg, bool in_prologue)
8289b279 448{
425532d7 449 tcg_target_long hi, lo = (int32_t)arg;
ab20bdc1 450 tcg_target_long test, lsb;
a9c7d27b 451
035b2398
RH
452 /* Make sure we test 32-bit constants for imm13 properly. */
453 if (type == TCG_TYPE_I32) {
454 arg = lo;
455 }
456
a9c7d27b
RH
457 /* A 13-bit constant sign-extended to 64-bits. */
458 if (check_fit_tl(arg, 13)) {
b101234a 459 tcg_out_movi_imm13(s, ret, arg);
a9c7d27b 460 return;
8289b279 461 }
8289b279 462
a9c7d27b 463 /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
34b1a49c 464 if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
a9c7d27b
RH
465 tcg_out_sethi(s, ret, arg);
466 if (arg & 0x3ff) {
467 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
468 }
469 return;
470 }
471
472 /* A 32-bit constant sign-extended to 64-bits. */
425532d7 473 if (arg == lo) {
43172207
RH
474 tcg_out_sethi(s, ret, ~arg);
475 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
a9c7d27b
RH
476 return;
477 }
478
ab20bdc1
RH
479 /* A 21-bit constant, shifted. */
480 lsb = ctz64(arg);
481 test = (tcg_target_long)arg >> lsb;
482 if (check_fit_tl(test, 13)) {
483 tcg_out_movi_imm13(s, ret, test);
484 tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
485 return;
486 } else if (lsb > 10 && test == extract64(test, 0, 21)) {
487 tcg_out_sethi(s, ret, test << 10);
488 tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
489 return;
490 }
491
e9823b4c
RH
492 if (!in_prologue) {
493 if (USE_REG_TB) {
494 intptr_t diff = arg - (uintptr_t)s->code_gen_ptr;
495 if (check_fit_ptr(diff, 13)) {
496 tcg_out_arithi(s, ret, TCG_REG_TB, diff, ARITH_ADD);
497 } else {
498 new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
499 -(intptr_t)s->code_gen_ptr);
500 tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
501 /* May be used to extend the 13-bit range in patch_reloc. */
502 tcg_out32(s, NOP);
503 }
504 } else {
505 new_pool_label(s, arg, R_SPARC_32, s->code_ptr, 0);
506 tcg_out_sethi(s, ret, 0);
507 tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(ret) | INSN_IMM13(0));
ab20bdc1 508 }
e9823b4c 509 return;
ab20bdc1
RH
510 }
511
a9c7d27b 512 /* A 64-bit constant decomposed into 2 32-bit pieces. */
425532d7 513 if (check_fit_i32(lo, 13)) {
34b1a49c 514 hi = (arg - lo) >> 32;
a9c7d27b
RH
515 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
516 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
517 tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
43172207 518 } else {
34b1a49c 519 hi = arg >> 32;
a9c7d27b
RH
520 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
521 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
375816f8 522 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
375816f8 523 tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
6f41b777 524 }
b101234a
BS
525}
526
ab20bdc1
RH
527static inline void tcg_out_movi(TCGContext *s, TCGType type,
528 TCGReg ret, tcg_target_long arg)
529{
530 tcg_out_movi_int(s, type, ret, arg, false);
531}
532
35e2da15
RH
533static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
534 TCGReg a2, int op)
8289b279 535{
a0ce341a 536 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
8289b279
BS
537}
538
35e2da15
RH
539static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
540 intptr_t offset, int op)
8289b279 541{
425532d7 542 if (check_fit_ptr(offset, 13)) {
8289b279
BS
543 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
544 INSN_IMM13(offset));
a0ce341a 545 } else {
375816f8
RH
546 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
547 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
cf7c2ca5 548 }
8289b279
BS
549}
550
2a534aff 551static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
a05b5b9b 552 TCGReg arg1, intptr_t arg2)
8289b279 553{
a0ce341a 554 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
8289b279
BS
555}
556
2a534aff 557static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
a05b5b9b 558 TCGReg arg1, intptr_t arg2)
8289b279 559{
a0ce341a
RH
560 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
561}
562
59d7c14e
RH
563static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
564 TCGReg base, intptr_t ofs)
565{
566 if (val == 0) {
567 tcg_out_st(s, type, TCG_REG_G0, base, ofs);
568 return true;
569 }
570 return false;
571}
572
35e2da15 573static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
a0ce341a 574{
ab20bdc1
RH
575 intptr_t diff = arg - (uintptr_t)s->code_gen_ptr;
576 if (USE_REG_TB && check_fit_ptr(diff, 13)) {
577 tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
578 return;
579 }
35e2da15
RH
580 tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
581 tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
8289b279
BS
582}
583
35e2da15 584static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
8289b279 585{
583d1215 586 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
8289b279
BS
587}
588
35e2da15 589static inline void tcg_out_rdy(TCGContext *s, TCGReg rd)
7a3766f3
RH
590{
591 tcg_out32(s, RDY | INSN_RD(rd));
592}
593
35e2da15
RH
594static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
595 int32_t val2, int val2const, int uns)
583d1215
RH
596{
597 /* Load Y with the sign/zero extension of RS1 to 64-bits. */
598 if (uns) {
599 tcg_out_sety(s, TCG_REG_G0);
600 } else {
375816f8
RH
601 tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
602 tcg_out_sety(s, TCG_REG_T1);
583d1215
RH
603 }
604
605 tcg_out_arithc(s, rd, rs1, val2, val2const,
606 uns ? ARITH_UDIV : ARITH_SDIV);
607}
608
8289b279
BS
609static inline void tcg_out_nop(TCGContext *s)
610{
ab20bdc1 611 tcg_out32(s, NOP);
8289b279
BS
612}
613
0aed257f 614static const uint8_t tcg_cond_to_bcond[] = {
cf7c2ca5
BS
615 [TCG_COND_EQ] = COND_E,
616 [TCG_COND_NE] = COND_NE,
617 [TCG_COND_LT] = COND_L,
618 [TCG_COND_GE] = COND_GE,
619 [TCG_COND_LE] = COND_LE,
620 [TCG_COND_GT] = COND_G,
621 [TCG_COND_LTU] = COND_CS,
622 [TCG_COND_GEU] = COND_CC,
623 [TCG_COND_LEU] = COND_LEU,
624 [TCG_COND_GTU] = COND_GU,
625};
626
ab1339b9
RH
627static const uint8_t tcg_cond_to_rcond[] = {
628 [TCG_COND_EQ] = RCOND_Z,
629 [TCG_COND_NE] = RCOND_NZ,
630 [TCG_COND_LT] = RCOND_LZ,
631 [TCG_COND_GT] = RCOND_GZ,
632 [TCG_COND_LE] = RCOND_LEZ,
633 [TCG_COND_GE] = RCOND_GEZ
634};
635
a115f3ea
RH
636static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
637{
638 tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
639}
640
bec16311 641static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
a115f3ea 642{
a115f3ea
RH
643 int off19;
644
645 if (l->has_value) {
abce5964 646 off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
a115f3ea
RH
647 } else {
648 /* Make sure to preserve destinations during retranslation. */
abce5964 649 off19 = *s->code_ptr & INSN_OFF19(-1);
bec16311 650 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
a115f3ea
RH
651 }
652 tcg_out_bpcc0(s, scond, flags, off19);
653}
654
35e2da15 655static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
56f4927e 656{
ba225198 657 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
56f4927e
RH
658}
659
35e2da15 660static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
bec16311 661 int32_t arg2, int const_arg2, TCGLabel *l)
cf7c2ca5 662{
56f4927e 663 tcg_out_cmp(s, arg1, arg2, const_arg2);
bec16311 664 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
cf7c2ca5
BS
665 tcg_out_nop(s);
666}
667
35e2da15
RH
668static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
669 int32_t v1, int v1const)
ded37f0d
RH
670{
671 tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
672 | INSN_RS1(tcg_cond_to_bcond[cond])
673 | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
674}
675
35e2da15
RH
676static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
677 TCGReg c1, int32_t c2, int c2const,
678 int32_t v1, int v1const)
ded37f0d
RH
679{
680 tcg_out_cmp(s, c1, c2, c2const);
681 tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
682}
683
35e2da15 684static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
bec16311 685 int32_t arg2, int const_arg2, TCGLabel *l)
1da92db2 686{
ab1339b9
RH
687 /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
688 if (arg2 == 0 && !is_unsigned_cond(cond)) {
ab1339b9
RH
689 int off16;
690
691 if (l->has_value) {
abce5964 692 off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
ab1339b9
RH
693 } else {
694 /* Make sure to preserve destinations during retranslation. */
abce5964 695 off16 = *s->code_ptr & INSN_OFF16(-1);
bec16311 696 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
ab1339b9
RH
697 }
698 tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
699 | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
700 } else {
701 tcg_out_cmp(s, arg1, arg2, const_arg2);
bec16311 702 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
ab1339b9 703 }
1da92db2
BS
704 tcg_out_nop(s);
705}
ded37f0d 706
35e2da15
RH
707static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
708 int32_t v1, int v1const)
203342d8
RH
709{
710 tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
711 | (tcg_cond_to_rcond[cond] << 10)
712 | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
713}
714
35e2da15
RH
715static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
716 TCGReg c1, int32_t c2, int c2const,
717 int32_t v1, int v1const)
ded37f0d 718{
203342d8
RH
719 /* For 64-bit signed comparisons vs zero, we can avoid the compare.
720 Note that the immediate range is one bit smaller, so we must check
721 for that as well. */
722 if (c2 == 0 && !is_unsigned_cond(cond)
35e2da15 723 && (!v1const || check_fit_i32(v1, 10))) {
203342d8
RH
724 tcg_out_movr(s, cond, ret, c1, v1, v1const);
725 } else {
726 tcg_out_cmp(s, c1, c2, c2const);
727 tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
728 }
ded37f0d 729}
1da92db2 730
35e2da15
RH
731static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
732 TCGReg c1, int32_t c2, int c2const)
dbfe80e1 733{
c470b663 734 /* For 32-bit comparisons, we can play games with ADDC/SUBC. */
dbfe80e1 735 switch (cond) {
7d458a75
RH
736 case TCG_COND_LTU:
737 case TCG_COND_GEU:
738 /* The result of the comparison is in the carry bit. */
739 break;
740
dbfe80e1
RH
741 case TCG_COND_EQ:
742 case TCG_COND_NE:
7d458a75 743 /* For equality, we can transform to inequality vs zero. */
dbfe80e1 744 if (c2 != 0) {
321b6c05
RH
745 tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
746 c2 = TCG_REG_T1;
747 } else {
748 c2 = c1;
dbfe80e1 749 }
321b6c05 750 c1 = TCG_REG_G0, c2const = 0;
7d458a75 751 cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
dbfe80e1
RH
752 break;
753
754 case TCG_COND_GTU:
dbfe80e1 755 case TCG_COND_LEU:
7d458a75
RH
756 /* If we don't need to load a constant into a register, we can
757 swap the operands on GTU/LEU. There's no benefit to loading
758 the constant into a temporary register. */
759 if (!c2const || c2 == 0) {
35e2da15 760 TCGReg t = c1;
7d458a75
RH
761 c1 = c2;
762 c2 = t;
763 c2const = 0;
764 cond = tcg_swap_cond(cond);
765 break;
766 }
767 /* FALLTHRU */
dbfe80e1
RH
768
769 default:
770 tcg_out_cmp(s, c1, c2, c2const);
dbfe80e1 771 tcg_out_movi_imm13(s, ret, 0);
ded37f0d 772 tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
dbfe80e1
RH
773 return;
774 }
775
776 tcg_out_cmp(s, c1, c2, c2const);
777 if (cond == TCG_COND_LTU) {
c470b663 778 tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
dbfe80e1 779 } else {
c470b663 780 tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
dbfe80e1
RH
781 }
782}
783
35e2da15
RH
784static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
785 TCGReg c1, int32_t c2, int c2const)
dbfe80e1 786{
9d6a7a85
RH
787 if (use_vis3_instructions) {
788 switch (cond) {
789 case TCG_COND_NE:
790 if (c2 != 0) {
791 break;
792 }
793 c2 = c1, c2const = 0, c1 = TCG_REG_G0;
794 /* FALLTHRU */
795 case TCG_COND_LTU:
796 tcg_out_cmp(s, c1, c2, c2const);
797 tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
798 return;
799 default:
800 break;
801 }
802 }
803
203342d8
RH
804 /* For 64-bit signed comparisons vs zero, we can avoid the compare
805 if the input does not overlap the output. */
806 if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
807 tcg_out_movi_imm13(s, ret, 0);
808 tcg_out_movr(s, cond, ret, c1, 1, 1);
809 } else {
810 tcg_out_cmp(s, c1, c2, c2const);
811 tcg_out_movi_imm13(s, ret, 0);
812 tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
813 }
dbfe80e1 814}
4ec28e25 815
609ac1e1
RH
816static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
817 TCGReg al, TCGReg ah, int32_t bl, int blconst,
818 int32_t bh, int bhconst, int opl, int oph)
4ec28e25 819{
35e2da15 820 TCGReg tmp = TCG_REG_T1;
4ec28e25
RH
821
822 /* Note that the low parts are fully consumed before tmp is set. */
823 if (rl != ah && (bhconst || rl != bh)) {
824 tmp = rl;
825 }
826
827 tcg_out_arithc(s, tmp, al, bl, blconst, opl);
828 tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
829 tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
830}
dbfe80e1 831
609ac1e1
RH
832static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
833 TCGReg al, TCGReg ah, int32_t bl, int blconst,
834 int32_t bh, int bhconst, bool is_sub)
835{
836 TCGReg tmp = TCG_REG_T1;
837
838 /* Note that the low parts are fully consumed before tmp is set. */
839 if (rl != ah && (bhconst || rl != bh)) {
840 tmp = rl;
841 }
842
843 tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
844
90379ca8
RH
845 if (use_vis3_instructions && !is_sub) {
846 /* Note that ADDXC doesn't accept immediates. */
847 if (bhconst && bh != 0) {
848 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
849 bh = TCG_REG_T2;
850 }
851 tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
852 } else if (bh == TCG_REG_G0) {
609ac1e1
RH
853 /* If we have a zero, we can perform the operation in two insns,
854 with the arithmetic first, and a conditional move into place. */
855 if (rh == ah) {
856 tcg_out_arithi(s, TCG_REG_T2, ah, 1,
857 is_sub ? ARITH_SUB : ARITH_ADD);
858 tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
859 } else {
860 tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
861 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
862 }
863 } else {
864 /* Otherwise adjust BH as if there is carry into T2 ... */
865 if (bhconst) {
866 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
867 } else {
868 tcg_out_arithi(s, TCG_REG_T2, bh, 1,
869 is_sub ? ARITH_SUB : ARITH_ADD);
870 }
871 /* ... smoosh T2 back to original BH if carry is clear ... */
872 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
873 /* ... and finally perform the arithmetic with the new operand. */
874 tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
875 }
876
877 tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
878}
879
ab20bdc1
RH
880static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest,
881 bool in_prologue)
aad2f06a 882{
abce5964 883 ptrdiff_t disp = tcg_pcrel_diff(s, dest);
aad2f06a
RH
884
885 if (disp == (int32_t)disp) {
886 tcg_out32(s, CALL | (uint32_t)disp >> 2);
887 } else {
abce5964 888 uintptr_t desti = (uintptr_t)dest;
ab20bdc1
RH
889 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
890 desti & ~0xfff, in_prologue);
abce5964 891 tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
aad2f06a
RH
892 }
893}
894
4e9cf840
RH
895static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
896{
ab20bdc1 897 tcg_out_call_nodelay(s, dest, false);
4e9cf840
RH
898 tcg_out_nop(s);
899}
900
f8f03b37
PK
901static void tcg_out_mb(TCGContext *s, TCGArg a0)
902{
903 /* Note that the TCG memory order constants mirror the Sparc MEMBAR. */
904 tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
905}
906
7ea5d725 907#ifdef CONFIG_SOFTMMU
abce5964
RH
908static tcg_insn_unit *qemu_ld_trampoline[16];
909static tcg_insn_unit *qemu_st_trampoline[16];
7ea5d725 910
709a340d
PM
911static void emit_extend(TCGContext *s, TCGReg r, int op)
912{
913 /* Emit zero extend of 8, 16 or 32 bit data as
914 * required by the MO_* value op; do nothing for 64 bit.
915 */
916 switch (op & MO_SIZE) {
917 case MO_8:
918 tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
919 break;
920 case MO_16:
921 tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
922 tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
923 break;
924 case MO_32:
925 if (SPARC64) {
926 tcg_out_arith(s, r, r, 0, SHIFT_SRL);
927 }
928 break;
929 case MO_64:
930 break;
931 }
932}
933
7ea5d725
RH
/* Emit one trampoline per (size, endian, sign) memop for the softmmu
   load and store helpers.  Each trampoline marshals the slow-path call
   arguments into the C calling convention — installing the high half of
   split 64-bit values on 32-bit hosts, zero-extending store data, and
   placing env and the return address — then tail-calls the helper.
   Entry points are recorded in qemu_ld_trampoline[]/qemu_st_trampoline[]
   for use by tcg_out_qemu_ld/st.  */
static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[16] = {
        [MO_UB] = helper_ret_ldub_mmu,
        [MO_SB] = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEQ]  = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEQ]  = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[16] = {
        [MO_UB] = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEQ]  = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEQ]  = helper_be_stq_mmu,
    };

    int i;
    TCGReg ra;

    for (i = 0; i < 16; ++i) {
        /* Sparse table: not every memop combination has a helper.  */
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = s->code_ptr;

        if (SPARC64 || TARGET_LONG_BITS == 32) {
            /* Address fits in one register; oi lands in O2, ra in O3.  */
            ra = TCG_REG_O3;
        } else {
            /* Install the high part of the address.  */
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
            ra = TCG_REG_O4;
        }

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  The delay slot (filled by the call emitter's caller
           flag) restores O7 so the helper returns to our caller.  */
        tcg_out_call_nodelay(s, qemu_ld_helpers[i], true);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }

    for (i = 0; i < 16; ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = s->code_ptr;

        if (SPARC64) {
            /* Helpers expect zero-extended store data.  */
            emit_extend(s, TCG_REG_O2, i);
            ra = TCG_REG_O4;
        } else {
            /* 32-bit host: walk the argument registers, assembling the
               64-bit address and/or data from their 32-bit halves.  */
            ra = TCG_REG_O1;
            if (TARGET_LONG_BITS == 64) {
                /* Install the high part of the address.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            if ((i & MO_SIZE) == MO_64) {
                /* Install the high part of the data.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                emit_extend(s, ra, i);
                ra += 1;
            }
            /* Skip the oi argument.  */
            ra += 1;
        }

        /* Set the retaddr operand.  If it would land at or beyond O6
           (sp), it must go on the stack per the calling convention;
           keep a copy in G1 for the delay-slot restore below.  */
        if (ra >= TCG_REG_O6) {
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
            ra = TCG_REG_G1;
        }
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_st_helpers[i], true);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }
}
1038#endif
1039
7d551702 1040/* Generate global QEMU prologue and epilogue code */
/* Generate the global QEMU prologue and epilogue code: establish the
   register window and frame, jump to the translated code whose address
   arrives in %i1, and lay down the goto_ptr epilogue.  Under softmmu
   the slow-path trampolines are emitted immediately after.  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /* The TCG temp buffer is at the top of the frame, immediately
       below the frame pointer.  */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
                  tmp_buf_size);

    /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
       otherwise the minimal frame usable by callees.  */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    /* save %sp, -frame_size, %sp -- opens a new register window.  */
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required.  */
    if (USE_REG_TB) {
        QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
    }

    /* Jump to the TB whose address was passed in %o1 (now %i1).  */
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr.  Returns 0 (no TB linked) to the caller.  */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

#ifdef CONFIG_SOFTMMU
    build_trampolines(s);
#endif
}
1087
e9823b4c
RH
1088static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
1089{
1090 int i;
1091 for (i = 0; i < count; ++i) {
1092 p[i] = NOP;
1093 }
1094}
1095
f5ef6aac 1096#if defined(CONFIG_SOFTMMU)
a0ce341a 1097/* Perform the TLB load and compare.
bffe1431 1098
a0ce341a 1099 Inputs:
a8b12c10 1100 ADDRLO and ADDRHI contain the possible two parts of the address.
a0ce341a
RH
1101
1102 MEM_INDEX and S_BITS are the memory context and log2 size of the load.
1103
1104 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
1105 This should be offsetof addr_read or addr_write.
1106
1107 The result of the TLB comparison is in %[ix]cc. The sanitized address
1108 is in the returned register, maybe %o0. The TLB addend is in %o1. */
1109
static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               TCGMemOp opc, int which)
{
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    int tlb_ofs;

    /* Shift the page number down.  */
    tcg_out_arithi(s, r1, addr, TARGET_PAGE_BITS, SHIFT_SRL);

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1,
                 TARGET_PAGE_MASK | ((1 << a_bits) - 1));

    /* Mask the tlb index.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND);

    /* Mask page, part 2.  r0 = addr & (page mask | alignment bits).  */
    tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND);

    /* Shift the tlb index into place.  */
    tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL);

    /* Relative to the current ENV.  */
    tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);

    /* Find a base address that can load both tlb comparator and addend.
       If the full offset doesn't fit in a 13-bit immediate, fold the
       high bits into r1 now so the two loads below use small offsets.  */
    tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
    if (!check_fit_ptr(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
        if (tlb_ofs & ~0x3ff) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, tlb_ofs & ~0x3ff);
            tcg_out_arith(s, r1, r1, TCG_REG_T1, ARITH_ADD);
        }
        tlb_ofs &= 0x3ff;
    }

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));

    /* subcc arg0, arg2, %g0 -- sets the condition codes consumed by
       the caller's conditional branch.  */
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
1167#endif /* CONFIG_SOFTMMU */
1168
eef0d9e7
RH
/* Host load opcodes indexed by memop & (MO_BSWAP | MO_SSIZE).  The
   *_LE variants use the little-endian ASI forms.  Unlisted entries
   are zero and must not be reached.  */
static const int qemu_ld_opc[16] = {
    [MO_UB]   = LDUB,
    [MO_SB]   = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEQ]  = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEQ]  = LDX_LE,
};

/* Host store opcodes indexed by memop & (MO_BSWAP | MO_SIZE).  */
static const int qemu_st_opc[16] = {
    [MO_UB]   = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEQ]  = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEQ]  = STX_LE,
};
bffe1431 1197
/* Emit a guest memory load of DATA from guest address ADDR with memop
   and mmu index packed in OI.  IS_64 distinguishes 64-bit destinations
   (relevant for sign-extension and for sparcv8plus register pairs).
   Under softmmu this emits the TLB fast path with the actual load in
   the annulled delay slot, plus the slow-path trampoline call.  */
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOpIdx oi, bool is_64)
{
    TCGMemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot: the load itself, using the addend left in %o1.  */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot: install the oi argument.  */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    /* Recall that all of the helpers return 64-bit results.
       Which complicates things for sparcv8plus.  */
    if (SPARC64) {
        /* We let the helper sign-extend SB and SW, but leave SL for here.  */
        if (is_64 && (memop & MO_SSIZE) == MO_SL) {
            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
        } else {
            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
        }
    } else {
        if ((memop & MO_SIZE) == MO_64) {
            /* Reassemble the 64-bit result from the O0/O1 halves.  */
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
        } else if (is_64) {
            /* Re-extend from 32-bit rather than reassembling when we
               know the high register must be an extension.  */
            tcg_out_arithi(s, data, TCG_REG_O1, 0,
                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
        }
    }

    /* Patch the fast-path branch to land here, past the slow path.  */
    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
#endif /* CONFIG_SOFTMMU */
}
1279
/* Emit a guest memory store of DATA to guest address ADDR with memop
   and mmu index packed in OI.  Mirrors tcg_out_qemu_ld: the TLB fast
   path performs the store in the annulled branch delay slot, followed
   by the slow-path trampoline call.  */
static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            TCGMemOpIdx oi)
{
    TCGMemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot: the store itself, using the addend left in %o1.  */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss.  */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
    if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot: install the oi argument.  */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    /* Patch the fast-path branch to land here, past the slow path.  */
    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
#endif /* CONFIG_SOFTMMU */
}
1334
b357f902
RH
/* Emit host code for one TCG opcode OPC with operands ARGS;
   CONST_ARGS flags which operands are constants.  This is the main
   per-op dispatch for the SPARC backend.  Note the shared tails:
   gen_arith (3-operand ALU), gen_arith1 (unary via %g0 source),
   do_shift32/do_shift64 (masked immediate shift counts) and do_mul2.  */
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        if (check_fit_ptr(a0, 13)) {
            /* Return value fits the RETURN delay-slot immediate move.  */
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
            break;
        } else if (USE_REG_TB) {
            intptr_t tb_diff = a0 - (uintptr_t)s->code_gen_ptr;
            if (check_fit_ptr(tb_diff, 13)) {
                tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
                /* Note that TCG_REG_TB has been unwound to O1.  */
                tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
                break;
            }
        }
        /* General case: sethi-style two-insn constant, low OR in the
           delay slot (I0 becomes O0 after the window restore).  */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* direct jump method */
            if (USE_REG_TB) {
                /* make sure the patch is 8-byte aligned.  */
                if ((intptr_t)s->code_ptr & 4) {
                    tcg_out_nop(s);
                }
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                /* sethi/or pair patched later by tb_target_set_jmp_target;
                   also advance TCG_REG_TB past the jump.  */
                tcg_out_sethi(s, TCG_REG_T1, 0);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
                tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
            } else {
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                tcg_out32(s, CALL);
                tcg_out_nop(s);
            }
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_TB,
                           (uintptr_t)(s->tb_jmp_target_addr + a0));
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
            tcg_out_nop(s);
        }
        s->tb_jmp_reset_offset[a0] = c = tcg_current_code_size(s);

        /* For the unlinked path of goto_tb, we need to reset
           TCG_REG_TB to the beginning of this TB.  */
        if (USE_REG_TB) {
            c = -c;
            if (check_fit_i32(c, 13)) {
                tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
            } else {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
                              TCG_REG_T1, ARITH_ADD);
            }
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        if (USE_REG_TB) {
            /* delay slot: keep TCG_REG_TB in sync with the new TB.  */
            tcg_out_arith(s, TCG_REG_TB, a0, TCG_REG_G0, ARITH_OR);
        } else {
            tcg_out_nop(s);
        }
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

/* Expand to the pair of case labels for the _i32 and _i64 variants.  */
#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result.  If the
           destination register can hold it, we can avoid the slower RDY.  */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        if (SPARC64 || a0 <= TCG_REG_O7) {
            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        } else {
            tcg_out_rdy(s, a1);
        }
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        /* sra by 0 sign-extends the low 32 bits.  */
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        /* srl by 0 zero-extends the low 32 bits.  */
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        /* Unary ops encoded as op(%g0, a1) -> a0.  */
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}
1635
/* Operand constraints per opcode, terminated by { -1 }.  Constraint
   letters (declared elsewhere in this backend): r/R = 32/64-bit
   register class, Z = may be constant zero (%g0), J/I = immediate
   classes, A/s/S = qemu_ld/st address and data classes, "0" = must
   match output operand 0.  */
static const TCGTargetOpDef sparc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },
    { INDEX_op_goto_ptr, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },

    { INDEX_op_neg_i32, { "r", "rJ" } },
    { INDEX_op_not_i32, { "r", "rJ" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
    { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
    { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },
    { INDEX_op_muls2_i32, { "r", "r", "rZ", "rJ" } },

    { INDEX_op_ld8u_i64, { "R", "r" } },
    { INDEX_op_ld8s_i64, { "R", "r" } },
    { INDEX_op_ld16u_i64, { "R", "r" } },
    { INDEX_op_ld16s_i64, { "R", "r" } },
    { INDEX_op_ld32u_i64, { "R", "r" } },
    { INDEX_op_ld32s_i64, { "R", "r" } },
    { INDEX_op_ld_i64, { "R", "r" } },
    { INDEX_op_st8_i64, { "RZ", "r" } },
    { INDEX_op_st16_i64, { "RZ", "r" } },
    { INDEX_op_st32_i64, { "RZ", "r" } },
    { INDEX_op_st_i64, { "RZ", "r" } },

    { INDEX_op_add_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_mul_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_div_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_divu_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_sub_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_and_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_andc_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_or_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_orc_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_xor_i64, { "R", "RZ", "RJ" } },

    { INDEX_op_shl_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_shr_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_sar_i64, { "R", "RZ", "RJ" } },

    { INDEX_op_neg_i64, { "R", "RJ" } },
    { INDEX_op_not_i64, { "R", "RJ" } },

    { INDEX_op_ext32s_i64, { "R", "R" } },
    { INDEX_op_ext32u_i64, { "R", "R" } },
    { INDEX_op_ext_i32_i64, { "R", "r" } },
    { INDEX_op_extu_i32_i64, { "R", "r" } },
    { INDEX_op_extrl_i64_i32,  { "r", "R" } },
    { INDEX_op_extrh_i64_i32,  { "r", "R" } },

    { INDEX_op_brcond_i64, { "RZ", "RJ" } },
    { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
    { INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } },

    { INDEX_op_add2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },
    { INDEX_op_sub2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },
    { INDEX_op_muluh_i64, { "R", "RZ", "RZ" } },

    { INDEX_op_qemu_ld_i32, { "r", "A" } },
    { INDEX_op_qemu_ld_i64, { "R", "A" } },
    { INDEX_op_qemu_st_i32, { "sZ", "A" } },
    { INDEX_op_qemu_st_i64, { "SZ", "A" } },

    { INDEX_op_mb, { } },
    { -1 },
};
1731
f69d277e
RH
1732static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
1733{
1734 int i, n = ARRAY_SIZE(sparc_op_defs);
1735
1736 for (i = 0; i < n; ++i) {
1737 if (sparc_op_defs[i].op == op) {
1738 return &sparc_op_defs[i];
1739 }
1740 }
1741 return NULL;
1742}
1743
/* One-time backend initialization: probe host CPU features and set up
   the available, call-clobbered, and reserved register sets.  */
static void tcg_target_init(TCGContext *s)
{
    /* Only probe for the platform and capabilities if we haven't already
       determined maximum values at compile time.  */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, ALL_64);

    /* The %g and %o registers are not preserved across calls.  */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_G1) |
                     (1 << TCG_REG_G2) |
                     (1 << TCG_REG_G3) |
                     (1 << TCG_REG_G4) |
                     (1 << TCG_REG_G5) |
                     (1 << TCG_REG_G6) |
                     (1 << TCG_REG_G7) |
                     (1 << TCG_REG_O0) |
                     (1 << TCG_REG_O1) |
                     (1 << TCG_REG_O2) |
                     (1 << TCG_REG_O3) |
                     (1 << TCG_REG_O4) |
                     (1 << TCG_REG_O5) |
                     (1 << TCG_REG_O7));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
}
cb1977d3 1784
9f44adc5 1785#if SPARC64
cb1977d3 1786# define ELF_HOST_MACHINE EM_SPARCV9
9b9c37c3 1787#else
cb1977d3
RH
1788# define ELF_HOST_MACHINE EM_SPARC32PLUS
1789# define ELF_HOST_FLAGS EF_SPARC_32PLUS
cb1977d3
RH
1790#endif
1791
/* DWARF unwind info describing the JIT-generated frame, registered
   with GDB via tcg_register_jit below.  The CIE/FDE layout comes from
   DebugFrameHeader; the fde_* members are raw DW_CFA opcode bytes.  */
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
#if SPARC64
        12, 30,                         /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
#else
        13, 30                          /* DW_CFA_def_cfa_register i6 */
#endif
    },
    .fde_win_save = 0x2d,               /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },      /* DW_CFA_register o7, i7 */
};
1821
1822void tcg_register_jit(void *buf, size_t buf_size)
1823{
cb1977d3
RH
1824 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1825}
5bbd2cae 1826
a8583393
RH
/* Patch the direct-jump slot at JMP_ADDR (inside the TB starting at
   TC_PTR) to branch to ADDR.  Without USE_REG_TB this rewrites a
   single CALL insn; with USE_REG_TB it rewrites the 8-byte
   sethi/or (or branch/add) pair emitted by goto_tb so that the jump
   both reaches ADDR and updates TCG_REG_TB.  */
void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
                              uintptr_t addr)
{
    intptr_t tb_disp = addr - tc_ptr;
    intptr_t br_disp = addr - jmp_addr;
    tcg_insn_unit i1, i2;

    /* We can reach the entire address space for ILP32.
       For LP64, the code_gen_buffer can't be larger than 2GB.  */
    tcg_debug_assert(tb_disp == (int32_t)tb_disp);
    tcg_debug_assert(br_disp == (int32_t)br_disp);

    if (!USE_REG_TB) {
        /* Single CALL insn: 30-bit word displacement.  */
        atomic_set((uint32_t *)jmp_addr, deposit32(CALL, 0, 30, br_disp >> 2));
        flush_icache_range(jmp_addr, jmp_addr + 4);
        return;
    }

    /* This does not exercise the range of the branch, but we do
       still need to be able to load the new value of TCG_REG_TB.
       But this does still happen quite often.  */
    if (check_fit_ptr(tb_disp, 13)) {
        /* ba,pt %icc, addr */
        i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
              | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
        i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
              | INSN_IMM13(tb_disp));
    } else if (tb_disp >= 0) {
        /* sethi + or builds the non-negative 32-bit displacement.  */
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13(tb_disp & 0x3ff));
    } else {
        /* Negative displacement: sethi of the complement, then xor
           with the sign-extended low bits reconstructs the value.  */
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
    }

    /* Replace both insns with one atomic 64-bit store; i1 is the
       first (higher-addressed-on-BE) word.  */
    atomic_set((uint64_t *)jmp_addr, deposit64(i2, 32, 32, i1));
    flush_icache_range(jmp_addr, jmp_addr + 8);
}
5bbd2cae 1866}