/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#ifdef __arch64__
# define SPARC64 1
#else
# define SPARC64 0
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

/*
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif

/*
 * Note that sparcv8plus can only hold 64 bit quantities in %g and %o
 * registers.  These are saved manually by the kernel in full 64-bit
 * slots.  The %i and %l registers are saved by the register window
 * mechanism, which only allocates space for 32 bits.  Given that this
 * window spill/fill can happen on any signal, we must consider the
 * high bits of the %i and %l registers garbage at all times.
 */
#define ALL_GENERAL_REGS     MAKE_64BIT_MASK(0, 32)
#if SPARC64
# define ALL_GENERAL_REGS64  ALL_GENERAL_REGS
#else
# define ALL_GENERAL_REGS64  MAKE_64BIT_MASK(0, 16)
#endif
#define ALL_QLDST_REGS       (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)
#define ALL_QLDST_REGS64     (ALL_GENERAL_REGS64 & ~SOFTMMU_RESERVE_REGS)

/* Define some temporary registers.  T2 is used for constant generation.  */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1
#define USE_REG_TB  (sizeof(void *) > 4)

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

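/*
 * In format-3 instructions, bit 13 is the "i" bit: when set, the
 * second source operand is a sign-extended immediate instead of a
 * register.  Hence the 1 << 13 in the immediate macros below.
 */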
#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
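/*
 * The 16-bit branch-on-register displacement is split in the insn:
 * word-offset bits 13:0 sit at insn bits 13:0, bits 15:14 at 21:20.
 */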
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x) ((x) << 25)

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD  (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND  (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR   (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN  (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR  (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB  (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}

#define check_fit_tl    check_fit_i64
#if SPARC64
# define check_fit_ptr  check_fit_i64
#else
# define check_fit_ptr  check_fit_i32
#endif

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (check_fit_i32(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits.  */
        tcg_out_movi_imm13(s, ret, arg);
    } else {
        /* A 32-bit constant zero-extended to 64 bits.  */
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
    }
}

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_imm32(s, ret, arg);
        return;
    }

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB.  */
    if (!in_prologue && USE_REG_TB) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
    if (arg == lo) {
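        /*
         * ARG is negative here (non-negative 32-bit values were
         * handled above), so plain sethi, which zero-extends, cannot
         * be used.  Load the complemented high bits instead, then XOR
         * with a negative 13-bit immediate: its sign-extension sets
         * bits 63..10, flipping the sethi result back to ARG and
         * filling the high word with ones.
         */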
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 32-bit constant, shifted.  */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible.  */
    if (!in_prologue && USE_REG_TB) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
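        /* The LDX displacement is emitted as zero; the R_SPARC_13
           relocation fills it in once the pool entry's TB-relative
           offset is known.  */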
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_movi_imm32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T2);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg)
{
    intptr_t diff = tcg_tbrel_diff(s, arg);
    if (USE_REG_TB && check_fit_ptr(diff, 13)) {
        tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
        return;
    }
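    /* Split the address so the low 10 bits fold into the load's
       immediate offset; the movi of the aligned high part then
       needs no final OR.  */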
    tcg_out_movi(s, TCG_TYPE_PTR, ret, (uintptr_t)arg & ~0x3ff);
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff);
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static void tcg_out_rdy(TCGContext *s, TCGReg rd)
{
    tcg_out32(s, RDY | INSN_RD(rd));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
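    /* MOVcc has no rs1 operand; the 4-bit condition is encoded in
       the rs1 bit positions, hence INSN_RS1 here.  */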
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
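    /* The SUBCC above sets the carry bit iff C1 <u C2.  ADDC then
       computes 0 + 0 + carry, i.e. the carry itself, while SUBC
       computes 0 - (-1) - carry, i.e. !carry.  */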
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates.  */
        if (bhconst && bh != 0) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place.  */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand.  */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        uintptr_t desti = (uintptr_t)dest;
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                         desti & ~0xfff, in_prologue, TCG_REG_O7);
        tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}

#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];

static void emit_extend(TCGContext *s, TCGReg r, int op)
{
    /* Emit zero extend of 8, 16 or 32 bit data as
     * required by the MO_* value op; do nothing for 64 bit.
     */
    switch (op & MO_SIZE) {
    case MO_8:
        tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
        break;
    case MO_16:
        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
        break;
    case MO_32:
        if (SPARC64) {
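            /* On V9, the 32-bit SRL clears bits 63:32 of the result,
               so a shift by zero is a cheap zero-extension.  */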
            tcg_out_arith(s, r, r, 0, SHIFT_SRL);
        }
        break;
    case MO_64:
        break;
    }
}

static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[] = {
        [MO_UB]   = helper_ret_ldub_mmu,
        [MO_SB]   = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEUQ] = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEUQ] = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[] = {
        [MO_UB]   = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEUQ] = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEUQ] = helper_be_stq_mmu,
    };

    int i;
    TCGReg ra;

    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        if (SPARC64 || TARGET_LONG_BITS == 32) {
            ra = TCG_REG_O3;
        } else {
            /* Install the high part of the address.  */
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
            ra = TCG_REG_O4;
        }

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_ld_helpers[i], true);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }

    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        if (SPARC64) {
            emit_extend(s, TCG_REG_O2, i);
            ra = TCG_REG_O4;
        } else {
            ra = TCG_REG_O1;
            if (TARGET_LONG_BITS == 64) {
                /* Install the high part of the address.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                ra += 1;
            }
            if ((i & MO_SIZE) == MO_64) {
                /* Install the high part of the data.  */
                tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
                ra += 2;
            } else {
                emit_extend(s, ra, i);
                ra += 1;
            }
            /* Skip the oi argument.  */
            ra += 1;
        }

        /* Set the retaddr operand.  */
        if (ra >= TCG_REG_O6) {
            tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
                       TCG_TARGET_CALL_STACK_OFFSET);
            ra = TCG_REG_G1;
        }
        tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
        /* Set the env operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
        /* Tail call.  */
        tcg_out_call_nodelay(s, qemu_st_helpers[i], true);
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required.  */
    if (USE_REG_TB) {
        QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
    }

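    /* Enter the translated code: the TB address arrives in %i1,
       which was %o1 of the caller before the SAVE above.  */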
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr.  */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

#ifdef CONFIG_SOFTMMU
    build_trampolines(s);
#endif
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2.  */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = LDUB,
    [MO_SB]   = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEUQ] = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEUQ] = LDX_LE,
};

static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEUQ] = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEUQ] = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, bool is_64)
{
    MemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    const tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss. */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    /* Recall that all of the helpers return 64-bit results.
       Which complicates things for sparcv8plus.  */
    if (SPARC64) {
        /* We let the helper sign-extend SB and SW, but leave SL for here.  */
        if (is_64 && (memop & MO_SSIZE) == MO_SL) {
            tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
        } else {
            tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
        }
    } else {
        if ((memop & MO_SIZE) == MO_64) {
            tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
            tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
            tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
        } else if (is_64) {
            /* Re-extend from 32-bit rather than reassembling when we
               know the high register must be an extension.  */
            tcg_out_arithi(s, data, TCG_REG_O1, 0,
                           memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
        }
    }

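    /* Back-patch the conditional branch so that a TLB hit skips
       the miss path and lands here.  */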
    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi)
{
    MemOp memop = get_memop(oi);
#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz, param;
    const tcg_insn_unit *func;
    tcg_insn_unit *label_ptr;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss. */

    param = TCG_REG_O1;
    if (!SPARC64 && TARGET_LONG_BITS == 64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, addrz);
    if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
        /* Skip the high-part; we'll perform the extract in the trampoline.  */
        param++;
    }
    tcg_out_mov(s, TCG_TYPE_REG, param++, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, param, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    if (SPARC64 && TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }
    tcg_out_ldst_rr(s, data, addr,
                    (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        if (check_fit_ptr(a0, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
            break;
        } else if (USE_REG_TB) {
            intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
            if (check_fit_ptr(tb_diff, 13)) {
                tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
                /* Note that TCG_REG_TB has been unwound to O1.  */
                tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
                break;
            }
        }
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* direct jump method */
            if (USE_REG_TB) {
                /* make sure the patch is 8-byte aligned.  */
                if ((intptr_t)s->code_ptr & 4) {
                    tcg_out_nop(s);
                }
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
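                /* sethi + or pair, patched later with the TB-relative
                   displacement of the jump target when the TB is
                   linked.  */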
                tcg_out_sethi(s, TCG_REG_T1, 0);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
                tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
            } else {
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                tcg_out32(s, CALL);
                tcg_out_nop(s);
            }
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
            tcg_out_nop(s);
        }
        set_jmp_reset_offset(s, a0);

        /* For the unlinked path of goto_tb, we need to reset
           TCG_REG_TB to the beginning of this TB.  */
        if (USE_REG_TB) {
            c = -tcg_current_code_size(s);
            if (check_fit_i32(c, 13)) {
                tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
            } else {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
                              TCG_REG_T1, ARITH_ADD);
            }
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        if (USE_REG_TB) {
            tcg_out_mov_delay(s, TCG_REG_TB, a0);
        } else {
            tcg_out_nop(s);
        }
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result.  If the
           destination register can hold it, we can avoid the slower RDY.  */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        if (SPARC64 || a0 <= TCG_REG_O7) {
            tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        } else {
            tcg_out_rdy(s, a1);
        }
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_or_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_setcond_i32:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_brcond_i32:
        return C_O0_I2(rZ, rJ);
    case INDEX_op_movcond_i32:
        return C_O1_I4(r, rZ, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rZ, rJ);

    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
        return C_O1_I1(R, r);

    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(RZ, r);

    case INDEX_op_add_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(R, RZ, RJ);

    case INDEX_op_neg_i64:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
        return C_O1_I1(R, R);

    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return C_O1_I1(r, R);

    case INDEX_op_brcond_i64:
        return C_O0_I2(RZ, RJ);
    case INDEX_op_movcond_i64:
        return C_O1_I4(R, RZ, RJ, RI, 0);
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i64:
        return C_O2_I4(R, R, RZ, RZ, RJ, RI);
    case INDEX_op_muluh_i64:
        return C_O1_I2(R, R, R);

    case INDEX_op_qemu_ld_i32:
        return C_O1_I1(r, A);
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(R, A);
    case INDEX_op_qemu_st_i32:
        return C_O0_I2(sZ, A);
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(SZ, A);

    default:
        g_assert_not_reached();
    }
}

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS64;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
1750 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
1751 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
1752 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
1753 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
1754 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
1755 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
1756 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
1757 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
1758 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
1759 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
1760 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
1761 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
1762 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);
8289b279 1763
ccb1bb66 1764 s->reserved_regs = 0;
375816f8
RH
1765 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1766 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1767 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1768 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1769 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1770 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1771 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1772 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
8289b279 1773}
cb1977d3 1774
9f44adc5 1775#if SPARC64
cb1977d3 1776# define ELF_HOST_MACHINE EM_SPARCV9
9b9c37c3 1777#else
cb1977d3
RH
1778# define ELF_HOST_MACHINE EM_SPARC32PLUS
1779# define ELF_HOST_FLAGS EF_SPARC_32PLUS
cb1977d3
RH
1780#endif
1781
cb1977d3 1782typedef struct {
ae18b28d 1783 DebugFrameHeader h;
9f44adc5 1784 uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
497a22eb
RH
1785 uint8_t fde_win_save;
1786 uint8_t fde_ret_save[3];
cb1977d3
RH
1787} DebugFrame;
1788
ae18b28d
RH
1789static const DebugFrame debug_frame = {
1790 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1791 .h.cie.id = -1,
1792 .h.cie.version = 1,
1793 .h.cie.code_align = 1,
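 /* One-byte sLEB128 encoding of -sizeof(void *). */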
1794 .h.cie.data_align = -sizeof(void *) & 0x7f,
1795 .h.cie.return_column = 15, /* o7 */
cb1977d3 1796
497a22eb 1797 /* Total FDE size does not include the "len" member. */
ae18b28d 1798 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
497a22eb
RH
1799
1800 .fde_def_cfa = {
9f44adc5 1801#if SPARC64
cb1977d3
RH
1802 12, 30, /* DW_CFA_def_cfa i6, 2047 */
1803 (2047 & 0x7f) | 0x80, (2047 >> 7) /* uLEB128 encoding of 2047 */
1804#else
1805 13, 30 /* DW_CFA_def_cfa_register i6 */
1806#endif
1807 },
497a22eb
RH
1808 .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */
1809 .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
cb1977d3
RH
1810};
1811
755bf9e5 1812void tcg_register_jit(const void *buf, size_t buf_size)
cb1977d3 1813{
cb1977d3
RH
1814 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1815}
5bbd2cae 1816
1acbad0f
RH
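/*
 * Patch the direct-jump slot of a TB. Without TCG_REG_TB a single
 * CALL instruction suffices; with it, the patched pair must both
 * branch and recompute TCG_REG_TB for the destination.
 */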
1817void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
1818 uintptr_t jmp_rw, uintptr_t addr)
5bbd2cae 1819{
ab20bdc1 1820 intptr_t tb_disp = addr - tc_ptr;
1acbad0f 1821 intptr_t br_disp = addr - jmp_rx;
ab20bdc1
RH
1822 tcg_insn_unit i1, i2;
1823
1824 /* We can reach the entire address space for ILP32.
1825 For LP64, the code_gen_buffer can't be larger than 2GB. */
1826 tcg_debug_assert(tb_disp == (int32_t)tb_disp);
1827 tcg_debug_assert(br_disp == (int32_t)br_disp);
1828
1829 if (!USE_REG_TB) {
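 /* Patch in a direct CALL; its 30-bit word displacement covers
 +-2GB, which the assert above has already guaranteed. */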
1acbad0f 1830 qatomic_set((uint32_t *)jmp_rw,
d73415a3 1831 deposit32(CALL, 0, 30, br_disp >> 2));
1acbad0f 1832 flush_idcache_range(jmp_rx, jmp_rw, 4);
ab20bdc1
RH
1833 return;
1834 }
5bbd2cae 1835
ab20bdc1
RH
1836 /* This does not exercise the full range of the branch, but we
1837 do still need to be able to load the new value of TCG_REG_TB.
1838 This case happens quite often. */
1839 if (check_fit_ptr(tb_disp, 13)) {
1840 /* ba,pt %icc, addr */
1841 i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
1842 | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
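 /* add %i1, tb_disp, %i1 -- advance TCG_REG_TB; this insn
 executes in the branch delay slot. */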
1843 i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
1844 | INSN_IMM13(tb_disp));
1845 } else if (tb_disp >= 0) {
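 /* sethi %hi(tb_disp), %g1; or %g1, %lo(tb_disp), %g1
 builds the non-negative tb_disp in TCG_REG_T1. */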
1846 i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
1847 i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
1848 | INSN_IMM13(tb_disp & 0x3ff));
1849 } else {
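 /* tb_disp < 0: sethi the complemented high bits, then xor with
 the sign-extended low 13-bit immediate to recover the negative
 tb_disp in TCG_REG_T1. */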
1850 i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
1851 i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
1852 | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
1853 }
5bbd2cae 1854
1acbad0f
RH
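 /* A single aligned 64-bit store patches both insns atomically;
 SPARC is big-endian, so the high word (i1) lands at the lower
 address and executes first. */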
1855 qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
1856 flush_idcache_range(jmp_rx, jmp_rw, 8);
5bbd2cae 1857}