1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "tcg-be-null.h"
26
27 #ifdef CONFIG_DEBUG_TCG
28 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
29 "%g0",
30 "%g1",
31 "%g2",
32 "%g3",
33 "%g4",
34 "%g5",
35 "%g6",
36 "%g7",
37 "%o0",
38 "%o1",
39 "%o2",
40 "%o3",
41 "%o4",
42 "%o5",
43 "%o6",
44 "%o7",
45 "%l0",
46 "%l1",
47 "%l2",
48 "%l3",
49 "%l4",
50 "%l5",
51 "%l6",
52 "%l7",
53 "%i0",
54 "%i1",
55 "%i2",
56 "%i3",
57 "%i4",
58 "%i5",
59 "%i6",
60 "%i7",
61 };
62 #endif
63
64 #ifdef __arch64__
65 # define SPARC64 1
66 #else
67 # define SPARC64 0
68 #endif
69
70 /* Note that sparcv8plus can only hold 64 bit quantities in %g and %o
71 registers. These are saved manually by the kernel in full 64-bit
72 slots. The %i and %l registers are saved by the register window
73 mechanism, which only allocates space for 32 bits. Given that this
74 window spill/fill can happen on any signal, we must consider the
75 high bits of the %i and %l registers garbage at all times. */
76 #if SPARC64
77 # define ALL_64 0xffffffffu
78 #else
79 # define ALL_64 0xffffu
80 #endif
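/* Illustrative note: ALL_64 is a register bitmask indexed by TCG_REG_* number.
   0xffffffffu allows all 32 integer registers on sparc64, while 0xffffu limits
   64-bit values to registers 0..15, i.e. %g0-%g7 and %o0-%o7 in the register
   order above, matching the v8plus caveat just described. */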
81
82 /* Define some temporary registers. T2 is used for constant generation. */
83 #define TCG_REG_T1 TCG_REG_G1
84 #define TCG_REG_T2 TCG_REG_O7
85
86 #ifndef CONFIG_SOFTMMU
87 # define TCG_GUEST_BASE_REG TCG_REG_I5
88 #endif
89
90 static const int tcg_target_reg_alloc_order[] = {
91 TCG_REG_L0,
92 TCG_REG_L1,
93 TCG_REG_L2,
94 TCG_REG_L3,
95 TCG_REG_L4,
96 TCG_REG_L5,
97 TCG_REG_L6,
98 TCG_REG_L7,
99
100 TCG_REG_I0,
101 TCG_REG_I1,
102 TCG_REG_I2,
103 TCG_REG_I3,
104 TCG_REG_I4,
105 TCG_REG_I5,
106
107 TCG_REG_G2,
108 TCG_REG_G3,
109 TCG_REG_G4,
110 TCG_REG_G5,
111
112 TCG_REG_O0,
113 TCG_REG_O1,
114 TCG_REG_O2,
115 TCG_REG_O3,
116 TCG_REG_O4,
117 TCG_REG_O5,
118 };
119
120 static const int tcg_target_call_iarg_regs[6] = {
121 TCG_REG_O0,
122 TCG_REG_O1,
123 TCG_REG_O2,
124 TCG_REG_O3,
125 TCG_REG_O4,
126 TCG_REG_O5,
127 };
128
129 static const int tcg_target_call_oarg_regs[] = {
130 TCG_REG_O0,
131 TCG_REG_O1,
132 TCG_REG_O2,
133 TCG_REG_O3,
134 };
135
136 #define INSN_OP(x) ((x) << 30)
137 #define INSN_OP2(x) ((x) << 22)
138 #define INSN_OP3(x) ((x) << 19)
139 #define INSN_OPF(x) ((x) << 5)
140 #define INSN_RD(x) ((x) << 25)
141 #define INSN_RS1(x) ((x) << 14)
142 #define INSN_RS2(x) (x)
143 #define INSN_ASI(x) ((x) << 5)
144
145 #define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
146 #define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
147 #define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
148 #define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
149 #define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
150 #define INSN_COND(x) ((x) << 25)
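/* As an illustration: the register form of "add %o1, %o2, %o0" is built as
   ARITH_ADD | INSN_RD(TCG_REG_O0) | INSN_RS1(TCG_REG_O1) | INSN_RS2(TCG_REG_O2),
   i.e. op=2 in bits 31:30, rd=8 in bits 29:25, op3=0 in bits 24:19, rs1=9 in
   bits 18:14 and rs2=10 in bits 4:0.  The INSN_IMM* forms above additionally
   set bit 13 (the "i" bit) to select an immediate second operand. */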
151
152 #define COND_N 0x0
153 #define COND_E 0x1
154 #define COND_LE 0x2
155 #define COND_L 0x3
156 #define COND_LEU 0x4
157 #define COND_CS 0x5
158 #define COND_NEG 0x6
159 #define COND_VS 0x7
160 #define COND_A 0x8
161 #define COND_NE 0x9
162 #define COND_G 0xa
163 #define COND_GE 0xb
164 #define COND_GU 0xc
165 #define COND_CC 0xd
166 #define COND_POS 0xe
167 #define COND_VC 0xf
168 #define BA (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))
169
170 #define RCOND_Z 1
171 #define RCOND_LEZ 2
172 #define RCOND_LZ 3
173 #define RCOND_NZ 5
174 #define RCOND_GZ 6
175 #define RCOND_GEZ 7
176
177 #define MOVCC_ICC (1 << 18)
178 #define MOVCC_XCC (1 << 18 | 1 << 12)
179
180 #define BPCC_ICC 0
181 #define BPCC_XCC (2 << 20)
182 #define BPCC_PT (1 << 19)
183 #define BPCC_PN 0
184 #define BPCC_A (1 << 29)
185
186 #define BPR_PT BPCC_PT
187
188 #define ARITH_ADD (INSN_OP(2) | INSN_OP3(0x00))
189 #define ARITH_ADDCC (INSN_OP(2) | INSN_OP3(0x10))
190 #define ARITH_AND (INSN_OP(2) | INSN_OP3(0x01))
191 #define ARITH_ANDN (INSN_OP(2) | INSN_OP3(0x05))
192 #define ARITH_OR (INSN_OP(2) | INSN_OP3(0x02))
193 #define ARITH_ORCC (INSN_OP(2) | INSN_OP3(0x12))
194 #define ARITH_ORN (INSN_OP(2) | INSN_OP3(0x06))
195 #define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
196 #define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
197 #define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
198 #define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
199 #define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
200 #define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
201 #define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
202 #define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
203 #define ARITH_SDIV (INSN_OP(2) | INSN_OP3(0x0f))
204 #define ARITH_MULX (INSN_OP(2) | INSN_OP3(0x09))
205 #define ARITH_UDIVX (INSN_OP(2) | INSN_OP3(0x0d))
206 #define ARITH_SDIVX (INSN_OP(2) | INSN_OP3(0x2d))
207 #define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
208 #define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
209
210 #define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
211 #define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))
212
213 #define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
214 #define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
215 #define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))
216
217 #define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
218 #define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
219 #define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))
220
221 #define RDY (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
222 #define WRY (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
223 #define JMPL (INSN_OP(2) | INSN_OP3(0x38))
224 #define RETURN (INSN_OP(2) | INSN_OP3(0x39))
225 #define SAVE (INSN_OP(2) | INSN_OP3(0x3c))
226 #define RESTORE (INSN_OP(2) | INSN_OP3(0x3d))
227 #define SETHI (INSN_OP(0) | INSN_OP2(0x4))
228 #define CALL INSN_OP(1)
229 #define LDUB (INSN_OP(3) | INSN_OP3(0x01))
230 #define LDSB (INSN_OP(3) | INSN_OP3(0x09))
231 #define LDUH (INSN_OP(3) | INSN_OP3(0x02))
232 #define LDSH (INSN_OP(3) | INSN_OP3(0x0a))
233 #define LDUW (INSN_OP(3) | INSN_OP3(0x00))
234 #define LDSW (INSN_OP(3) | INSN_OP3(0x08))
235 #define LDX (INSN_OP(3) | INSN_OP3(0x0b))
236 #define STB (INSN_OP(3) | INSN_OP3(0x05))
237 #define STH (INSN_OP(3) | INSN_OP3(0x06))
238 #define STW (INSN_OP(3) | INSN_OP3(0x04))
239 #define STX (INSN_OP(3) | INSN_OP3(0x0e))
240 #define LDUBA (INSN_OP(3) | INSN_OP3(0x11))
241 #define LDSBA (INSN_OP(3) | INSN_OP3(0x19))
242 #define LDUHA (INSN_OP(3) | INSN_OP3(0x12))
243 #define LDSHA (INSN_OP(3) | INSN_OP3(0x1a))
244 #define LDUWA (INSN_OP(3) | INSN_OP3(0x10))
245 #define LDSWA (INSN_OP(3) | INSN_OP3(0x18))
246 #define LDXA (INSN_OP(3) | INSN_OP3(0x1b))
247 #define STBA (INSN_OP(3) | INSN_OP3(0x15))
248 #define STHA (INSN_OP(3) | INSN_OP3(0x16))
249 #define STWA (INSN_OP(3) | INSN_OP3(0x14))
250 #define STXA (INSN_OP(3) | INSN_OP3(0x1e))
251
252 #ifndef ASI_PRIMARY_LITTLE
253 #define ASI_PRIMARY_LITTLE 0x88
254 #endif
255
256 #define LDUH_LE (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
257 #define LDSH_LE (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
258 #define LDUW_LE (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
259 #define LDSW_LE (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
260 #define LDX_LE (LDXA | INSN_ASI(ASI_PRIMARY_LITTLE))
261
262 #define STH_LE (STHA | INSN_ASI(ASI_PRIMARY_LITTLE))
263 #define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
264 #define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))
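/* These use the alternate-space load/store forms with ASI_PRIMARY_LITTLE, so
   the hardware performs the byte swap and little-endian guest accesses need
   no separate bswap instructions. */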
265
266 #ifndef use_vis3_instructions
267 bool use_vis3_instructions;
268 #endif
269
270 static inline int check_fit_i64(int64_t val, unsigned int bits)
271 {
272 return val == sextract64(val, 0, bits);
273 }
274
275 static inline int check_fit_i32(int32_t val, unsigned int bits)
276 {
277 return val == sextract32(val, 0, bits);
278 }
279
280 #define check_fit_tl check_fit_i64
281 #if SPARC64
282 # define check_fit_ptr check_fit_i64
283 #else
284 # define check_fit_ptr check_fit_i32
285 #endif
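/* For instance, check_fit_i32(val, 13) accepts exactly the signed 13-bit
   range -4096..4095, i.e. the values representable in a format-3 IMM13
   field. */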
286
287 static void patch_reloc(tcg_insn_unit *code_ptr, int type,
288 intptr_t value, intptr_t addend)
289 {
290 uint32_t insn;
291
292 tcg_debug_assert(addend == 0);
293 value = tcg_ptr_byte_diff((tcg_insn_unit *)value, code_ptr);
294
295 switch (type) {
296 case R_SPARC_WDISP16:
297 if (!check_fit_ptr(value >> 2, 16)) {
298 tcg_abort();
299 }
300 insn = *code_ptr;
301 insn &= ~INSN_OFF16(-1);
302 insn |= INSN_OFF16(value);
303 *code_ptr = insn;
304 break;
305 case R_SPARC_WDISP19:
306 if (!check_fit_ptr(value >> 2, 19)) {
307 tcg_abort();
308 }
309 insn = *code_ptr;
310 insn &= ~INSN_OFF19(-1);
311 insn |= INSN_OFF19(value);
312 *code_ptr = insn;
313 break;
314 default:
315 tcg_abort();
316 }
317 }
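/* Both relocation kinds rewrite the word-based PC-relative displacement of a
   branch that was already emitted: R_SPARC_WDISP16 for the branch-on-register
   form used by tcg_out_brcond_i64, R_SPARC_WDISP19 for the BPcc form used by
   tcg_out_bpcc.  The "value >> 2" checks reflect that the fields count
   instruction words, not bytes. */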
318
319 /* parse target specific constraints */
320 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
321 {
322 const char *ct_str;
323
324 ct_str = *pct_str;
325 switch (ct_str[0]) {
326 case 'r':
327 ct->ct |= TCG_CT_REG;
328 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
329 break;
330 case 'R':
331 ct->ct |= TCG_CT_REG;
332 tcg_regset_set32(ct->u.regs, 0, ALL_64);
333 break;
334 case 'A': /* qemu_ld/st address constraint */
335 ct->ct |= TCG_CT_REG;
336 tcg_regset_set32(ct->u.regs, 0,
337 TARGET_LONG_BITS == 64 ? ALL_64 : 0xffffffff);
338 reserve_helpers:
339 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O0);
340 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O1);
341 tcg_regset_reset_reg(ct->u.regs, TCG_REG_O2);
342 break;
343 case 's': /* qemu_st data 32-bit constraint */
344 ct->ct |= TCG_CT_REG;
345 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
346 goto reserve_helpers;
347 case 'S': /* qemu_st data 64-bit constraint */
348 ct->ct |= TCG_CT_REG;
349 tcg_regset_set32(ct->u.regs, 0, ALL_64);
350 goto reserve_helpers;
351 case 'I':
352 ct->ct |= TCG_CT_CONST_S11;
353 break;
354 case 'J':
355 ct->ct |= TCG_CT_CONST_S13;
356 break;
357 case 'Z':
358 ct->ct |= TCG_CT_CONST_ZERO;
359 break;
360 default:
361 return -1;
362 }
363 ct_str++;
364 *pct_str = ct_str;
365 return 0;
366 }
367
368 /* test if a constant matches the constraint */
369 static inline int tcg_target_const_match(tcg_target_long val, TCGType type,
370 const TCGArgConstraint *arg_ct)
371 {
372 int ct = arg_ct->ct;
373
374 if (ct & TCG_CT_CONST) {
375 return 1;
376 }
377
378 if (type == TCG_TYPE_I32) {
379 val = (int32_t)val;
380 }
381
382 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
383 return 1;
384 } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
385 return 1;
386 } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
387 return 1;
388 } else {
389 return 0;
390 }
391 }
392
393 static inline void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
394 TCGReg rs2, int op)
395 {
396 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
397 }
398
399 static inline void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
400 int32_t offset, int op)
401 {
402 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
403 }
404
405 static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
406 int32_t val2, int val2const, int op)
407 {
408 tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
409 | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
410 }
411
412 static inline void tcg_out_mov(TCGContext *s, TCGType type,
413 TCGReg ret, TCGReg arg)
414 {
415 if (ret != arg) {
416 tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
417 }
418 }
419
420 static inline void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
421 {
422 tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
423 }
424
425 static inline void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
426 {
427 tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
428 }
429
430 static void tcg_out_movi(TCGContext *s, TCGType type,
431 TCGReg ret, tcg_target_long arg)
432 {
433 tcg_target_long hi, lo = (int32_t)arg;
434
435 /* Make sure we test 32-bit constants for imm13 properly. */
436 if (type == TCG_TYPE_I32) {
437 arg = lo;
438 }
439
440 /* A 13-bit constant sign-extended to 64-bits. */
441 if (check_fit_tl(arg, 13)) {
442 tcg_out_movi_imm13(s, ret, arg);
443 return;
444 }
445
446 /* A 32-bit constant, or 32-bit zero-extended to 64-bits. */
447 if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
448 tcg_out_sethi(s, ret, arg);
449 if (arg & 0x3ff) {
450 tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
451 }
452 return;
453 }
454
455 /* A 32-bit constant sign-extended to 64-bits. */
456 if (arg == lo) {
457 tcg_out_sethi(s, ret, ~arg);
458 tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
459 return;
460 }
461
462 /* A 64-bit constant decomposed into 2 32-bit pieces. */
463 if (check_fit_i32(lo, 13)) {
464 hi = (arg - lo) >> 32;
465 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
466 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
467 tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
468 } else {
469 hi = arg >> 32;
470 tcg_out_movi(s, TCG_TYPE_I32, ret, hi);
471 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_T2, lo);
472 tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
473 tcg_out_arith(s, ret, ret, TCG_REG_T2, ARITH_OR);
474 }
475 }
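/* Worked example (illustrative): loading the 64-bit constant 0x123456789
   takes the final branch above: "or %g0, 1, ret" builds the high word,
   "sethi %hi(0x23456789), %o7" and "or %o7, 0x389, %o7" build the low word
   in T2, then "sllx ret, 32, ret" and "or ret, %o7, ret" combine the two:
   five instructions in total. */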
476
477 static inline void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
478 TCGReg a2, int op)
479 {
480 tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
481 }
482
483 static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
484 intptr_t offset, int op)
485 {
486 if (check_fit_ptr(offset, 13)) {
487 tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
488 INSN_IMM13(offset));
489 } else {
490 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
491 tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
492 }
493 }
494
495 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
496 TCGReg arg1, intptr_t arg2)
497 {
498 tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
499 }
500
501 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
502 TCGReg arg1, intptr_t arg2)
503 {
504 tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
505 }
506
507 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
508 TCGReg base, intptr_t ofs)
509 {
510 if (val == 0) {
511 tcg_out_st(s, type, TCG_REG_G0, base, ofs);
512 return true;
513 }
514 return false;
515 }
516
517 static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, uintptr_t arg)
518 {
519 tcg_out_movi(s, TCG_TYPE_PTR, ret, arg & ~0x3ff);
520 tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, arg & 0x3ff);
521 }
522
523 static inline void tcg_out_sety(TCGContext *s, TCGReg rs)
524 {
525 tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
526 }
527
528 static inline void tcg_out_rdy(TCGContext *s, TCGReg rd)
529 {
530 tcg_out32(s, RDY | INSN_RD(rd));
531 }
532
533 static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
534 int32_t val2, int val2const, int uns)
535 {
536 /* Load Y with the sign/zero extension of RS1 to 64-bits. */
537 if (uns) {
538 tcg_out_sety(s, TCG_REG_G0);
539 } else {
540 tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
541 tcg_out_sety(s, TCG_REG_T1);
542 }
543
544 tcg_out_arithc(s, rd, rs1, val2, val2const,
545 uns ? ARITH_UDIV : ARITH_SDIV);
546 }
547
548 static inline void tcg_out_nop(TCGContext *s)
549 {
550 tcg_out_sethi(s, TCG_REG_G0, 0);
551 }
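/* SETHI with rd = %g0 and a zero immediate is the canonical SPARC nop
   encoding (0x01000000). */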
552
553 static const uint8_t tcg_cond_to_bcond[] = {
554 [TCG_COND_EQ] = COND_E,
555 [TCG_COND_NE] = COND_NE,
556 [TCG_COND_LT] = COND_L,
557 [TCG_COND_GE] = COND_GE,
558 [TCG_COND_LE] = COND_LE,
559 [TCG_COND_GT] = COND_G,
560 [TCG_COND_LTU] = COND_CS,
561 [TCG_COND_GEU] = COND_CC,
562 [TCG_COND_LEU] = COND_LEU,
563 [TCG_COND_GTU] = COND_GU,
564 };
565
566 static const uint8_t tcg_cond_to_rcond[] = {
567 [TCG_COND_EQ] = RCOND_Z,
568 [TCG_COND_NE] = RCOND_NZ,
569 [TCG_COND_LT] = RCOND_LZ,
570 [TCG_COND_GT] = RCOND_GZ,
571 [TCG_COND_LE] = RCOND_LEZ,
572 [TCG_COND_GE] = RCOND_GEZ
573 };
574
575 static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
576 {
577 tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
578 }
579
580 static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
581 {
582 int off19;
583
584 if (l->has_value) {
585 off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
586 } else {
587 /* Make sure to preserve destinations during retranslation. */
588 off19 = *s->code_ptr & INSN_OFF19(-1);
589 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
590 }
591 tcg_out_bpcc0(s, scond, flags, off19);
592 }
593
594 static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
595 {
596 tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
597 }
598
599 static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
600 int32_t arg2, int const_arg2, TCGLabel *l)
601 {
602 tcg_out_cmp(s, arg1, arg2, const_arg2);
603 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
604 tcg_out_nop(s);
605 }
606
607 static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
608 int32_t v1, int v1const)
609 {
610 tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
611 | INSN_RS1(tcg_cond_to_bcond[cond])
612 | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
613 }
614
615 static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
616 TCGReg c1, int32_t c2, int c2const,
617 int32_t v1, int v1const)
618 {
619 tcg_out_cmp(s, c1, c2, c2const);
620 tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
621 }
622
623 static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
624 int32_t arg2, int const_arg2, TCGLabel *l)
625 {
626 /* For 64-bit signed comparisons vs zero, we can avoid the compare. */
627 if (arg2 == 0 && !is_unsigned_cond(cond)) {
628 int off16;
629
630 if (l->has_value) {
631 off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
632 } else {
633 /* Make sure to preserve destinations during retranslation. */
634 off16 = *s->code_ptr & INSN_OFF16(-1);
635 tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
636 }
637 tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
638 | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
639 } else {
640 tcg_out_cmp(s, arg1, arg2, const_arg2);
641 tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
642 }
643 tcg_out_nop(s);
644 }
645
646 static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
647 int32_t v1, int v1const)
648 {
649 tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
650 | (tcg_cond_to_rcond[cond] << 10)
651 | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
652 }
653
654 static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
655 TCGReg c1, int32_t c2, int c2const,
656 int32_t v1, int v1const)
657 {
658 /* For 64-bit signed comparisons vs zero, we can avoid the compare.
659 Note that the immediate range is one bit smaller, so we must check
660 for that as well. */
661 if (c2 == 0 && !is_unsigned_cond(cond)
662 && (!v1const || check_fit_i32(v1, 10))) {
663 tcg_out_movr(s, cond, ret, c1, v1, v1const);
664 } else {
665 tcg_out_cmp(s, c1, c2, c2const);
666 tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
667 }
668 }
669
670 static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
671 TCGReg c1, int32_t c2, int c2const)
672 {
673 /* For 32-bit comparisons, we can play games with ADDC/SUBC. */
674 switch (cond) {
675 case TCG_COND_LTU:
676 case TCG_COND_GEU:
677 /* The result of the comparison is in the carry bit. */
678 break;
679
680 case TCG_COND_EQ:
681 case TCG_COND_NE:
682 /* For equality, we can transform to inequality vs zero. */
683 if (c2 != 0) {
684 tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
685 c2 = TCG_REG_T1;
686 } else {
687 c2 = c1;
688 }
689 c1 = TCG_REG_G0, c2const = 0;
690 cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
691 break;
692
693 case TCG_COND_GTU:
694 case TCG_COND_LEU:
695 /* If we don't need to load a constant into a register, we can
696 swap the operands on GTU/LEU. There's no benefit to loading
697 the constant into a temporary register. */
698 if (!c2const || c2 == 0) {
699 TCGReg t = c1;
700 c1 = c2;
701 c2 = t;
702 c2const = 0;
703 cond = tcg_swap_cond(cond);
704 break;
705 }
706 /* FALLTHRU */
707
708 default:
709 tcg_out_cmp(s, c1, c2, c2const);
710 tcg_out_movi_imm13(s, ret, 0);
711 tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
712 return;
713 }
714
715 tcg_out_cmp(s, c1, c2, c2const);
716 if (cond == TCG_COND_LTU) {
717 tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
718 } else {
719 tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
720 }
721 }
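/* Illustration of the carry trick: for LTU the sequence is "subcc c1, c2, %g0"
   followed by RET = %g0 + 0 + carry (ARITH_ADDC), so RET receives exactly the
   carry flag, which SUBCC set iff c1 < c2 unsigned.  GEU instead computes
   RET = %g0 - (-1) - carry (ARITH_SUBC), i.e. 1 - carry, the inverted test. */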
722
723 static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
724 TCGReg c1, int32_t c2, int c2const)
725 {
726 if (use_vis3_instructions) {
727 switch (cond) {
728 case TCG_COND_NE:
729 if (c2 != 0) {
730 break;
731 }
732 c2 = c1, c2const = 0, c1 = TCG_REG_G0;
733 /* FALLTHRU */
734 case TCG_COND_LTU:
735 tcg_out_cmp(s, c1, c2, c2const);
736 tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
737 return;
738 default:
739 break;
740 }
741 }
742
743 /* For 64-bit signed comparisons vs zero, we can avoid the compare
744 if the input does not overlap the output. */
745 if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
746 tcg_out_movi_imm13(s, ret, 0);
747 tcg_out_movr(s, cond, ret, c1, 1, 1);
748 } else {
749 tcg_out_cmp(s, c1, c2, c2const);
750 tcg_out_movi_imm13(s, ret, 0);
751 tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
752 }
753 }
754
755 static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
756 TCGReg al, TCGReg ah, int32_t bl, int blconst,
757 int32_t bh, int bhconst, int opl, int oph)
758 {
759 TCGReg tmp = TCG_REG_T1;
760
761 /* Note that the low parts are fully consumed before tmp is set. */
762 if (rl != ah && (bhconst || rl != bh)) {
763 tmp = rl;
764 }
765
766 tcg_out_arithc(s, tmp, al, bl, blconst, opl);
767 tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
768 tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
769 }
770
771 static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
772 TCGReg al, TCGReg ah, int32_t bl, int blconst,
773 int32_t bh, int bhconst, bool is_sub)
774 {
775 TCGReg tmp = TCG_REG_T1;
776
777 /* Note that the low parts are fully consumed before tmp is set. */
778 if (rl != ah && (bhconst || rl != bh)) {
779 tmp = rl;
780 }
781
782 tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
783
784 if (use_vis3_instructions && !is_sub) {
785 /* Note that ADDXC doesn't accept immediates. */
786 if (bhconst && bh != 0) {
787 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
788 bh = TCG_REG_T2;
789 }
790 tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
791 } else if (bh == TCG_REG_G0) {
792 /* If we have a zero, we can perform the operation in two insns,
793 with the arithmetic first, and a conditional move into place. */
794 if (rh == ah) {
795 tcg_out_arithi(s, TCG_REG_T2, ah, 1,
796 is_sub ? ARITH_SUB : ARITH_ADD);
797 tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
798 } else {
799 tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
800 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
801 }
802 } else {
803 /* Otherwise adjust BH as if there is carry into T2 ... */
804 if (bhconst) {
805 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
806 } else {
807 tcg_out_arithi(s, TCG_REG_T2, bh, 1,
808 is_sub ? ARITH_SUB : ARITH_ADD);
809 }
810 /* ... smoosh T2 back to original BH if carry is clear ... */
811 tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
812 /* ... and finally perform the arithmetic with the new operand. */
813 tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
814 }
815
816 tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
817 }
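/* Without VIS3 the generic path above needs three extra insns for the high
   part of an add2: T2 = bh + 1, then a conditional move puts the original bh
   back into T2 if the low-part ADDCC left the carry clear, and finally
   rh = ah + T2.  Either way T2 ends up holding bh plus the incoming carry. */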
818
819 static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest)
820 {
821 ptrdiff_t disp = tcg_pcrel_diff(s, dest);
822
823 if (disp == (int32_t)disp) {
824 tcg_out32(s, CALL | (uint32_t)disp >> 2);
825 } else {
826 uintptr_t desti = (uintptr_t)dest;
827 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, desti & ~0xfff);
828 tcg_out_arithi(s, TCG_REG_O7, TCG_REG_T1, desti & 0xfff, JMPL);
829 }
830 }
831
832 static void tcg_out_call(TCGContext *s, tcg_insn_unit *dest)
833 {
834 tcg_out_call_nodelay(s, dest);
835 tcg_out_nop(s);
836 }
837
838 #ifdef CONFIG_SOFTMMU
839 static tcg_insn_unit *qemu_ld_trampoline[16];
840 static tcg_insn_unit *qemu_st_trampoline[16];
841
842 static void build_trampolines(TCGContext *s)
843 {
844 static void * const qemu_ld_helpers[16] = {
845 [MO_UB] = helper_ret_ldub_mmu,
846 [MO_SB] = helper_ret_ldsb_mmu,
847 [MO_LEUW] = helper_le_lduw_mmu,
848 [MO_LESW] = helper_le_ldsw_mmu,
849 [MO_LEUL] = helper_le_ldul_mmu,
850 [MO_LEQ] = helper_le_ldq_mmu,
851 [MO_BEUW] = helper_be_lduw_mmu,
852 [MO_BESW] = helper_be_ldsw_mmu,
853 [MO_BEUL] = helper_be_ldul_mmu,
854 [MO_BEQ] = helper_be_ldq_mmu,
855 };
856 static void * const qemu_st_helpers[16] = {
857 [MO_UB] = helper_ret_stb_mmu,
858 [MO_LEUW] = helper_le_stw_mmu,
859 [MO_LEUL] = helper_le_stl_mmu,
860 [MO_LEQ] = helper_le_stq_mmu,
861 [MO_BEUW] = helper_be_stw_mmu,
862 [MO_BEUL] = helper_be_stl_mmu,
863 [MO_BEQ] = helper_be_stq_mmu,
864 };
865
866 int i;
867 TCGReg ra;
868
869 for (i = 0; i < 16; ++i) {
870 if (qemu_ld_helpers[i] == NULL) {
871 continue;
872 }
873
874 /* May as well align the trampoline. */
875 while ((uintptr_t)s->code_ptr & 15) {
876 tcg_out_nop(s);
877 }
878 qemu_ld_trampoline[i] = s->code_ptr;
879
880 if (SPARC64 || TARGET_LONG_BITS == 32) {
881 ra = TCG_REG_O3;
882 } else {
883 /* Install the high part of the address. */
884 tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O2, 32, SHIFT_SRLX);
885 ra = TCG_REG_O4;
886 }
887
888 /* Set the retaddr operand. */
889 tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
890 /* Set the env operand. */
891 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
892 /* Tail call. */
893 tcg_out_call_nodelay(s, qemu_ld_helpers[i]);
894 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
895 }
896
897 for (i = 0; i < 16; ++i) {
898 if (qemu_st_helpers[i] == NULL) {
899 continue;
900 }
901
902 /* May as well align the trampoline. */
903 while ((uintptr_t)s->code_ptr & 15) {
904 tcg_out_nop(s);
905 }
906 qemu_st_trampoline[i] = s->code_ptr;
907
908 if (SPARC64) {
909 ra = TCG_REG_O4;
910 } else {
911 ra = TCG_REG_O1;
912 if (TARGET_LONG_BITS == 64) {
913 /* Install the high part of the address. */
914 tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
915 ra += 2;
916 } else {
917 ra += 1;
918 }
919 if ((i & MO_SIZE) == MO_64) {
920 /* Install the high part of the data. */
921 tcg_out_arithi(s, ra, ra + 1, 32, SHIFT_SRLX);
922 ra += 2;
923 } else {
924 ra += 1;
925 }
926 /* Skip the oi argument. */
927 ra += 1;
928 }
929
930 /* Set the retaddr operand. */
931 if (ra >= TCG_REG_O6) {
932 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_O7, TCG_REG_CALL_STACK,
933 TCG_TARGET_CALL_STACK_OFFSET);
934 ra = TCG_REG_G1;
935 }
936 tcg_out_mov(s, TCG_TYPE_PTR, ra, TCG_REG_O7);
937 /* Set the env operand. */
938 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O0, TCG_AREG0);
939 /* Tail call. */
940 tcg_out_call_nodelay(s, qemu_st_helpers[i]);
941 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O7, ra);
942 }
943 }
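/* On a 32-bit (v8plus) host, TCG keeps 64-bit guest addresses and data in
   single %o registers, but the 32-bit calling convention passes 64-bit
   arguments in hi/lo register pairs.  The SHIFT_SRLX instructions above copy
   the high 32 bits into the preceding argument register so the C helpers see
   properly formed pairs; the return address goes in the next free register,
   or in the outgoing-argument stack slot once the %o registers are
   exhausted. */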
944 #endif
945
946 /* Generate global QEMU prologue and epilogue code */
947 static void tcg_target_qemu_prologue(TCGContext *s)
948 {
949 int tmp_buf_size, frame_size;
950
951 /* The TCG temp buffer is at the top of the frame, immediately
952 below the frame pointer. */
953 tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
954 tcg_set_frame(s, TCG_REG_I6, TCG_TARGET_STACK_BIAS - tmp_buf_size,
955 tmp_buf_size);
956
957 /* TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
958 otherwise the minimal frame usable by callees. */
959 frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
960 frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
961 frame_size += TCG_TARGET_STACK_ALIGN - 1;
962 frame_size &= -TCG_TARGET_STACK_ALIGN;
963 tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
964 INSN_IMM13(-frame_size));
965
966 #ifndef CONFIG_SOFTMMU
967 if (guest_base != 0) {
968 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
969 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
970 }
971 #endif
972
973 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
974 /* delay slot */
975 tcg_out_nop(s);
976
977 /* No epilogue required. We issue ret + restore directly in the TB. */
978
979 #ifdef CONFIG_SOFTMMU
980 build_trampolines(s);
981 #endif
982 }
983
984 #if defined(CONFIG_SOFTMMU)
985 /* Perform the TLB load and compare.
986
987 Inputs:
988 ADDRLO and ADDRHI contain the possible two parts of the address.
989
990 MEM_INDEX and S_BITS are the memory context and log2 size of the load.
991
992 WHICH is the offset into the CPUTLBEntry structure of the slot to read.
993 This should be offsetof addr_read or addr_write.
994
995 The result of the TLB comparison is in %[ix]cc. The sanitized address
996    is in the returned register, which may be %o0.  The TLB addend is in %o1. */
997
998 static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
999 TCGMemOp opc, int which)
1000 {
1001 const TCGReg r0 = TCG_REG_O0;
1002 const TCGReg r1 = TCG_REG_O1;
1003 const TCGReg r2 = TCG_REG_O2;
1004 unsigned s_bits = opc & MO_SIZE;
1005 unsigned a_bits = get_alignment_bits(opc);
1006 int tlb_ofs;
1007
1008 /* Shift the page number down. */
1009 tcg_out_arithi(s, r1, addr, TARGET_PAGE_BITS, SHIFT_SRL);
1010
1011 /* Mask out the page offset, except for the required alignment.
1012 We don't support unaligned accesses. */
1013 if (a_bits < s_bits) {
1014 a_bits = s_bits;
1015 }
1016 tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_T1,
1017 TARGET_PAGE_MASK | ((1 << a_bits) - 1));
1018
1019 /* Mask the tlb index. */
1020 tcg_out_arithi(s, r1, r1, CPU_TLB_SIZE - 1, ARITH_AND);
1021
1022 /* Mask page, part 2. */
1023 tcg_out_arith(s, r0, addr, TCG_REG_T1, ARITH_AND);
1024
1025 /* Shift the tlb index into place. */
1026 tcg_out_arithi(s, r1, r1, CPU_TLB_ENTRY_BITS, SHIFT_SLL);
1027
1028 /* Relative to the current ENV. */
1029 tcg_out_arith(s, r1, TCG_AREG0, r1, ARITH_ADD);
1030
1031 /* Find a base address that can load both tlb comparator and addend. */
1032 tlb_ofs = offsetof(CPUArchState, tlb_table[mem_index][0]);
1033 if (!check_fit_ptr(tlb_ofs + sizeof(CPUTLBEntry), 13)) {
1034 if (tlb_ofs & ~0x3ff) {
1035 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, tlb_ofs & ~0x3ff);
1036 tcg_out_arith(s, r1, r1, TCG_REG_T1, ARITH_ADD);
1037 }
1038 tlb_ofs &= 0x3ff;
1039 }
1040
1041 /* Load the tlb comparator and the addend. */
1042 tcg_out_ld(s, TCG_TYPE_TL, r2, r1, tlb_ofs + which);
1043 tcg_out_ld(s, TCG_TYPE_PTR, r1, r1, tlb_ofs+offsetof(CPUTLBEntry, addend));
1044
1045 /* subcc arg0, arg2, %g0 */
1046 tcg_out_cmp(s, r0, r2, 0);
1047
1048 /* If the guest address must be zero-extended, do so now. */
1049 if (SPARC64 && TARGET_LONG_BITS == 32) {
1050 tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
1051 return r0;
1052 }
1053 return addr;
1054 }
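/* In effect the sequence above computes
     entry = env + offsetof(CPUArchState, tlb_table[mem_index][0])
                 + (((addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1))
                    << CPU_TLB_ENTRY_BITS)
   and compares entry->addr_read (or addr_write) against the address masked
   with TARGET_PAGE_MASK | ((1 << a_bits) - 1).  An access not aligned to
   a_bits leaves low bits set, so the compare fails and the slow path is
   taken. */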
1055 #endif /* CONFIG_SOFTMMU */
1056
1057 static const int qemu_ld_opc[16] = {
1058 [MO_UB] = LDUB,
1059 [MO_SB] = LDSB,
1060
1061 [MO_BEUW] = LDUH,
1062 [MO_BESW] = LDSH,
1063 [MO_BEUL] = LDUW,
1064 [MO_BESL] = LDSW,
1065 [MO_BEQ] = LDX,
1066
1067 [MO_LEUW] = LDUH_LE,
1068 [MO_LESW] = LDSH_LE,
1069 [MO_LEUL] = LDUW_LE,
1070 [MO_LESL] = LDSW_LE,
1071 [MO_LEQ] = LDX_LE,
1072 };
1073
1074 static const int qemu_st_opc[16] = {
1075 [MO_UB] = STB,
1076
1077 [MO_BEUW] = STH,
1078 [MO_BEUL] = STW,
1079 [MO_BEQ] = STX,
1080
1081 [MO_LEUW] = STH_LE,
1082 [MO_LEUL] = STW_LE,
1083 [MO_LEQ] = STX_LE,
1084 };
1085
1086 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
1087 TCGMemOpIdx oi, bool is_64)
1088 {
1089 TCGMemOp memop = get_memop(oi);
1090 #ifdef CONFIG_SOFTMMU
1091 unsigned memi = get_mmuidx(oi);
1092 TCGReg addrz, param;
1093 tcg_insn_unit *func;
1094 tcg_insn_unit *label_ptr;
1095
1096 addrz = tcg_out_tlb_load(s, addr, memi, memop,
1097 offsetof(CPUTLBEntry, addr_read));
1098
1099 /* The fast path is exactly one insn. Thus we can perform the
1100 entire TLB Hit in the (annulled) delay slot of the branch
1101 over the TLB Miss case. */
1102
1103 /* beq,a,pt %[xi]cc, label0 */
1104 label_ptr = s->code_ptr;
1105 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1106 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
1107 /* delay slot */
1108 tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
1109 qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
1110
1111 /* TLB Miss. */
1112
1113 param = TCG_REG_O1;
1114 if (!SPARC64 && TARGET_LONG_BITS == 64) {
1115 /* Skip the high-part; we'll perform the extract in the trampoline. */
1116 param++;
1117 }
1118 tcg_out_mov(s, TCG_TYPE_REG, param++, addr);
1119
1120 /* We use the helpers to extend SB and SW data, leaving the case
1121 of SL needing explicit extending below. */
1122 if ((memop & MO_SSIZE) == MO_SL) {
1123 func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
1124 } else {
1125 func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
1126 }
1127 tcg_debug_assert(func != NULL);
1128 tcg_out_call_nodelay(s, func);
1129 /* delay slot */
1130 tcg_out_movi(s, TCG_TYPE_I32, param, oi);
1131
1132     /* Recall that all of the helpers return 64-bit results, which
1133        complicates things for sparcv8plus. */
1134 if (SPARC64) {
1135 /* We let the helper sign-extend SB and SW, but leave SL for here. */
1136 if (is_64 && (memop & MO_SSIZE) == MO_SL) {
1137 tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
1138 } else {
1139 tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
1140 }
1141 } else {
1142 if ((memop & MO_SIZE) == MO_64) {
1143 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX);
1144 tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL);
1145 tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR);
1146 } else if (is_64) {
1147 /* Re-extend from 32-bit rather than reassembling when we
1148 know the high register must be an extension. */
1149 tcg_out_arithi(s, data, TCG_REG_O1, 0,
1150 memop & MO_SIGN ? SHIFT_SRA : SHIFT_SRL);
1151 } else {
1152 tcg_out_mov(s, TCG_TYPE_I32, data, TCG_REG_O1);
1153 }
1154 }
1155
1156 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1157 #else
1158 if (SPARC64 && TARGET_LONG_BITS == 32) {
1159 tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
1160 addr = TCG_REG_T1;
1161 }
1162 tcg_out_ldst_rr(s, data, addr,
1163 (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
1164 qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
1165 #endif /* CONFIG_SOFTMMU */
1166 }
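/* The emitted softmmu fast path is thus just the TLB probe followed by
     beq,a,pt %[xi]cc, <after slow path>
      ld<size> [addrz + %o1], data   ! annulled: executed only when the
                                     ! branch (TLB hit) is taken
   and the final "*label_ptr |=" back-patches the 19-bit branch displacement
   once the length of the slow-path call sequence is known. */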
1167
1168 static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
1169 TCGMemOpIdx oi)
1170 {
1171 TCGMemOp memop = get_memop(oi);
1172 #ifdef CONFIG_SOFTMMU
1173 unsigned memi = get_mmuidx(oi);
1174 TCGReg addrz, param;
1175 tcg_insn_unit *func;
1176 tcg_insn_unit *label_ptr;
1177
1178 addrz = tcg_out_tlb_load(s, addr, memi, memop,
1179 offsetof(CPUTLBEntry, addr_write));
1180
1181 /* The fast path is exactly one insn. Thus we can perform the entire
1182 TLB Hit in the (annulled) delay slot of the branch over TLB Miss. */
1183 /* beq,a,pt %[xi]cc, label0 */
1184 label_ptr = s->code_ptr;
1185 tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
1186 | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
1187 /* delay slot */
1188 tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
1189 qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
1190
1191 /* TLB Miss. */
1192
1193 param = TCG_REG_O1;
1194 if (!SPARC64 && TARGET_LONG_BITS == 64) {
1195 /* Skip the high-part; we'll perform the extract in the trampoline. */
1196 param++;
1197 }
1198 tcg_out_mov(s, TCG_TYPE_REG, param++, addr);
1199 if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
1200 /* Skip the high-part; we'll perform the extract in the trampoline. */
1201 param++;
1202 }
1203 tcg_out_mov(s, TCG_TYPE_REG, param++, data);
1204
1205 func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
1206 tcg_debug_assert(func != NULL);
1207 tcg_out_call_nodelay(s, func);
1208 /* delay slot */
1209 tcg_out_movi(s, TCG_TYPE_I32, param, oi);
1210
1211 *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
1212 #else
1213 if (SPARC64 && TARGET_LONG_BITS == 32) {
1214 tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
1215 addr = TCG_REG_T1;
1216 }
1217 tcg_out_ldst_rr(s, data, addr,
1218 (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0),
1219 qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
1220 #endif /* CONFIG_SOFTMMU */
1221 }
1222
1223 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1224 const TCGArg args[TCG_MAX_OP_ARGS],
1225 const int const_args[TCG_MAX_OP_ARGS])
1226 {
1227 TCGArg a0, a1, a2;
1228 int c, c2;
1229
1230 /* Hoist the loads of the most common arguments. */
1231 a0 = args[0];
1232 a1 = args[1];
1233 a2 = args[2];
1234 c2 = const_args[2];
1235
1236 switch (opc) {
1237 case INDEX_op_exit_tb:
1238 if (check_fit_ptr(a0, 13)) {
1239 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1240 tcg_out_movi_imm13(s, TCG_REG_O0, a0);
1241 } else {
1242 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
1243 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
1244 tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
1245 }
1246 break;
1247 case INDEX_op_goto_tb:
1248 if (s->tb_jmp_insn_offset) {
1249 /* direct jump method */
1250 s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
1251 /* Make sure to preserve links during retranslation. */
1252 tcg_out32(s, CALL | (*s->code_ptr & ~INSN_OP(-1)));
1253 } else {
1254 /* indirect jump method */
1255 tcg_out_ld_ptr(s, TCG_REG_T1,
1256 (uintptr_t)(s->tb_jmp_target_addr + a0));
1257 tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
1258 }
1259 tcg_out_nop(s);
1260 s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
1261 break;
1262 case INDEX_op_br:
1263 tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
1264 tcg_out_nop(s);
1265 break;
1266
1267 #define OP_32_64(x) \
1268 glue(glue(case INDEX_op_, x), _i32): \
1269 glue(glue(case INDEX_op_, x), _i64)
1270
1271 OP_32_64(ld8u):
1272 tcg_out_ldst(s, a0, a1, a2, LDUB);
1273 break;
1274 OP_32_64(ld8s):
1275 tcg_out_ldst(s, a0, a1, a2, LDSB);
1276 break;
1277 OP_32_64(ld16u):
1278 tcg_out_ldst(s, a0, a1, a2, LDUH);
1279 break;
1280 OP_32_64(ld16s):
1281 tcg_out_ldst(s, a0, a1, a2, LDSH);
1282 break;
1283 case INDEX_op_ld_i32:
1284 case INDEX_op_ld32u_i64:
1285 tcg_out_ldst(s, a0, a1, a2, LDUW);
1286 break;
1287 OP_32_64(st8):
1288 tcg_out_ldst(s, a0, a1, a2, STB);
1289 break;
1290 OP_32_64(st16):
1291 tcg_out_ldst(s, a0, a1, a2, STH);
1292 break;
1293 case INDEX_op_st_i32:
1294 case INDEX_op_st32_i64:
1295 tcg_out_ldst(s, a0, a1, a2, STW);
1296 break;
1297 OP_32_64(add):
1298 c = ARITH_ADD;
1299 goto gen_arith;
1300 OP_32_64(sub):
1301 c = ARITH_SUB;
1302 goto gen_arith;
1303 OP_32_64(and):
1304 c = ARITH_AND;
1305 goto gen_arith;
1306 OP_32_64(andc):
1307 c = ARITH_ANDN;
1308 goto gen_arith;
1309 OP_32_64(or):
1310 c = ARITH_OR;
1311 goto gen_arith;
1312 OP_32_64(orc):
1313 c = ARITH_ORN;
1314 goto gen_arith;
1315 OP_32_64(xor):
1316 c = ARITH_XOR;
1317 goto gen_arith;
1318 case INDEX_op_shl_i32:
1319 c = SHIFT_SLL;
1320 do_shift32:
1321 /* Limit immediate shift count lest we create an illegal insn. */
1322 tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
1323 break;
1324 case INDEX_op_shr_i32:
1325 c = SHIFT_SRL;
1326 goto do_shift32;
1327 case INDEX_op_sar_i32:
1328 c = SHIFT_SRA;
1329 goto do_shift32;
1330 case INDEX_op_mul_i32:
1331 c = ARITH_UMUL;
1332 goto gen_arith;
1333
1334 OP_32_64(neg):
1335 c = ARITH_SUB;
1336 goto gen_arith1;
1337 OP_32_64(not):
1338 c = ARITH_ORN;
1339 goto gen_arith1;
1340
1341 case INDEX_op_div_i32:
1342 tcg_out_div32(s, a0, a1, a2, c2, 0);
1343 break;
1344 case INDEX_op_divu_i32:
1345 tcg_out_div32(s, a0, a1, a2, c2, 1);
1346 break;
1347
1348 case INDEX_op_brcond_i32:
1349 tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1350 break;
1351 case INDEX_op_setcond_i32:
1352 tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
1353 break;
1354 case INDEX_op_movcond_i32:
1355 tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1356 break;
1357
1358 case INDEX_op_add2_i32:
1359 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1360 args[4], const_args[4], args[5], const_args[5],
1361 ARITH_ADDCC, ARITH_ADDC);
1362 break;
1363 case INDEX_op_sub2_i32:
1364 tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
1365 args[4], const_args[4], args[5], const_args[5],
1366 ARITH_SUBCC, ARITH_SUBC);
1367 break;
1368 case INDEX_op_mulu2_i32:
1369 c = ARITH_UMUL;
1370 goto do_mul2;
1371 case INDEX_op_muls2_i32:
1372 c = ARITH_SMUL;
1373 do_mul2:
1374 /* The 32-bit multiply insns produce a full 64-bit result. If the
1375 destination register can hold it, we can avoid the slower RDY. */
1376 tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
1377 if (SPARC64 || a0 <= TCG_REG_O7) {
1378 tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
1379 } else {
1380 tcg_out_rdy(s, a1);
1381 }
1382 break;
1383
1384 case INDEX_op_qemu_ld_i32:
1385 tcg_out_qemu_ld(s, a0, a1, a2, false);
1386 break;
1387 case INDEX_op_qemu_ld_i64:
1388 tcg_out_qemu_ld(s, a0, a1, a2, true);
1389 break;
1390 case INDEX_op_qemu_st_i32:
1391 case INDEX_op_qemu_st_i64:
1392 tcg_out_qemu_st(s, a0, a1, a2);
1393 break;
1394
1395 case INDEX_op_ld32s_i64:
1396 tcg_out_ldst(s, a0, a1, a2, LDSW);
1397 break;
1398 case INDEX_op_ld_i64:
1399 tcg_out_ldst(s, a0, a1, a2, LDX);
1400 break;
1401 case INDEX_op_st_i64:
1402 tcg_out_ldst(s, a0, a1, a2, STX);
1403 break;
1404 case INDEX_op_shl_i64:
1405 c = SHIFT_SLLX;
1406 do_shift64:
1407 /* Limit immediate shift count lest we create an illegal insn. */
1408 tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
1409 break;
1410 case INDEX_op_shr_i64:
1411 c = SHIFT_SRLX;
1412 goto do_shift64;
1413 case INDEX_op_sar_i64:
1414 c = SHIFT_SRAX;
1415 goto do_shift64;
1416 case INDEX_op_mul_i64:
1417 c = ARITH_MULX;
1418 goto gen_arith;
1419 case INDEX_op_div_i64:
1420 c = ARITH_SDIVX;
1421 goto gen_arith;
1422 case INDEX_op_divu_i64:
1423 c = ARITH_UDIVX;
1424 goto gen_arith;
1425 case INDEX_op_ext_i32_i64:
1426 case INDEX_op_ext32s_i64:
1427 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
1428 break;
1429 case INDEX_op_extu_i32_i64:
1430 case INDEX_op_ext32u_i64:
1431 tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
1432 break;
1433 case INDEX_op_extrl_i64_i32:
1434 tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
1435 break;
1436 case INDEX_op_extrh_i64_i32:
1437 tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
1438 break;
1439
1440 case INDEX_op_brcond_i64:
1441 tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
1442 break;
1443 case INDEX_op_setcond_i64:
1444 tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
1445 break;
1446 case INDEX_op_movcond_i64:
1447 tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
1448 break;
1449 case INDEX_op_add2_i64:
1450 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1451 const_args[4], args[5], const_args[5], false);
1452 break;
1453 case INDEX_op_sub2_i64:
1454 tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
1455 const_args[4], args[5], const_args[5], true);
1456 break;
1457 case INDEX_op_muluh_i64:
1458 tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
1459 break;
1460
1461 gen_arith:
1462 tcg_out_arithc(s, a0, a1, a2, c2, c);
1463 break;
1464
1465 gen_arith1:
1466 tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
1467 break;
1468
1469 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
1470 case INDEX_op_mov_i64:
1471 case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi. */
1472 case INDEX_op_movi_i64:
1473 case INDEX_op_call: /* Always emitted via tcg_out_call. */
1474 default:
1475 tcg_abort();
1476 }
1477 }
1478
1479 static const TCGTargetOpDef sparc_op_defs[] = {
1480 { INDEX_op_exit_tb, { } },
1481 { INDEX_op_goto_tb, { } },
1482 { INDEX_op_br, { } },
1483
1484 { INDEX_op_ld8u_i32, { "r", "r" } },
1485 { INDEX_op_ld8s_i32, { "r", "r" } },
1486 { INDEX_op_ld16u_i32, { "r", "r" } },
1487 { INDEX_op_ld16s_i32, { "r", "r" } },
1488 { INDEX_op_ld_i32, { "r", "r" } },
1489 { INDEX_op_st8_i32, { "rZ", "r" } },
1490 { INDEX_op_st16_i32, { "rZ", "r" } },
1491 { INDEX_op_st_i32, { "rZ", "r" } },
1492
1493 { INDEX_op_add_i32, { "r", "rZ", "rJ" } },
1494 { INDEX_op_mul_i32, { "r", "rZ", "rJ" } },
1495 { INDEX_op_div_i32, { "r", "rZ", "rJ" } },
1496 { INDEX_op_divu_i32, { "r", "rZ", "rJ" } },
1497 { INDEX_op_sub_i32, { "r", "rZ", "rJ" } },
1498 { INDEX_op_and_i32, { "r", "rZ", "rJ" } },
1499 { INDEX_op_andc_i32, { "r", "rZ", "rJ" } },
1500 { INDEX_op_or_i32, { "r", "rZ", "rJ" } },
1501 { INDEX_op_orc_i32, { "r", "rZ", "rJ" } },
1502 { INDEX_op_xor_i32, { "r", "rZ", "rJ" } },
1503
1504 { INDEX_op_shl_i32, { "r", "rZ", "rJ" } },
1505 { INDEX_op_shr_i32, { "r", "rZ", "rJ" } },
1506 { INDEX_op_sar_i32, { "r", "rZ", "rJ" } },
1507
1508 { INDEX_op_neg_i32, { "r", "rJ" } },
1509 { INDEX_op_not_i32, { "r", "rJ" } },
1510
1511 { INDEX_op_brcond_i32, { "rZ", "rJ" } },
1512 { INDEX_op_setcond_i32, { "r", "rZ", "rJ" } },
1513 { INDEX_op_movcond_i32, { "r", "rZ", "rJ", "rI", "0" } },
1514
1515 { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1516 { INDEX_op_sub2_i32, { "r", "r", "rZ", "rZ", "rJ", "rJ" } },
1517 { INDEX_op_mulu2_i32, { "r", "r", "rZ", "rJ" } },
1518 { INDEX_op_muls2_i32, { "r", "r", "rZ", "rJ" } },
1519
1520 { INDEX_op_ld8u_i64, { "R", "r" } },
1521 { INDEX_op_ld8s_i64, { "R", "r" } },
1522 { INDEX_op_ld16u_i64, { "R", "r" } },
1523 { INDEX_op_ld16s_i64, { "R", "r" } },
1524 { INDEX_op_ld32u_i64, { "R", "r" } },
1525 { INDEX_op_ld32s_i64, { "R", "r" } },
1526 { INDEX_op_ld_i64, { "R", "r" } },
1527 { INDEX_op_st8_i64, { "RZ", "r" } },
1528 { INDEX_op_st16_i64, { "RZ", "r" } },
1529 { INDEX_op_st32_i64, { "RZ", "r" } },
1530 { INDEX_op_st_i64, { "RZ", "r" } },
1531
1532 { INDEX_op_add_i64, { "R", "RZ", "RJ" } },
1533 { INDEX_op_mul_i64, { "R", "RZ", "RJ" } },
1534 { INDEX_op_div_i64, { "R", "RZ", "RJ" } },
1535 { INDEX_op_divu_i64, { "R", "RZ", "RJ" } },
1536 { INDEX_op_sub_i64, { "R", "RZ", "RJ" } },
1537 { INDEX_op_and_i64, { "R", "RZ", "RJ" } },
1538 { INDEX_op_andc_i64, { "R", "RZ", "RJ" } },
1539 { INDEX_op_or_i64, { "R", "RZ", "RJ" } },
1540 { INDEX_op_orc_i64, { "R", "RZ", "RJ" } },
1541 { INDEX_op_xor_i64, { "R", "RZ", "RJ" } },
1542
1543 { INDEX_op_shl_i64, { "R", "RZ", "RJ" } },
1544 { INDEX_op_shr_i64, { "R", "RZ", "RJ" } },
1545 { INDEX_op_sar_i64, { "R", "RZ", "RJ" } },
1546
1547 { INDEX_op_neg_i64, { "R", "RJ" } },
1548 { INDEX_op_not_i64, { "R", "RJ" } },
1549
1550 { INDEX_op_ext32s_i64, { "R", "R" } },
1551 { INDEX_op_ext32u_i64, { "R", "R" } },
1552 { INDEX_op_ext_i32_i64, { "R", "r" } },
1553 { INDEX_op_extu_i32_i64, { "R", "r" } },
1554 { INDEX_op_extrl_i64_i32, { "r", "R" } },
1555 { INDEX_op_extrh_i64_i32, { "r", "R" } },
1556
1557 { INDEX_op_brcond_i64, { "RZ", "RJ" } },
1558 { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
1559 { INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } },
1560
1561 { INDEX_op_add2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },
1562 { INDEX_op_sub2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },
1563 { INDEX_op_muluh_i64, { "R", "RZ", "RZ" } },
1564
1565 { INDEX_op_qemu_ld_i32, { "r", "A" } },
1566 { INDEX_op_qemu_ld_i64, { "R", "A" } },
1567 { INDEX_op_qemu_st_i32, { "sZ", "A" } },
1568 { INDEX_op_qemu_st_i64, { "SZ", "A" } },
1569
1570 { -1 },
1571 };
1572
1573 static void tcg_target_init(TCGContext *s)
1574 {
1575     /* Only probe for the platform and capabilities if we haven't already
1576 determined maximum values at compile time. */
1577 #ifndef use_vis3_instructions
1578 {
1579 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
1580 use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
1581 }
1582 #endif
1583
1584 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
1585 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, ALL_64);
1586
1587 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
1588 (1 << TCG_REG_G1) |
1589 (1 << TCG_REG_G2) |
1590 (1 << TCG_REG_G3) |
1591 (1 << TCG_REG_G4) |
1592 (1 << TCG_REG_G5) |
1593 (1 << TCG_REG_G6) |
1594 (1 << TCG_REG_G7) |
1595 (1 << TCG_REG_O0) |
1596 (1 << TCG_REG_O1) |
1597 (1 << TCG_REG_O2) |
1598 (1 << TCG_REG_O3) |
1599 (1 << TCG_REG_O4) |
1600 (1 << TCG_REG_O5) |
1601 (1 << TCG_REG_O7));
1602
1603 tcg_regset_clear(s->reserved_regs);
1604 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
1605 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
1606 tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
1607 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
1608 tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
1609 tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
1610 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
1611 tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
1612
1613 tcg_add_target_add_op_defs(sparc_op_defs);
1614 }
1615
1616 #if SPARC64
1617 # define ELF_HOST_MACHINE EM_SPARCV9
1618 #else
1619 # define ELF_HOST_MACHINE EM_SPARC32PLUS
1620 # define ELF_HOST_FLAGS EF_SPARC_32PLUS
1621 #endif
1622
1623 typedef struct {
1624 DebugFrameHeader h;
1625 uint8_t fde_def_cfa[SPARC64 ? 4 : 2];
1626 uint8_t fde_win_save;
1627 uint8_t fde_ret_save[3];
1628 } DebugFrame;
1629
1630 static const DebugFrame debug_frame = {
1631 .h.cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1632 .h.cie.id = -1,
1633 .h.cie.version = 1,
1634 .h.cie.code_align = 1,
1635 .h.cie.data_align = -sizeof(void *) & 0x7f,
1636 .h.cie.return_column = 15, /* o7 */
1637
1638 /* Total FDE size does not include the "len" member. */
1639 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
1640
1641 .fde_def_cfa = {
1642 #if SPARC64
1643 12, 30, /* DW_CFA_def_cfa i6, 2047 */
1644 (2047 & 0x7f) | 0x80, (2047 >> 7)
1645 #else
1646 13, 30 /* DW_CFA_def_cfa_register i6 */
1647 #endif
1648 },
1649 .fde_win_save = 0x2d, /* DW_CFA_GNU_window_save */
1650 .fde_ret_save = { 9, 15, 31 }, /* DW_CFA_register o7, i7 */
1651 };
1652
1653 void tcg_register_jit(void *buf, size_t buf_size)
1654 {
1655 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1656 }
1657
1658 void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
1659 {
1660 uint32_t *ptr = (uint32_t *)jmp_addr;
1661 uintptr_t disp = addr - jmp_addr;
1662
1663 /* We can reach the entire address space for 32-bit. For 64-bit
1664 the code_gen_buffer can't be larger than 2GB. */
1665 tcg_debug_assert(disp == (int32_t)disp);
1666
1667 atomic_set(ptr, deposit32(CALL, 0, 30, disp >> 2));
1668 flush_icache_range(jmp_addr, jmp_addr + 4);
1669 }
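/* The patched word is always a CALL instruction; its 30-bit word displacement
   spans +/- 2GB from the jump site, which is why the 64-bit case relies on
   code_gen_buffer being no larger than 2GB (see the assertion above). */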