/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* We only support generating code for 64-bit mode. */
#ifndef __arch64__
#error "unsupported code generation mode"
#endif

#include "../tcg-pool.c.inc"

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%g0",
    "%g1",
    "%g2",
    "%g3",
    "%g4",
    "%g5",
    "%g6",
    "%g7",
    "%o0",
    "%o1",
    "%o2",
    "%o3",
    "%o4",
    "%o5",
    "%o6",
    "%o7",
    "%l0",
    "%l1",
    "%l2",
    "%l3",
    "%l4",
    "%l5",
    "%l6",
    "%l7",
    "%i0",
    "%i1",
    "%i2",
    "%i3",
    "%i4",
    "%i5",
    "%i6",
    "%i7",
};
#endif

#define TCG_CT_CONST_S11  0x100
#define TCG_CT_CONST_S13  0x200
#define TCG_CT_CONST_ZERO 0x400

/*
 * For softmmu, we need to avoid conflicts with the first 3
 * argument registers to perform the tlb lookup, and to call
 * the helper function.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_O0, 3)
#else
#define SOFTMMU_RESERVE_REGS 0
#endif
#define ALL_GENERAL_REGS  MAKE_64BIT_MASK(0, 32)
#define ALL_QLDST_REGS    (ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS)

/* Define some temporary registers.  T2 is used for constant generation. */
#define TCG_REG_T1  TCG_REG_G1
#define TCG_REG_T2  TCG_REG_O7

#ifndef CONFIG_SOFTMMU
# define TCG_GUEST_BASE_REG TCG_REG_I5
#endif

#define TCG_REG_TB  TCG_REG_I1
#define USE_REG_TB  (sizeof(void *) > 4)

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_L0,
    TCG_REG_L1,
    TCG_REG_L2,
    TCG_REG_L3,
    TCG_REG_L4,
    TCG_REG_L5,
    TCG_REG_L6,
    TCG_REG_L7,

    TCG_REG_I0,
    TCG_REG_I1,
    TCG_REG_I2,
    TCG_REG_I3,
    TCG_REG_I4,
    TCG_REG_I5,

    TCG_REG_G2,
    TCG_REG_G3,
    TCG_REG_G4,
    TCG_REG_G5,

    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_iarg_regs[6] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
    TCG_REG_O4,
    TCG_REG_O5,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_O0,
    TCG_REG_O1,
    TCG_REG_O2,
    TCG_REG_O3,
};

#define INSN_OP(x)  ((x) << 30)
#define INSN_OP2(x) ((x) << 22)
#define INSN_OP3(x) ((x) << 19)
#define INSN_OPF(x) ((x) << 5)
#define INSN_RD(x)  ((x) << 25)
#define INSN_RS1(x) ((x) << 14)
#define INSN_RS2(x) (x)
#define INSN_ASI(x) ((x) << 5)

#define INSN_IMM10(x) ((1 << 13) | ((x) & 0x3ff))
#define INSN_IMM11(x) ((1 << 13) | ((x) & 0x7ff))
#define INSN_IMM13(x) ((1 << 13) | ((x) & 0x1fff))
#define INSN_OFF16(x) ((((x) >> 2) & 0x3fff) | ((((x) >> 16) & 3) << 20))
#define INSN_OFF19(x) (((x) >> 2) & 0x07ffff)
#define INSN_COND(x)  ((x) << 25)
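
/*
 * Illustrative example of the field macros above (not itself used by
 * the backend): "add %o1, %o2, %o0" assembles as
 *     INSN_OP(2) | INSN_RD(8) | INSN_OP3(0x00) | INSN_RS1(9) | INSN_RS2(10)
 * which is 0x9002400a, since %o0..%o7 are registers 8..15.  The
 * immediate forms instead set bit 13 via INSN_IMM13() and carry a
 * sign-extended 13-bit constant in the low bits.
 */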

#define COND_N     0x0
#define COND_E     0x1
#define COND_LE    0x2
#define COND_L     0x3
#define COND_LEU   0x4
#define COND_CS    0x5
#define COND_NEG   0x6
#define COND_VS    0x7
#define COND_A     0x8
#define COND_NE    0x9
#define COND_G     0xa
#define COND_GE    0xb
#define COND_GU    0xc
#define COND_CC    0xd
#define COND_POS   0xe
#define COND_VC    0xf
#define BA         (INSN_OP(0) | INSN_COND(COND_A) | INSN_OP2(0x2))

#define RCOND_Z    1
#define RCOND_LEZ  2
#define RCOND_LZ   3
#define RCOND_NZ   5
#define RCOND_GZ   6
#define RCOND_GEZ  7

#define MOVCC_ICC  (1 << 18)
#define MOVCC_XCC  (1 << 18 | 1 << 12)

#define BPCC_ICC   0
#define BPCC_XCC   (2 << 20)
#define BPCC_PT    (1 << 19)
#define BPCC_PN    0
#define BPCC_A     (1 << 29)

#define BPR_PT     BPCC_PT

#define ARITH_ADD     (INSN_OP(2) | INSN_OP3(0x00))
#define ARITH_ADDCC   (INSN_OP(2) | INSN_OP3(0x10))
#define ARITH_AND     (INSN_OP(2) | INSN_OP3(0x01))
#define ARITH_ANDCC   (INSN_OP(2) | INSN_OP3(0x11))
#define ARITH_ANDN    (INSN_OP(2) | INSN_OP3(0x05))
#define ARITH_OR      (INSN_OP(2) | INSN_OP3(0x02))
#define ARITH_ORCC    (INSN_OP(2) | INSN_OP3(0x12))
#define ARITH_ORN     (INSN_OP(2) | INSN_OP3(0x06))
#define ARITH_XOR     (INSN_OP(2) | INSN_OP3(0x03))
#define ARITH_SUB     (INSN_OP(2) | INSN_OP3(0x04))
#define ARITH_SUBCC   (INSN_OP(2) | INSN_OP3(0x14))
#define ARITH_ADDC    (INSN_OP(2) | INSN_OP3(0x08))
#define ARITH_SUBC    (INSN_OP(2) | INSN_OP3(0x0c))
#define ARITH_UMUL    (INSN_OP(2) | INSN_OP3(0x0a))
#define ARITH_SMUL    (INSN_OP(2) | INSN_OP3(0x0b))
#define ARITH_UDIV    (INSN_OP(2) | INSN_OP3(0x0e))
#define ARITH_SDIV    (INSN_OP(2) | INSN_OP3(0x0f))
#define ARITH_MULX    (INSN_OP(2) | INSN_OP3(0x09))
#define ARITH_UDIVX   (INSN_OP(2) | INSN_OP3(0x0d))
#define ARITH_SDIVX   (INSN_OP(2) | INSN_OP3(0x2d))
#define ARITH_MOVCC   (INSN_OP(2) | INSN_OP3(0x2c))
#define ARITH_MOVR    (INSN_OP(2) | INSN_OP3(0x2f))

#define ARITH_ADDXC   (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))

#define SHIFT_SLL  (INSN_OP(2) | INSN_OP3(0x25))
#define SHIFT_SRL  (INSN_OP(2) | INSN_OP3(0x26))
#define SHIFT_SRA  (INSN_OP(2) | INSN_OP3(0x27))

#define SHIFT_SLLX (INSN_OP(2) | INSN_OP3(0x25) | (1 << 12))
#define SHIFT_SRLX (INSN_OP(2) | INSN_OP3(0x26) | (1 << 12))
#define SHIFT_SRAX (INSN_OP(2) | INSN_OP3(0x27) | (1 << 12))

#define RDY        (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(0))
#define WRY        (INSN_OP(2) | INSN_OP3(0x30) | INSN_RD(0))
#define JMPL       (INSN_OP(2) | INSN_OP3(0x38))
#define RETURN     (INSN_OP(2) | INSN_OP3(0x39))
#define SAVE       (INSN_OP(2) | INSN_OP3(0x3c))
#define RESTORE    (INSN_OP(2) | INSN_OP3(0x3d))
#define SETHI      (INSN_OP(0) | INSN_OP2(0x4))
#define CALL       INSN_OP(1)
#define LDUB       (INSN_OP(3) | INSN_OP3(0x01))
#define LDSB       (INSN_OP(3) | INSN_OP3(0x09))
#define LDUH       (INSN_OP(3) | INSN_OP3(0x02))
#define LDSH       (INSN_OP(3) | INSN_OP3(0x0a))
#define LDUW       (INSN_OP(3) | INSN_OP3(0x00))
#define LDSW       (INSN_OP(3) | INSN_OP3(0x08))
#define LDX        (INSN_OP(3) | INSN_OP3(0x0b))
#define STB        (INSN_OP(3) | INSN_OP3(0x05))
#define STH        (INSN_OP(3) | INSN_OP3(0x06))
#define STW        (INSN_OP(3) | INSN_OP3(0x04))
#define STX        (INSN_OP(3) | INSN_OP3(0x0e))
#define LDUBA      (INSN_OP(3) | INSN_OP3(0x11))
#define LDSBA      (INSN_OP(3) | INSN_OP3(0x19))
#define LDUHA      (INSN_OP(3) | INSN_OP3(0x12))
#define LDSHA      (INSN_OP(3) | INSN_OP3(0x1a))
#define LDUWA      (INSN_OP(3) | INSN_OP3(0x10))
#define LDSWA      (INSN_OP(3) | INSN_OP3(0x18))
#define LDXA       (INSN_OP(3) | INSN_OP3(0x1b))
#define STBA       (INSN_OP(3) | INSN_OP3(0x15))
#define STHA       (INSN_OP(3) | INSN_OP3(0x16))
#define STWA       (INSN_OP(3) | INSN_OP3(0x14))
#define STXA       (INSN_OP(3) | INSN_OP3(0x1e))

#define MEMBAR     (INSN_OP(2) | INSN_OP3(0x28) | INSN_RS1(15) | (1 << 13))

#define NOP        (SETHI | INSN_RD(TCG_REG_G0) | 0)

#ifndef ASI_PRIMARY_LITTLE
#define ASI_PRIMARY_LITTLE 0x88
#endif

#define LDUH_LE    (LDUHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSH_LE    (LDSHA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDUW_LE    (LDUWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDSW_LE    (LDSWA | INSN_ASI(ASI_PRIMARY_LITTLE))
#define LDX_LE     (LDXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#define STH_LE     (STHA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STW_LE     (STWA  | INSN_ASI(ASI_PRIMARY_LITTLE))
#define STX_LE     (STXA  | INSN_ASI(ASI_PRIMARY_LITTLE))

#ifndef use_vis3_instructions
bool use_vis3_instructions;
#endif

static bool check_fit_i64(int64_t val, unsigned int bits)
{
    return val == sextract64(val, 0, bits);
}

static bool check_fit_i32(int32_t val, unsigned int bits)
{
    return val == sextract32(val, 0, bits);
}
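
/*
 * For example, check_fit_i64(4095, 13) is true and
 * check_fit_i64(4096, 13) is false: a signed 13-bit immediate
 * covers -4096..4095.
 */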

#define check_fit_tl    check_fit_i64
#define check_fit_ptr   check_fit_i64

static bool patch_reloc(tcg_insn_unit *src_rw, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    uint32_t insn = *src_rw;
    intptr_t pcrel;

    value += addend;
    pcrel = tcg_ptr_byte_diff((tcg_insn_unit *)value, src_rx);

    switch (type) {
    case R_SPARC_WDISP16:
        if (!check_fit_ptr(pcrel >> 2, 16)) {
            return false;
        }
        insn &= ~INSN_OFF16(-1);
        insn |= INSN_OFF16(pcrel);
        break;
    case R_SPARC_WDISP19:
        if (!check_fit_ptr(pcrel >> 2, 19)) {
            return false;
        }
        insn &= ~INSN_OFF19(-1);
        insn |= INSN_OFF19(pcrel);
        break;
    case R_SPARC_13:
        if (!check_fit_ptr(value, 13)) {
            return false;
        }
        insn &= ~INSN_IMM13(-1);
        insn |= INSN_IMM13(value);
        break;
    default:
        g_assert_not_reached();
    }

    *src_rw = insn;
    return true;
}
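
/*
 * As a worked example, consider a branch whose target turns out to lie
 * 256 bytes ahead: R_SPARC_WDISP19 yields pcrel == 0x100, and
 * INSN_OFF19(0x100) == 0x40 is OR'd into the cleared displacement
 * field (the >> 2 reflects word-granular displacements).
 */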

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S11) && check_fit_tl(val, 11)) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S13) && check_fit_tl(val, 13)) {
        return 1;
    } else {
        return 0;
    }
}

static void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, NOP);
}

static void tcg_out_arith(TCGContext *s, TCGReg rd, TCGReg rs1,
                          TCGReg rs2, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_RS2(rs2));
}

static void tcg_out_arithi(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t offset, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1) | INSN_IMM13(offset));
}

static void tcg_out_arithc(TCGContext *s, TCGReg rd, TCGReg rs1,
                           int32_t val2, int val2const, int op)
{
    tcg_out32(s, op | INSN_RD(rd) | INSN_RS1(rs1)
              | (val2const ? INSN_IMM13(val2) : INSN_RS2(val2)));
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    }
    return true;
}

static void tcg_out_mov_delay(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_arith(s, ret, arg, TCG_REG_G0, ARITH_OR);
    } else {
        tcg_out_nop(s);
    }
}
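
/*
 * Note: tcg_out_mov_delay is intended for a branch delay slot, which
 * must be occupied by exactly one instruction -- hence the nop when no
 * move is needed, where tcg_out_mov would emit nothing at all.
 */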

static void tcg_out_sethi(TCGContext *s, TCGReg ret, uint32_t arg)
{
    tcg_out32(s, SETHI | INSN_RD(ret) | ((arg & 0xfffffc00) >> 10));
}

static void tcg_out_movi_imm13(TCGContext *s, TCGReg ret, int32_t arg)
{
    tcg_out_arithi(s, ret, TCG_REG_G0, arg, ARITH_OR);
}

static void tcg_out_movi_imm32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (check_fit_i32(arg, 13)) {
        /* A 13-bit constant sign-extended to 64-bits.  */
        tcg_out_movi_imm13(s, ret, arg);
    } else {
        /* A 32-bit constant zero-extended to 64 bits.  */
        tcg_out_sethi(s, ret, arg);
        if (arg & 0x3ff) {
            tcg_out_arithi(s, ret, ret, arg & 0x3ff, ARITH_OR);
        }
    }
}
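
/*
 * For example, loading 0x12345678 (which does not fit in 13 bits)
 * emits the classic two-insn sequence:
 *     sethi  %hi(0x12345678), ret    ! ret = 0x12345400 (upper 22 bits)
 *     or     ret, 0x278, ret         ! fill in the low 10 bits
 */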

static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue,
                             TCGReg scratch)
{
    tcg_target_long hi, lo = (int32_t)arg;
    tcg_target_long test, lsb;

    /* A 32-bit constant, or 32-bit zero-extended to 64-bits.  */
    if (type == TCG_TYPE_I32 || arg == (uint32_t)arg) {
        tcg_out_movi_imm32(s, ret, arg);
        return;
    }

    /* A 13-bit constant sign-extended to 64-bits.  */
    if (check_fit_tl(arg, 13)) {
        tcg_out_movi_imm13(s, ret, arg);
        return;
    }

    /* A 13-bit constant relative to the TB.  */
    if (!in_prologue && USE_REG_TB) {
        test = tcg_tbrel_diff(s, (void *)arg);
        if (check_fit_ptr(test, 13)) {
            tcg_out_arithi(s, ret, TCG_REG_TB, test, ARITH_ADD);
            return;
        }
    }

    /* A 32-bit constant sign-extended to 64-bits.  */
    if (arg == lo) {
        tcg_out_sethi(s, ret, ~arg);
        tcg_out_arithi(s, ret, ret, (arg & 0x3ff) | -0x400, ARITH_XOR);
        return;
    }

    /* A 32-bit constant, shifted.  */
    lsb = ctz64(arg);
    test = (tcg_target_long)arg >> lsb;
    if (lsb > 10 && test == extract64(test, 0, 21)) {
        tcg_out_sethi(s, ret, test << 10);
        tcg_out_arithi(s, ret, ret, lsb - 10, SHIFT_SLLX);
        return;
    } else if (test == (uint32_t)test || test == (int32_t)test) {
        tcg_out_movi_int(s, TCG_TYPE_I64, ret, test, in_prologue, scratch);
        tcg_out_arithi(s, ret, ret, lsb, SHIFT_SLLX);
        return;
    }

    /* Use the constant pool, if possible.  */
    if (!in_prologue && USE_REG_TB) {
        new_pool_label(s, arg, R_SPARC_13, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LDX | INSN_RD(ret) | INSN_RS1(TCG_REG_TB));
        return;
    }

    /* A 64-bit constant decomposed into 2 32-bit pieces.  */
    if (check_fit_i32(lo, 13)) {
        hi = (arg - lo) >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arithi(s, ret, ret, lo, ARITH_ADD);
    } else {
        hi = arg >> 32;
        tcg_out_movi_imm32(s, ret, hi);
        tcg_out_movi_imm32(s, scratch, lo);
        tcg_out_arithi(s, ret, ret, 32, SHIFT_SLLX);
        tcg_out_arith(s, ret, ret, scratch, ARITH_OR);
    }
}
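
/*
 * When none of the special cases above apply, a constant such as
 * 0x0000123456789abc falls through to the final two-piece decomposition:
 *     movi_imm32  ret, 0x1234          ! high 32 bits
 *     movi_imm32  scratch, 0x56789abc  ! low 32 bits
 *     sllx        ret, 32, ret
 *     or          ret, scratch, ret
 */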

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    tcg_debug_assert(ret != TCG_REG_T2);
    tcg_out_movi_int(s, type, ret, arg, false, TCG_REG_T2);
}

static void tcg_out_ldst_rr(TCGContext *s, TCGReg data, TCGReg a1,
                            TCGReg a2, int op)
{
    tcg_out32(s, op | INSN_RD(data) | INSN_RS1(a1) | INSN_RS2(a2));
}

static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         intptr_t offset, int op)
{
    if (check_fit_ptr(offset, 13)) {
        tcg_out32(s, op | INSN_RD(ret) | INSN_RS1(addr) |
                  INSN_IMM13(offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, offset);
        tcg_out_ldst_rr(s, ret, addr, TCG_REG_T1, op);
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, (type == TCG_TYPE_I32 ? LDUW : LDX));
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    tcg_out_ldst(s, arg, arg1, arg2, (type == TCG_TYPE_I32 ? STW : STX));
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_G0, base, ofs);
        return true;
    }
    return false;
}

static void tcg_out_ld_ptr(TCGContext *s, TCGReg ret, const void *arg)
{
    intptr_t diff = tcg_tbrel_diff(s, arg);
    if (USE_REG_TB && check_fit_ptr(diff, 13)) {
        tcg_out_ld(s, TCG_TYPE_PTR, ret, TCG_REG_TB, diff);
        return;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, ret, (uintptr_t)arg & ~0x3ff);
    tcg_out_ld(s, TCG_TYPE_PTR, ret, ret, (uintptr_t)arg & 0x3ff);
}

static void tcg_out_sety(TCGContext *s, TCGReg rs)
{
    tcg_out32(s, WRY | INSN_RS1(TCG_REG_G0) | INSN_RS2(rs));
}

static void tcg_out_div32(TCGContext *s, TCGReg rd, TCGReg rs1,
                          int32_t val2, int val2const, int uns)
{
    /* Load Y with the sign/zero extension of RS1 to 64-bits.  */
    if (uns) {
        tcg_out_sety(s, TCG_REG_G0);
    } else {
        tcg_out_arithi(s, TCG_REG_T1, rs1, 31, SHIFT_SRA);
        tcg_out_sety(s, TCG_REG_T1);
    }

    tcg_out_arithc(s, rd, rs1, val2, val2const,
                   uns ? ARITH_UDIV : ARITH_SDIV);
}
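
/*
 * For reference: the 32-bit UDIV/SDIV instructions divide the 64-bit
 * value formed by Y (high 32 bits) and the low 32 bits of RS1, which
 * is why Y is seeded above with zero or the sign extension of RS1.
 */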

static const uint8_t tcg_cond_to_bcond[] = {
    [TCG_COND_EQ] = COND_E,
    [TCG_COND_NE] = COND_NE,
    [TCG_COND_LT] = COND_L,
    [TCG_COND_GE] = COND_GE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_G,
    [TCG_COND_LTU] = COND_CS,
    [TCG_COND_GEU] = COND_CC,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_GU,
};

static const uint8_t tcg_cond_to_rcond[] = {
    [TCG_COND_EQ] = RCOND_Z,
    [TCG_COND_NE] = RCOND_NZ,
    [TCG_COND_LT] = RCOND_LZ,
    [TCG_COND_GT] = RCOND_GZ,
    [TCG_COND_LE] = RCOND_LEZ,
    [TCG_COND_GE] = RCOND_GEZ
};

static void tcg_out_bpcc0(TCGContext *s, int scond, int flags, int off19)
{
    tcg_out32(s, INSN_OP(0) | INSN_OP2(1) | INSN_COND(scond) | flags | off19);
}

static void tcg_out_bpcc(TCGContext *s, int scond, int flags, TCGLabel *l)
{
    int off19 = 0;

    if (l->has_value) {
        off19 = INSN_OFF19(tcg_pcrel_diff(s, l->u.value_ptr));
    } else {
        tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP19, l, 0);
    }
    tcg_out_bpcc0(s, scond, flags, off19);
}

static void tcg_out_cmp(TCGContext *s, TCGReg c1, int32_t c2, int c2const)
{
    tcg_out_arithc(s, TCG_REG_G0, c1, c2, c2const, ARITH_SUBCC);
}

static void tcg_out_brcond_i32(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    tcg_out_cmp(s, arg1, arg2, const_arg2);
    tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_ICC | BPCC_PT, l);
    tcg_out_nop(s);
}

static void tcg_out_movcc(TCGContext *s, TCGCond cond, int cc, TCGReg ret,
                          int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVCC | cc | INSN_RD(ret)
              | INSN_RS1(tcg_cond_to_bcond[cond])
              | (v1const ? INSN_IMM11(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    tcg_out_cmp(s, c1, c2, c2const);
    tcg_out_movcc(s, cond, MOVCC_ICC, ret, v1, v1const);
}

static void tcg_out_brcond_i64(TCGContext *s, TCGCond cond, TCGReg arg1,
                               int32_t arg2, int const_arg2, TCGLabel *l)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.  */
    if (arg2 == 0 && !is_unsigned_cond(cond)) {
        int off16 = 0;

        if (l->has_value) {
            off16 = INSN_OFF16(tcg_pcrel_diff(s, l->u.value_ptr));
        } else {
            tcg_out_reloc(s, s->code_ptr, R_SPARC_WDISP16, l, 0);
        }
        tcg_out32(s, INSN_OP(0) | INSN_OP2(3) | BPR_PT | INSN_RS1(arg1)
                  | INSN_COND(tcg_cond_to_rcond[cond]) | off16);
    } else {
        tcg_out_cmp(s, arg1, arg2, const_arg2);
        tcg_out_bpcc(s, tcg_cond_to_bcond[cond], BPCC_XCC | BPCC_PT, l);
    }
    tcg_out_nop(s);
}

static void tcg_out_movr(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg c1,
                         int32_t v1, int v1const)
{
    tcg_out32(s, ARITH_MOVR | INSN_RD(ret) | INSN_RS1(c1)
              | (tcg_cond_to_rcond[cond] << 10)
              | (v1const ? INSN_IMM10(v1) : INSN_RS2(v1)));
}

static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const,
                                int32_t v1, int v1const)
{
    /* For 64-bit signed comparisons vs zero, we can avoid the compare.
       Note that the immediate range is one bit smaller, so we must check
       for that as well.  */
    if (c2 == 0 && !is_unsigned_cond(cond)
        && (!v1const || check_fit_i32(v1, 10))) {
        tcg_out_movr(s, cond, ret, c1, v1, v1const);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, v1, v1const);
    }
}

static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
    switch (cond) {
    case TCG_COND_LTU:
    case TCG_COND_GEU:
        /* The result of the comparison is in the carry bit.  */
        break;

    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* For equality, we can transform to inequality vs zero.  */
        if (c2 != 0) {
            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
            c2 = TCG_REG_T1;
        } else {
            c2 = c1;
        }
        c1 = TCG_REG_G0, c2const = 0;
        cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
        break;

    case TCG_COND_GTU:
    case TCG_COND_LEU:
        /* If we don't need to load a constant into a register, we can
           swap the operands on GTU/LEU.  There's no benefit to loading
           the constant into a temporary register.  */
        if (!c2const || c2 == 0) {
            TCGReg t = c1;
            c1 = c2;
            c2 = t;
            c2const = 0;
            cond = tcg_swap_cond(cond);
            break;
        }
        /* FALLTHRU */

    default:
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_ICC, ret, 1, 1);
        return;
    }

    tcg_out_cmp(s, c1, c2, c2const);
    if (cond == TCG_COND_LTU) {
        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
    } else {
        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
    }
}
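
/*
 * The carry trick used above, spelled out: after SUBCC sets the carry
 * flag on unsigned borrow, "addc %g0, 0, ret" computes ret = 0 + 0 + C
 * (1 exactly for LTU), while "subc %g0, -1, ret" computes
 * ret = 0 - (-1) - C = 1 - C (1 exactly for GEU).
 */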

static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                TCGReg c1, int32_t c2, int c2const)
{
    if (use_vis3_instructions) {
        switch (cond) {
        case TCG_COND_NE:
            if (c2 != 0) {
                break;
            }
            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
            /* FALLTHRU */
        case TCG_COND_LTU:
            tcg_out_cmp(s, c1, c2, c2const);
            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
            return;
        default:
            break;
        }
    }

    /* For 64-bit signed comparisons vs zero, we can avoid the compare
       if the input does not overlap the output.  */
    if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movr(s, cond, ret, c1, 1, 1);
    } else {
        tcg_out_cmp(s, c1, c2, c2const);
        tcg_out_movi_imm13(s, ret, 0);
        tcg_out_movcc(s, cond, MOVCC_XCC, ret, 1, 1);
    }
}

static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, int opl, int oph)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, opl);
    tcg_out_arithc(s, rh, ah, bh, bhconst, oph);
    tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
}

static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
                                int32_t bh, int bhconst, bool is_sub)
{
    TCGReg tmp = TCG_REG_T1;

    /* Note that the low parts are fully consumed before tmp is set.  */
    if (rl != ah && (bhconst || rl != bh)) {
        tmp = rl;
    }

    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);

    if (use_vis3_instructions && !is_sub) {
        /* Note that ADDXC doesn't accept immediates.  */
        if (bhconst && bh != 0) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh);
            bh = TCG_REG_T2;
        }
        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
    } else if (bh == TCG_REG_G0) {
        /* If we have a zero, we can perform the operation in two insns,
           with the arithmetic first, and a conditional move into place.  */
        if (rh == ah) {
            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
        } else {
            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
        }
    } else {
        /*
         * Otherwise adjust BH as if there is carry into T2.
         * Note that constant BH is constrained to 11 bits for the MOVCC,
         * so the adjustment fits 12 bits.
         */
        if (bhconst) {
            tcg_out_movi_imm13(s, TCG_REG_T2, bh + (is_sub ? -1 : 1));
        } else {
            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
                           is_sub ? ARITH_SUB : ARITH_ADD);
        }
        /* ... smoosh T2 back to original BH if carry is clear ... */
        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
        /* ... and finally perform the arithmetic with the new operand.  */
        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
    }

    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
}

static void tcg_out_jmpl_const(TCGContext *s, const tcg_insn_unit *dest,
                               bool in_prologue, bool tail_call)
{
    uintptr_t desti = (uintptr_t)dest;

    /* Be careful not to clobber %o7 for a tail call.  */
    tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_REG_T1,
                     desti & ~0xfff, in_prologue,
                     tail_call ? TCG_REG_G2 : TCG_REG_O7);
    tcg_out_arithi(s, tail_call ? TCG_REG_G0 : TCG_REG_O7,
                   TCG_REG_T1, desti & 0xfff, JMPL);
}

static void tcg_out_call_nodelay(TCGContext *s, const tcg_insn_unit *dest,
                                 bool in_prologue)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, dest);

    if (disp == (int32_t)disp) {
        tcg_out32(s, CALL | (uint32_t)disp >> 2);
    } else {
        tcg_out_jmpl_const(s, dest, in_prologue, false);
    }
}

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *dest)
{
    tcg_out_call_nodelay(s, dest, false);
    tcg_out_nop(s);
}

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Note that the TCG memory order constants mirror the Sparc MEMBAR.  */
    tcg_out32(s, MEMBAR | (a0 & TCG_MO_ALL));
}
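
/*
 * The mirroring noted above: TCG_MO_LD_LD, TCG_MO_ST_LD, TCG_MO_LD_ST
 * and TCG_MO_ST_ST (0x1, 0x2, 0x4, 0x8) coincide with the MEMBAR mmask
 * bits #LoadLoad, #StoreLoad, #LoadStore and #StoreStore, so the TCG
 * barrier mask can be copied directly into the instruction.
 */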

#ifdef CONFIG_SOFTMMU
static const tcg_insn_unit *qemu_ld_trampoline[(MO_SSIZE | MO_BSWAP) + 1];
static const tcg_insn_unit *qemu_st_trampoline[(MO_SIZE | MO_BSWAP) + 1];

static void emit_extend(TCGContext *s, TCGReg r, int op)
{
    /* Emit zero extend of 8, 16 or 32 bit data as
     * required by the MO_* value op; do nothing for 64 bit.
     */
    switch (op & MO_SIZE) {
    case MO_8:
        tcg_out_arithi(s, r, r, 0xff, ARITH_AND);
        break;
    case MO_16:
        tcg_out_arithi(s, r, r, 16, SHIFT_SLL);
        tcg_out_arithi(s, r, r, 16, SHIFT_SRL);
        break;
    case MO_32:
        tcg_out_arith(s, r, r, 0, SHIFT_SRL);
        break;
    case MO_64:
        break;
    }
}

static void build_trampolines(TCGContext *s)
{
    static void * const qemu_ld_helpers[] = {
        [MO_UB] = helper_ret_ldub_mmu,
        [MO_SB] = helper_ret_ldsb_mmu,
        [MO_LEUW] = helper_le_lduw_mmu,
        [MO_LESW] = helper_le_ldsw_mmu,
        [MO_LEUL] = helper_le_ldul_mmu,
        [MO_LEUQ] = helper_le_ldq_mmu,
        [MO_BEUW] = helper_be_lduw_mmu,
        [MO_BESW] = helper_be_ldsw_mmu,
        [MO_BEUL] = helper_be_ldul_mmu,
        [MO_BEUQ] = helper_be_ldq_mmu,
    };
    static void * const qemu_st_helpers[] = {
        [MO_UB] = helper_ret_stb_mmu,
        [MO_LEUW] = helper_le_stw_mmu,
        [MO_LEUL] = helper_le_stl_mmu,
        [MO_LEUQ] = helper_le_stq_mmu,
        [MO_BEUW] = helper_be_stw_mmu,
        [MO_BEUL] = helper_be_stl_mmu,
        [MO_BEUQ] = helper_be_stq_mmu,
    };

    int i;

    for (i = 0; i < ARRAY_SIZE(qemu_ld_helpers); ++i) {
        if (qemu_ld_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_ld_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O3, TCG_REG_O7);
        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_ld_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }

    for (i = 0; i < ARRAY_SIZE(qemu_st_helpers); ++i) {
        if (qemu_st_helpers[i] == NULL) {
            continue;
        }

        /* May as well align the trampoline.  */
        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }
        qemu_st_trampoline[i] = tcg_splitwx_to_rx(s->code_ptr);

        emit_extend(s, TCG_REG_O2, i);

        /* Set the retaddr operand.  */
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_O4, TCG_REG_O7);

        /* Tail call.  */
        tcg_out_jmpl_const(s, qemu_st_helpers[i], true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#else
static const tcg_insn_unit *qemu_unalign_ld_trampoline;
static const tcg_insn_unit *qemu_unalign_st_trampoline;

static void build_trampolines(TCGContext *s)
{
    for (int ld = 0; ld < 2; ++ld) {
        void *helper;

        while ((uintptr_t)s->code_ptr & 15) {
            tcg_out_nop(s);
        }

        if (ld) {
            helper = helper_unaligned_ld;
            qemu_unalign_ld_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        } else {
            helper = helper_unaligned_st;
            qemu_unalign_st_trampoline = tcg_splitwx_to_rx(s->code_ptr);
        }

        /* Tail call.  */
        tcg_out_jmpl_const(s, helper, true, true);
        /* delay slot -- set the env argument */
        tcg_out_mov_delay(s, TCG_REG_O0, TCG_AREG0);
    }
}
#endif

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int tmp_buf_size, frame_size;

    /*
     * The TCG temp buffer is at the top of the frame, immediately
     * below the frame pointer.  Use the logical (aligned) offset here;
     * the stack bias is applied in temp_allocate_frame().
     */
    tmp_buf_size = CPU_TEMP_BUF_NLONGS * (int)sizeof(long);
    tcg_set_frame(s, TCG_REG_I6, -tmp_buf_size, tmp_buf_size);

    /*
     * TCG_TARGET_CALL_STACK_OFFSET includes the stack bias, but is
     * otherwise the minimal frame usable by callees.
     */
    frame_size = TCG_TARGET_CALL_STACK_OFFSET - TCG_TARGET_STACK_BIAS;
    frame_size += TCG_STATIC_CALL_ARGS_SIZE + tmp_buf_size;
    frame_size += TCG_TARGET_STACK_ALIGN - 1;
    frame_size &= -TCG_TARGET_STACK_ALIGN;
    tcg_out32(s, SAVE | INSN_RD(TCG_REG_O6) | INSN_RS1(TCG_REG_O6) |
              INSN_IMM13(-frame_size));

#ifndef CONFIG_SOFTMMU
    if (guest_base != 0) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG,
                         guest_base, true, TCG_REG_T1);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* We choose TCG_REG_TB such that no move is required.  */
    if (USE_REG_TB) {
        QEMU_BUILD_BUG_ON(TCG_REG_TB != TCG_REG_I1);
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB);
    }

    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I1, 0, JMPL);
    /* delay slot */
    tcg_out_nop(s);

    /* Epilogue for goto_ptr.  */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
    /* delay slot */
    tcg_out_movi_imm13(s, TCG_REG_O0, 0);

    build_trampolines(s);
}

static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}

#if defined(CONFIG_SOFTMMU)

/* We expect to use a 13-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 12));

/* Perform the TLB load and compare.

   Inputs:
   ADDRLO and ADDRHI contain the possible two parts of the address.

   MEM_INDEX and S_BITS are the memory context and log2 size of the load.

   WHICH is the offset into the CPUTLBEntry structure of the slot to read.
   This should be offsetof addr_read or addr_write.

   The result of the TLB comparison is in %[ix]cc.  The sanitized address
   is in the returned register, maybe %o0.  The TLB addend is in %o1.  */

static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, int mem_index,
                               MemOp opc, int which)
{
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    const TCGReg r0 = TCG_REG_O0;
    const TCGReg r1 = TCG_REG_O1;
    const TCGReg r2 = TCG_REG_O2;
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, r0, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    tcg_out_arithi(s, r2, addr, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS,
                   SHIFT_SRL);
    tcg_out_arith(s, r2, r2, r0, ARITH_AND);

    /* Add the tlb_table pointer, creating the CPUTLBEntry address into R2.  */
    tcg_out_arith(s, r2, r2, r1, ARITH_ADD);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, r0, r2, which);
    tcg_out_ld(s, TCG_TYPE_PTR, r1, r2, offsetof(CPUTLBEntry, addend));

    /* Mask out the page offset, except for the required alignment.
       We don't support unaligned accesses.  */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    compare_mask = (tcg_target_ulong)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (check_fit_tl(compare_mask, 13)) {
        tcg_out_arithi(s, r2, addr, compare_mask, ARITH_AND);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, r2, compare_mask);
        tcg_out_arith(s, r2, addr, r2, ARITH_AND);
    }
    tcg_out_cmp(s, r0, r2, 0);

    /* If the guest address must be zero-extended, do so now.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, r0, addr, 0, SHIFT_SRL);
        return r0;
    }
    return addr;
}
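
/*
 * Example of the comparison mask built above: with 8k pages
 * (TARGET_PAGE_BITS == 13) and an aligned 4-byte access,
 * compare_mask == ~0x1fff | 0x3.  "addr & compare_mask" thus preserves
 * the page number plus any misaligned low bits, so an unaligned
 * address fails to match the TLB comparator.
 */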
#endif /* CONFIG_SOFTMMU */

static const int qemu_ld_opc[(MO_SSIZE | MO_BSWAP) + 1] = {
    [MO_UB] = LDUB,
    [MO_SB] = LDSB,
    [MO_UB | MO_LE] = LDUB,
    [MO_SB | MO_LE] = LDSB,

    [MO_BEUW] = LDUH,
    [MO_BESW] = LDSH,
    [MO_BEUL] = LDUW,
    [MO_BESL] = LDSW,
    [MO_BEUQ] = LDX,
    [MO_BESQ] = LDX,

    [MO_LEUW] = LDUH_LE,
    [MO_LESW] = LDSH_LE,
    [MO_LEUL] = LDUW_LE,
    [MO_LESL] = LDSW_LE,
    [MO_LEUQ] = LDX_LE,
    [MO_LESQ] = LDX_LE,
};

static const int qemu_st_opc[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB] = STB,

    [MO_BEUW] = STH,
    [MO_BEUL] = STW,
    [MO_BEUQ] = STX,

    [MO_LEUW] = STH_LE,
    [MO_LEUL] = STW_LE,
    [MO_LEUQ] = STX_LE,
};

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi, bool is_64)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_read));

    /* The fast path is exactly one insn.  Thus we can perform the
       entire TLB Hit in the (annulled) delay slot of the branch
       over the TLB Miss case.  */

    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    /* TLB Miss.  */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);

    /* We use the helpers to extend SB and SW data, leaving the case
       of SL needing explicit extending below.  */
    if ((memop & MO_SSIZE) == MO_SL) {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    } else {
        func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)];
    }
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O2, oi);

    /* We let the helper sign-extend SB and SW, but leave SL for here.  */
    if (is_64 && (memop & MO_SSIZE) == MO_SL) {
        tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0);
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight load in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_ld_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: load by pieces of minimum alignment.  */
        int ld_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            ld_opc = qemu_ld_opc[a_bits | MO_BE | (memop & MO_SIGN)];
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            ld_opc = qemu_ld_opc[a_bits | MO_BE];
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, data, data, a_size * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else if (a_bits == 0) {
            ld_opc = LDUB;
            tcg_out_ldst(s, data, TCG_REG_T1, 0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = LDSB;
                }
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        } else {
            ld_opc = qemu_ld_opc[a_bits | MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, ld_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                if ((memop & MO_SIGN) && i == s_size - a_size) {
                    ld_opc = qemu_ld_opc[a_bits | MO_LE | MO_SIGN];
                }
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, ld_opc);
                tcg_out_arithi(s, TCG_REG_T2, TCG_REG_T2, i * 8, SHIFT_SLLX);
                tcg_out_arith(s, data, data, TCG_REG_T2, ARITH_OR);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
                            MemOpIdx oi)
{
    MemOp memop = get_memop(oi);
    tcg_insn_unit *label_ptr;

#ifdef CONFIG_SOFTMMU
    unsigned memi = get_mmuidx(oi);
    TCGReg addrz;
    const tcg_insn_unit *func;

    addrz = tcg_out_tlb_load(s, addr, memi, memop,
                             offsetof(CPUTLBEntry, addr_write));

    /* The fast path is exactly one insn.  Thus we can perform the entire
       TLB Hit in the (annulled) delay slot of the branch over TLB Miss.  */
    /* beq,a,pt %[xi]cc, label0 */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT
                  | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    /* TLB Miss.  */

    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O1, addrz);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_O2, data);

    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
    tcg_debug_assert(func != NULL);
    tcg_out_call_nodelay(s, func, false);
    /* delay slot */
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_O3, oi);

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#else
    TCGReg index = (guest_base ? TCG_GUEST_BASE_REG : TCG_REG_G0);
    unsigned a_bits = get_alignment_bits(memop);
    unsigned s_bits = memop & MO_SIZE;
    unsigned t_bits;

    if (TARGET_LONG_BITS == 32) {
        tcg_out_arithi(s, TCG_REG_T1, addr, 0, SHIFT_SRL);
        addr = TCG_REG_T1;
    }

    /*
     * Normal case: alignment equal to access size.
     */
    if (a_bits == s_bits) {
        tcg_out_ldst_rr(s, data, addr, index,
                        qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
        return;
    }

    /*
     * Test for at least natural alignment, and assume most accesses
     * will be aligned -- perform a straight store in the delay slot.
     * This is required to preserve atomicity for aligned accesses.
     */
    t_bits = MAX(a_bits, s_bits);
    tcg_debug_assert(t_bits < 13);
    tcg_out_arithi(s, TCG_REG_G0, addr, (1u << t_bits) - 1, ARITH_ANDCC);

    /* beq,a,pt %icc, label */
    label_ptr = s->code_ptr;
    tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | BPCC_ICC, 0);
    /* delay slot */
    tcg_out_ldst_rr(s, data, addr, index,
                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);

    if (a_bits >= s_bits) {
        /*
         * Overalignment: A successful alignment test will perform the memory
         * operation in the delay slot, and failure need only invoke the
         * handler for SIGBUS.
         */
        tcg_out_call_nodelay(s, qemu_unalign_st_trampoline, false);
        /* delay slot -- move to low part of argument reg */
        tcg_out_mov_delay(s, TCG_REG_O1, addr);
    } else {
        /* Underalignment: store by pieces of minimum alignment.  */
        int st_opc, a_size, s_size, i;

        /*
         * Force full address into T1 early; avoids problems with
         * overlap between @addr and @data.
         */
        tcg_out_arith(s, TCG_REG_T1, addr, index, ARITH_ADD);

        a_size = 1 << a_bits;
        s_size = 1 << s_bits;
        if ((memop & MO_BSWAP) == MO_BE) {
            st_opc = qemu_st_opc[a_bits | MO_BE];
            for (i = 0; i < s_size; i += a_size) {
                TCGReg d = data;
                int shift = (s_size - a_size - i) * 8;
                if (shift) {
                    d = TCG_REG_T2;
                    tcg_out_arithi(s, d, data, shift, SHIFT_SRLX);
                }
                tcg_out_ldst(s, d, TCG_REG_T1, i, st_opc);
            }
        } else if (a_bits == 0) {
            tcg_out_ldst(s, data, TCG_REG_T1, 0, STB);
            for (i = 1; i < s_size; i++) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_ldst(s, TCG_REG_T2, TCG_REG_T1, i, STB);
            }
        } else {
            /* Note that ST*A with immediate asi must use indexed address.  */
            st_opc = qemu_st_opc[a_bits + MO_LE];
            tcg_out_ldst_rr(s, data, TCG_REG_T1, TCG_REG_G0, st_opc);
            for (i = a_size; i < s_size; i += a_size) {
                tcg_out_arithi(s, TCG_REG_T2, data, i * 8, SHIFT_SRLX);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, a_size, ARITH_ADD);
                tcg_out_ldst_rr(s, TCG_REG_T2, TCG_REG_T1, TCG_REG_G0, st_opc);
            }
        }
    }

    *label_ptr |= INSN_OFF19(tcg_ptr_byte_diff(s->code_ptr, label_ptr));
#endif /* CONFIG_SOFTMMU */
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;
    int c, c2;

    /* Hoist the loads of the most common arguments.  */
    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        if (check_fit_ptr(a0, 13)) {
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
            tcg_out_movi_imm13(s, TCG_REG_O0, a0);
            break;
        } else if (USE_REG_TB) {
            intptr_t tb_diff = tcg_tbrel_diff(s, (void *)a0);
            if (check_fit_ptr(tb_diff, 13)) {
                tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
                /* Note that TCG_REG_TB has been unwound to O1.  */
                tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O1, tb_diff, ARITH_ADD);
                break;
            }
        }
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_I0, a0 & ~0x3ff);
        tcg_out_arithi(s, TCG_REG_G0, TCG_REG_I7, 8, RETURN);
        tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, a0 & 0x3ff, ARITH_OR);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* direct jump method */
            if (USE_REG_TB) {
                /* make sure the patch is 8-byte aligned.  */
                if ((intptr_t)s->code_ptr & 4) {
                    tcg_out_nop(s);
                }
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                tcg_out_sethi(s, TCG_REG_T1, 0);
                tcg_out_arithi(s, TCG_REG_T1, TCG_REG_T1, 0, ARITH_OR);
                tcg_out_arith(s, TCG_REG_G0, TCG_REG_TB, TCG_REG_T1, JMPL);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB, TCG_REG_T1, ARITH_ADD);
            } else {
                s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
                tcg_out32(s, CALL);
                tcg_out_nop(s);
            }
        } else {
            /* indirect jump method */
            tcg_out_ld_ptr(s, TCG_REG_TB, s->tb_jmp_target_addr + a0);
            tcg_out_arithi(s, TCG_REG_G0, TCG_REG_TB, 0, JMPL);
            tcg_out_nop(s);
        }
        set_jmp_reset_offset(s, a0);

        /* For the unlinked path of goto_tb, we need to reset
           TCG_REG_TB to the beginning of this TB.  */
        if (USE_REG_TB) {
            c = -tcg_current_code_size(s);
            if (check_fit_i32(c, 13)) {
                tcg_out_arithi(s, TCG_REG_TB, TCG_REG_TB, c, ARITH_ADD);
            } else {
                tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_T1, c);
                tcg_out_arith(s, TCG_REG_TB, TCG_REG_TB,
                              TCG_REG_T1, ARITH_ADD);
            }
        }
        break;
    case INDEX_op_goto_ptr:
        tcg_out_arithi(s, TCG_REG_G0, a0, 0, JMPL);
        if (USE_REG_TB) {
            tcg_out_mov_delay(s, TCG_REG_TB, a0);
        } else {
            tcg_out_nop(s);
        }
        break;
    case INDEX_op_br:
        tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
        tcg_out_nop(s);
        break;

#define OP_32_64(x)                             \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

    OP_32_64(ld8u):
        tcg_out_ldst(s, a0, a1, a2, LDUB);
        break;
    OP_32_64(ld8s):
        tcg_out_ldst(s, a0, a1, a2, LDSB);
        break;
    OP_32_64(ld16u):
        tcg_out_ldst(s, a0, a1, a2, LDUH);
        break;
    OP_32_64(ld16s):
        tcg_out_ldst(s, a0, a1, a2, LDSH);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, a0, a1, a2, LDUW);
        break;
    OP_32_64(st8):
        tcg_out_ldst(s, a0, a1, a2, STB);
        break;
    OP_32_64(st16):
        tcg_out_ldst(s, a0, a1, a2, STH);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, a0, a1, a2, STW);
        break;
    OP_32_64(add):
        c = ARITH_ADD;
        goto gen_arith;
    OP_32_64(sub):
        c = ARITH_SUB;
        goto gen_arith;
    OP_32_64(and):
        c = ARITH_AND;
        goto gen_arith;
    OP_32_64(andc):
        c = ARITH_ANDN;
        goto gen_arith;
    OP_32_64(or):
        c = ARITH_OR;
        goto gen_arith;
    OP_32_64(orc):
        c = ARITH_ORN;
        goto gen_arith;
    OP_32_64(xor):
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_shl_i32:
        c = SHIFT_SLL;
    do_shift32:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 31, c2, c);
        break;
    case INDEX_op_shr_i32:
        c = SHIFT_SRL;
        goto do_shift32;
    case INDEX_op_sar_i32:
        c = SHIFT_SRA;
        goto do_shift32;
    case INDEX_op_mul_i32:
        c = ARITH_UMUL;
        goto gen_arith;

    OP_32_64(neg):
        c = ARITH_SUB;
        goto gen_arith1;
    OP_32_64(not):
        c = ARITH_ORN;
        goto gen_arith1;

    case INDEX_op_div_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 0);
        break;
    case INDEX_op_divu_i32:
        tcg_out_div32(s, a0, a1, a2, c2, 1);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond_i32(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond_i32(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i32:
        tcg_out_movcond_i32(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_ADDCC, ARITH_ADDC);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
                            args[4], const_args[4], args[5], const_args[5],
                            ARITH_SUBCC, ARITH_SUBC);
        break;
    case INDEX_op_mulu2_i32:
        c = ARITH_UMUL;
        goto do_mul2;
    case INDEX_op_muls2_i32:
        c = ARITH_SMUL;
    do_mul2:
        /* The 32-bit multiply insns produce a full 64-bit result.  */
        tcg_out_arithc(s, a0, a2, args[3], const_args[3], c);
        tcg_out_arithi(s, a1, a0, 32, SHIFT_SRLX);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, true);
        break;
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, a0, a1, a2);
        break;

    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, a0, a1, a2, LDSW);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, a0, a1, a2, LDX);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, a0, a1, a2, STX);
        break;
    case INDEX_op_shl_i64:
        c = SHIFT_SLLX;
    do_shift64:
        /* Limit immediate shift count lest we create an illegal insn.  */
        tcg_out_arithc(s, a0, a1, a2 & 63, c2, c);
        break;
    case INDEX_op_shr_i64:
        c = SHIFT_SRLX;
        goto do_shift64;
    case INDEX_op_sar_i64:
        c = SHIFT_SRAX;
        goto do_shift64;
    case INDEX_op_mul_i64:
        c = ARITH_MULX;
        goto gen_arith;
    case INDEX_op_div_i64:
        c = ARITH_SDIVX;
        goto gen_arith;
    case INDEX_op_divu_i64:
        c = ARITH_UDIVX;
        goto gen_arith;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRA);
        break;
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext32u_i64:
        tcg_out_arithi(s, a0, a1, 0, SHIFT_SRL);
        break;
    case INDEX_op_extrl_i64_i32:
        tcg_out_mov(s, TCG_TYPE_I32, a0, a1);
        break;
    case INDEX_op_extrh_i64_i32:
        tcg_out_arithi(s, a0, a1, 32, SHIFT_SRLX);
        break;

    case INDEX_op_brcond_i64:
        tcg_out_brcond_i64(s, a2, a0, a1, const_args[1], arg_label(args[3]));
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond_i64(s, args[3], a0, a1, a2, c2);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], false);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
                            const_args[4], args[5], const_args[5], true);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
        break;

    gen_arith:
        tcg_out_arithc(s, a0, a1, a2, c2, c);
        break;

    gen_arith1:
        tcg_out_arithc(s, a0, TCG_REG_G0, a1, const_args[1], c);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.  */
    default:
        tcg_abort();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_add_i32:
    case INDEX_op_add_i64:
    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rJ);
    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rI, 0);
    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return C_O2_I4(r, r, rZ, rZ, rJ, rJ);
    case INDEX_op_mulu2_i32:
    case INDEX_op_muls2_i32:
        return C_O2_I2(r, r, rZ, rJ);
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, s);
    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(sZ, s);

    default:
        g_assert_not_reached();
    }
}

static void tcg_target_init(TCGContext *s)
{
    /*
     * Only probe for the platform and capabilities if we haven't already
     * determined maximum values at compile time.
     */
#ifndef use_vis3_instructions
    {
        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
    }
#endif

    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_G7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_O7);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G0); /* zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G6); /* reserved for os */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_G7); /* thread pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I6); /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_I7); /* return address */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_O6); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T1); /* for internal use */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_T2); /* for internal use */
}

#define ELF_HOST_MACHINE  EM_SPARCV9

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_win_save;
    uint8_t fde_ret_save[3];
} DebugFrame;

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -sizeof(void *) & 0x7f,
    .h.cie.return_column = 15,            /* o7 */

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, 30,                           /* DW_CFA_def_cfa i6, 2047 */
        (2047 & 0x7f) | 0x80, (2047 >> 7)
    },
    .fde_win_save = 0x2d,                 /* DW_CFA_GNU_window_save */
    .fde_ret_save = { 9, 15, 31 },        /* DW_CFA_register o7, i7 */
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}

void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
                              uintptr_t jmp_rw, uintptr_t addr)
{
    intptr_t tb_disp = addr - tc_ptr;
    intptr_t br_disp = addr - jmp_rx;
    tcg_insn_unit i1, i2;

    /* We can reach the entire address space for ILP32.
       For LP64, the code_gen_buffer can't be larger than 2GB.  */
    tcg_debug_assert(tb_disp == (int32_t)tb_disp);
    tcg_debug_assert(br_disp == (int32_t)br_disp);

    if (!USE_REG_TB) {
        qatomic_set((uint32_t *)jmp_rw,
                    deposit32(CALL, 0, 30, br_disp >> 2));
        flush_idcache_range(jmp_rx, jmp_rw, 4);
        return;
    }

    /* This does not exercise the full range of the branch, but we do
       still need to be able to load the new value of TCG_REG_TB;
       and this case does happen quite often.  */
    if (check_fit_ptr(tb_disp, 13)) {
        /* ba,pt %icc, addr */
        i1 = (INSN_OP(0) | INSN_OP2(1) | INSN_COND(COND_A)
              | BPCC_ICC | BPCC_PT | INSN_OFF19(br_disp));
        i2 = (ARITH_ADD | INSN_RD(TCG_REG_TB) | INSN_RS1(TCG_REG_TB)
              | INSN_IMM13(tb_disp));
    } else if (tb_disp >= 0) {
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_OR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13(tb_disp & 0x3ff));
    } else {
        i1 = SETHI | INSN_RD(TCG_REG_T1) | ((~tb_disp & 0xfffffc00) >> 10);
        i2 = (ARITH_XOR | INSN_RD(TCG_REG_T1) | INSN_RS1(TCG_REG_T1)
              | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
    }

    qatomic_set((uint64_t *)jmp_rw, deposit64(i2, 32, 32, i1));
    flush_idcache_range(jmp_rx, jmp_rw, 8);
}