/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
 *
 * Based on tcg/riscv/tcg-target.c.inc
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "tp",
    "sp",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "t0",
    "t1",
    "t2",
    "t3",
    "t4",
    "t5",
    "t6",
    "t7",
    "t8",
    "r21", /* reserved in the LP64* ABI, hence no ABI name */
    "s9",
    "s0",
    "s1",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Registers preserved across calls */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,

    /* Registers (potentially) clobbered across calls */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,
    TCG_REG_T7,
    TCG_REG_T8,

    /* Argument registers, opposite order of allocation. */
    TCG_REG_A7,
    TCG_REG_A6,
    TCG_REG_A5,
    TCG_REG_A4,
    TCG_REG_A3,
    TCG_REG_A2,
    TCG_REG_A1,
    TCG_REG_A0,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#ifndef CONFIG_SOFTMMU
#define USE_GUEST_BASE     (guest_base != 0)
#define TCG_GUEST_BASE_REG TCG_REG_S1
#endif

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_S32   0x400
#define TCG_CT_CONST_U12   0x800
#define TCG_CT_CONST_C12   0x1000
#define TCG_CT_CONST_WSZ   0x2000
#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
/*
 * For softmmu, we need to avoid conflicts with the first 5
 * argument registers used for calling the helpers; some of
 * these are also used for the TLB lookup.
 */
#ifdef CONFIG_SOFTMMU
#define SOFTMMU_RESERVE_REGS  MAKE_64BIT_MASK(TCG_REG_A0, 5)
#else
#define SOFTMMU_RESERVE_REGS  0
#endif


static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    return sextract64(val, pos, len);
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return true;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return true;
    }
    return false;
}
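
/*
 * Illustrative values satisfying the constant constraints above (chosen
 * for exposition, not taken from real guest code):
 *
 *   TCG_CT_CONST_S12: -0x800 <= val <= 0x7ff, e.g. -1 or 2047;
 *   TCG_CT_CONST_U12: 0 <= val <= 0xfff, e.g. 0xabc;
 *   TCG_CT_CONST_C12: ~val fits in uimm12, i.e. val in [-0x1000, -1],
 *                     e.g. val = -1 (~val == 0) or val = ~0xff, letting
 *                     andc/orc use andi/ori with the inverted immediate;
 *   TCG_CT_CONST_WSZ: val equals the operation width, 32 or 64.
 */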

/*
 * Relocations
 */

/*
 * The relocation records defined in the LoongArch ELF psABI v1.00 are way
 * too complicated: a whopping stack machine is needed to stuff the fields,
 * and at the very least one SOP_PUSH and one SOP_POP (of the correct
 * format) are needed.
 *
 * Hence, define our own simpler relocation types.  Numbers are chosen so
 * as not to collide with potential future additions to the true ELF
 * relocation type enum.
 */

/* Field Sk16, shifted right by 2; suitable for conditional jumps */
#define R_LOONGARCH_BR_SK16     256
/* Field Sd10k16, shifted right by 2; suitable for B and BL */
#define R_LOONGARCH_BR_SD10K16  257

static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 16)) {
        *src_rw = deposit64(*src_rw, 10, 16, offset);
        return true;
    }

    return false;
}

static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
                             const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 26)) {
        *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
        *src_rw = deposit64(*src_rw, 10, 16, offset);      /* slot k16 */
        return true;
    }

    return false;
}
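
/*
 * Worked examples (illustrative offsets): a conditional branch 0x2468
 * bytes forward has offset >> 2 == 0x91a, which fits Sk16, so
 * reloc_br_sk16 deposits 0x91a into bits [25:10] of the insn.  A B/BL
 * target 0x1234568 bytes away has offset >> 2 == 0x48d15a, which needs
 * the split Sd10k16 form: the low 16 bits (0xd15a) land in the k16 slot
 * at bits [25:10] and the high 10 bits (0x48) in the d10 slot at
 * bits [9:0].
 */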

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_LOONGARCH_BR_SK16:
        return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
    case R_LOONGARCH_BR_SD10K16:
        return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#include "tcg-insn-defs.c.inc"

/*
 * TCG intrinsics
 */

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Baseline LoongArch only has the full barrier, unfortunately. */
    tcg_out_opc_dbar(s, 0);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        /*
         * The conventional register-register move on LoongArch is
         * `or dst, src, zero`.
         */
        tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

/* Loads a 32-bit immediate into rd, sign-extended. */
static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
{
    tcg_target_long lo = sextreg(val, 0, 12);
    tcg_target_long hi12 = sextreg(val, 12, 20);

    /* Single-instruction cases. */
    if (hi12 == 0) {
        /* val fits in uimm12: ori rd, zero, val */
        tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
        return;
    }
    if (hi12 == sextreg(lo, 12, 20)) {
        /* val fits in simm12: addi.w rd, zero, val */
        tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
        return;
    }

    /* High bits must be set; load with lu12i.w + optional ori. */
    tcg_out_opc_lu12i_w(s, rd, hi12);
    if (lo != 0) {
        tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
    }
}
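
/*
 * Worked examples for the three cases above (illustrative values):
 *
 *   val = 0x00000abc: hi12 == 0, so a single "ori rd, zero, 0xabc";
 *   val = -0x544 (0xfffffabc): hi12 matches the sign extension of lo,
 *     so a single "addi.w rd, zero, -0x544";
 *   val = 0x12345678: neither case holds, so "lu12i.w rd, 0x12345"
 *     followed by "ori rd, rd, 0x678".
 */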

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    /*
     * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
     * with dedicated instructions for filling the respective bitfields
     * below:
     *
     *        6                   5                   4               3
     *  3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
     * +-----------------------+---------------------------------------+...
     * |          hi52         |                  hi32                 |
     * +-----------------------+---------------------------------------+...
     *              3                   2                   1
     *        1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------------+-------------------------+
     *    |                  hi12               |            lo           |
     * ...+-------------------------------------+-------------------------+
     *
     * Check if val belongs to one of the several fast cases, before falling
     * back to the slow path.
     */

    intptr_t pc_offset;
    tcg_target_long val_lo, val_hi, pc_hi, offset_hi;
    tcg_target_long hi12, hi32, hi52;

    /* Value fits in signed i32. */
    if (type == TCG_TYPE_I32 || val == (int32_t)val) {
        tcg_out_movi_i32(s, rd, val);
        return;
    }

    /* PC-relative cases. */
    pc_offset = tcg_pcrel_diff(s, (void *)val);
    if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) {
        /* Single pcaddu2i. */
        tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
        return;
    }

    if (pc_offset == (int32_t)pc_offset) {
        /* Offset within 32 bits; load with pcalau12i + ori. */
        val_lo = sextreg(val, 0, 12);
        val_hi = val >> 12;
        pc_hi = (val - pc_offset) >> 12;
        offset_hi = val_hi - pc_hi;

        tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20));
        tcg_out_opc_pcalau12i(s, rd, offset_hi);
        if (val_lo != 0) {
            tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff);
        }
        return;
    }

    hi12 = sextreg(val, 12, 20);
    hi32 = sextreg(val, 32, 20);
    hi52 = sextreg(val, 52, 12);

    /* Single cu52i.d case. */
    if ((hi52 != 0) && (ctz64(val) >= 52)) {
        tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
        return;
    }

    /* Slow path.  Initialize the low 32 bits, then concat high bits. */
    tcg_out_movi_i32(s, rd, val);

    /* Load hi32 and hi52 explicitly when they are unexpected values. */
    if (hi32 != sextreg(hi12, 20, 20)) {
        tcg_out_opc_cu32i_d(s, rd, hi32);
    }

    if (hi52 != sextreg(hi32, 20, 12)) {
        tcg_out_opc_cu52i_d(s, rd, rd, hi52);
    }
}
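
/*
 * For example (illustrative constants): val = 0x123456789abcdef0 takes
 * the full four-step slow path,
 *
 *   lu12i.w  rd, 0x9abcd       # bits 31..12, sign-extended to 64 bits
 *   ori      rd, rd, 0xef0     # bits 11..0
 *   cu32i.d  rd, 0x45678       # hi32 differs from sign ext. of hi12
 *   cu52i.d  rd, rd, 0x123     # hi52 differs from sign ext. of hi32
 *
 * while val = 0x0010000000000000 (only bit 52 set, ctz64 == 52) hits
 * the fast case and emits the single "cu52i.d rd, zero, 1".
 */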

static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
                         TCGReg rs, tcg_target_long imm)
{
    tcg_target_long lo12 = sextreg(imm, 0, 12);
    tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);

    /*
     * Note that there's a hole in between hi16 and lo12:
     *
     *       3                   2                   1                   0
     *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------+-------+-----------------------+
     *    |             hi16              |       |          lo12         |
     * ...+-------------------------------+-------+-----------------------+
     *
     * For bits within that hole, it's more efficient to use LU12I and ADD.
     */
    if (imm == (hi16 << 16) + lo12) {
        if (hi16) {
            tcg_out_opc_addu16i_d(s, rd, rs, hi16);
            rs = rd;
        }
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_addi_w(s, rd, rs, lo12);
        } else if (lo12) {
            tcg_out_opc_addi_d(s, rd, rs, lo12);
        } else {
            tcg_out_mov(s, type, rd, rs);
        }
    } else {
        tcg_out_movi(s, type, TCG_REG_TMP0, imm);
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
        } else {
            tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
        }
    }
}
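
/*
 * Examples (illustrative immediates): imm = 0x107ff splits cleanly into
 * hi16 = 1 and lo12 = 0x7ff, giving "addu16i.d rd, rs, 1" followed by
 * "addi.d rd, rd, 0x7ff".  imm = 0x2345 does not: lo12 = 0x345 and
 * hi16 = 0, but (hi16 << 16) + lo12 == 0x345 != imm because bits 12..15
 * fall into the hole, so the slow path materializes imm in TMP0 and
 * emits a register-register add.
 */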

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_andi(s, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_b(s, ret, arg);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_h(s, ret, arg);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_addi_w(s, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
                           TCGReg a0, TCGReg a1, TCGReg a2,
                           bool c2, bool is_32bit)
{
    if (c2) {
        /*
         * Fast path: the semantics are already satisfied by the
         * constraint and the insn behavior, so a single instruction
         * is enough.
         */
        tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
        /* all clz/ctz insns belong to DJ-format */
        tcg_out32(s, encode_dj_insn(opc, a0, a1));
        return;
    }

    tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
    /* a0 = a1 ? REG_TMP0 : a2 */
    tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
    tcg_out_opc_masknez(s, a0, a2, a1);
    tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
}
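
/*
 * For instance, ctz_i32 with a register fallback (c2 == false) expands
 * to the following sketch:
 *
 *   ctz.w    tmp0, a1          # raw count into the temp
 *   maskeqz  tmp0, tmp0, a1    # tmp0 if a1 != 0, else 0
 *   masknez  a0, a2, a1        # a2 if a1 == 0, else 0
 *   or       a0, tmp0, a0      # select: a1 ? ctz(a1) : a2
 */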

#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)

static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:    /* -> NE  */
    case TCG_COND_GE:    /* -> LT  */
    case TCG_COND_GEU:   /* -> LTU */
    case TCG_COND_GT:    /* -> LE  */
    case TCG_COND_GTU:   /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT.  Watch out for wraparound for LEU.
         * We don't need to worry about this for LE because the constant
         * input is still constrained to int32_t, and INT32_MAX + 1 is
         * representable in the 64-bit temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            arg2 += 1;
        } else {
            TCGReg tmp = arg2;
            arg2 = arg1;
            arg1 = tmp;
            cond = tcg_swap_cond(cond);    /* LE -> GE */
            cond = tcg_invert_cond(cond);  /* GE -> LT */
            flags ^= SETCOND_INV;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_xor(s, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else if (arg2 >= 0 && arg2 <= 0xfff) {
            tcg_out_opc_xori(s, ret, arg1, arg2);
        } else {
            tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
        }
        break;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        if (c2) {
            if (arg2 >= -0x800 && arg2 <= 0x7ff) {
                if (cond == TCG_COND_LT) {
                    tcg_out_opc_slti(s, ret, arg1, arg2);
                } else {
                    tcg_out_opc_sltui(s, ret, arg1, arg2);
                }
                break;
            }
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
            arg2 = TCG_REG_TMP0;
        }
        if (cond == TCG_COND_LT) {
            tcg_out_opc_slt(s, ret, arg1, arg2);
        } else {
            tcg_out_opc_sltu(s, ret, arg1, arg2);
        }
        break;

    default:
        g_assert_not_reached();
        break;
    }

    return ret | flags;
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);

    if (tmpflags != ret) {
        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

        switch (tmpflags & SETCOND_FLAGS) {
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_xori(s, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_sltui(s, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
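
/*
 * Example expansion (illustrative): setcond_i64 GE, ret, x, y is first
 * inverted to LT with SETCOND_INV set, yielding "slt ret, x, y"; undoing
 * the inversion then costs a single "xori ret, ret, 1".
 */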

static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, tcg_target_long c2, bool const2,
                            TCGReg v1, TCGReg v2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
    TCGReg t;

    /* Standardize the test below to t != 0. */
    if (tmpflags & SETCOND_INV) {
        t = v1, v1 = v2, v2 = t;
    }

    t = tmpflags & ~SETCOND_FLAGS;
    if (v1 == TCG_REG_ZERO) {
        tcg_out_opc_masknez(s, ret, v2, t);
    } else if (v2 == TCG_REG_ZERO) {
        tcg_out_opc_maskeqz(s, ret, v1, t);
    } else {
        tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
        tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
        tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
    }
}

/*
 * Branch helpers
 */

static const struct {
    LoongArchInsn op;
    bool swap;
} tcg_brcond_to_loongarch[] = {
    [TCG_COND_EQ] =  { OPC_BEQ,  false },
    [TCG_COND_NE] =  { OPC_BNE,  false },
    [TCG_COND_LT] =  { OPC_BGT,  true  },
    [TCG_COND_GE] =  { OPC_BLE,  true  },
    [TCG_COND_LE] =  { OPC_BLE,  false },
    [TCG_COND_GT] =  { OPC_BGT,  false },
    [TCG_COND_LTU] = { OPC_BGTU, true  },
    [TCG_COND_GEU] = { OPC_BLEU, true  },
    [TCG_COND_LEU] = { OPC_BLEU, false },
    [TCG_COND_GTU] = { OPC_BGTU, false }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_loongarch[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    /* all conditional branch insns belong to DJSk16-format */
    tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
    tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);

    tcg_debug_assert((offset & 3) == 0);
    if (offset == sextreg(offset, 0, 28)) {
        /* short jump: +/- 256MiB */
        if (tail) {
            tcg_out_opc_b(s, offset >> 2);
        } else {
            tcg_out_opc_bl(s, offset >> 2);
        }
    } else if (offset == sextreg(offset, 0, 38)) {
        /* long jump: +/- 256GiB */
        tcg_target_long lo = sextreg(offset, 0, 18);
        tcg_target_long hi = offset - lo;
        tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    } else {
        /* far jump: 64-bit */
        tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
        tcg_target_long hi = (tcg_target_long)arg - lo;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    }
}
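
/*
 * Example of the long-jump split (illustrative offset): for
 * offset = 0x12345678, lo = sextreg(offset, 0, 18) = 0x5678 and
 * hi = 0x12340000, so this emits "pcaddu18i tmp0, 0x48d" (hi >> 18)
 * followed by "jirl link, tmp0, 0x159e" (lo >> 2); JIRL shifts its
 * immediate left by 2, re-forming the low bits.
 */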

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

/*
 * Load/store helpers
 */

static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_LD_B:
    case OPC_LD_BU:
    case OPC_LD_H:
    case OPC_LD_HU:
    case OPC_LD_W:
    case OPC_LD_WU:
    case OPC_LD_D:
    case OPC_ST_B:
    case OPC_ST_H:
    case OPC_ST_W:
    case OPC_ST_D:
        tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
        break;
    default:
        g_assert_not_reached();
    }
}
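
/*
 * For example (illustrative, assuming a 64-bit load with addr not being
 * the zero register): offset 0x12345 does not fit in simm12
 * (imm12 = 0x345), so TMP2 is loaded with 0x12000 (offset - imm12),
 * addr is added in, and the final access is "ld.d data, tmp2, 0x345".
 */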

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is_32bit = type == TCG_TYPE_I32;
    tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is_32bit = type == TCG_TYPE_I32;
    tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

/*
 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
 */

#if defined(CONFIG_SOFTMMU)
/*
 * helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
 *                                     MemOpIdx oi, uintptr_t ra)
 */
static void * const qemu_ld_helpers[4] = {
    [MO_8]  = helper_ret_ldub_mmu,
    [MO_16] = helper_le_lduw_mmu,
    [MO_32] = helper_le_ldul_mmu,
    [MO_64] = helper_le_ldq_mmu,
};

/*
 * helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
 *                                     uintxx_t val, MemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[4] = {
    [MO_8]  = helper_ret_stb_mmu,
    [MO_16] = helper_le_stw_mmu,
    [MO_32] = helper_le_stl_mmu,
    [MO_64] = helper_le_stq_mmu,
};

/* We expect to use a 12-bit negative offset from ENV. */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));

static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_b(s, 0);
    return reloc_br_sd10k16(s->code_ptr - 1, target);
}

/*
 * Emits common code for the TLB addend lookup, which eventually loads
 * the addend into TCG_REG_TMP2.
 */
static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, MemOpIdx oi,
                             tcg_insn_unit **label_ptr, bool is_load)
{
    MemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
    int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
    int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

    tcg_out_opc_srli_d(s, TCG_REG_TMP2, addrl,
                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend. */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
                       : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address. */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
    tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addrl);

    /* Compare masked address with the TLB entry. */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);

    /* TLB Hit - addend in TCG_REG_TMP2, ready for use. */
}
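
/*
 * The fast path above thus boils down to this sketch (register names as
 * used in the code; the compare_mask load may itself take more than one
 * instruction):
 *
 *   ld.d    tmp0, areg0, mask_ofs     # tlb mask
 *   ld.d    tmp1, areg0, table_ofs    # tlb table base
 *   srli.d  tmp2, addrl, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS
 *   and     tmp2, tmp2, tmp0
 *   add.d   tmp2, tmp2, tmp1          # &tlb entry
 *   ld.*    tmp0, tmp2, addr_read/addr_write   # comparator
 *   ld.d    tmp2, tmp2, addend
 *   (movi   tmp1, compare_mask)
 *   and     tmp1, tmp1, addrl
 *   bne     tmp0, tmp1, <slow path>
 */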

static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
                                TCGType type,
                                TCGReg datalo, TCGReg addrlo,
                                void *raddr, tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = type;
    label->datalo_reg = datalo;
    label->datahi_reg = 0; /* unused */
    label->addrlo_reg = addrlo;
    label->addrhi_reg = 0; /* unused */
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = label_ptr[0];
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp size = opc & MO_SIZE;
    TCGType type = l->type;

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call load helper */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, (tcg_target_long)l->raddr);

    tcg_out_call_int(s, qemu_ld_helpers[size], false);

    switch (opc & MO_SSIZE) {
    case MO_SB:
        tcg_out_ext8s(s, type, l->datalo_reg, TCG_REG_A0);
        break;
    case MO_SW:
        tcg_out_ext16s(s, type, l->datalo_reg, TCG_REG_A0);
        break;
    case MO_SL:
        tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I32) {
            /* MO_UL loads of i32 should be sign-extended too */
            tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0);
            break;
        }
        /* fallthrough */
    default:
        tcg_out_mov(s, type, l->datalo_reg, TCG_REG_A0);
        break;
    }

    return tcg_out_goto(s, l->raddr);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOpIdx oi = l->oi;
    MemOp opc = get_memop(oi);
    MemOp size = opc & MO_SIZE;

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    /* call store helper */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg);
    switch (size) {
    case MO_8:
        tcg_out_ext8u(s, TCG_REG_A2, l->datalo_reg);
        break;
    case MO_16:
        tcg_out_ext16u(s, TCG_REG_A2, l->datalo_reg);
        break;
    case MO_32:
        tcg_out_ext32u(s, TCG_REG_A2, l->datalo_reg);
        break;
    case MO_64:
        tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_A2, l->datalo_reg);
        break;
    default:
        g_assert_not_reached();
        break;
    }
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A4, (tcg_target_long)l->raddr);

    tcg_out_call_int(s, qemu_st_helpers[size], false);

    return tcg_out_goto(s, l->raddr);
}
#else

/*
 * Alignment helpers for user-mode emulation
 */

static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
                                   unsigned a_bits)
{
    TCGLabelQemuLdst *l = new_ldst_label(s);

    l->is_ld = is_ld;
    l->addrlo_reg = addr_reg;

    /*
     * Without micro-architecture details, we don't know which of bstrpick
     * and andi is faster, so use bstrpick, as it's not constrained by the
     * immediate field width.  (Not that alignments >= 2^12 are going to
     * happen any time soon, though.)
     */
    tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);

    l->label_ptr[0] = s->code_ptr;
    tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);

    l->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
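
/*
 * E.g. for an 8-byte-aligned access (a_bits == 3) this emits:
 *
 *   bstrpick.d  tmp1, addr, 2, 0     # extract addr[2:0]
 *   bne         tmp1, zero, <slow path>
 */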

static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);

    /* tail call, with the return address back inline. */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
    tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
                                                : helper_unaligned_st), true);
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

#endif /* CONFIG_SOFTMMU */

/*
 * `ext32u` the address register into the given temp register if the
 * target is 32-bit; this is a no-op otherwise.
 *
 * Returns the register holding the address, ready for use with a TLB
 * addend.
 */
static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s,
                                          TCGReg addr, TCGReg tmp)
{
    if (TARGET_LONG_BITS == 32) {
        tcg_out_ext32u(s, tmp, addr);
        return tmp;
    }
    return addr;
}

static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj,
                                    TCGReg rk, MemOp opc, TCGType type)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_opc_ldx_bu(s, rd, rj, rk);
        break;
    case MO_SB:
        tcg_out_opc_ldx_b(s, rd, rj, rk);
        break;
    case MO_UW:
        tcg_out_opc_ldx_hu(s, rd, rj, rk);
        break;
    case MO_SW:
        tcg_out_opc_ldx_h(s, rd, rj, rk);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_ldx_wu(s, rd, rj, rk);
            break;
        }
        /* fallthrough */
    case MO_SL:
        tcg_out_opc_ldx_w(s, rd, rj, rk);
        break;
    case MO_UQ:
        tcg_out_opc_ldx_d(s, rd, rj, rk);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type)
{
    TCGReg addr_regl;
    TCGReg data_regl;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    addr_regl = *args++;
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1);
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type);
    add_qemu_ldst_label(s, 1, oi, type,
                        data_regl, addr_regl,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, true, addr_regl, a_bits);
    }
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type);
#endif
}

static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data,
                                    TCGReg rj, TCGReg rk, MemOp opc)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_opc_stx_b(s, data, rj, rk);
        break;
    case MO_16:
        tcg_out_opc_stx_h(s, data, rj, rk);
        break;
    case MO_32:
        tcg_out_opc_stx_w(s, data, rj, rk);
        break;
    case MO_64:
        tcg_out_opc_stx_d(s, data, rj, rk);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args)
{
    TCGReg addr_regl;
    TCGReg data_regl;
    MemOpIdx oi;
    MemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#else
    unsigned a_bits;
#endif
    TCGReg base;

    data_regl = *args++;
    addr_regl = *args++;
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0);
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc);
    add_qemu_ldst_label(s, 0, oi,
                        0, /* type param is unused for stores */
                        data_regl, addr_regl,
                        s->code_ptr, label_ptr);
#else
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, false, addr_regl, a_bits);
    }
    base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0);
    TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc);
#endif
}

/*
 * Entry-points
 */

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr. */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /*
     * Direct branch, or load indirect address, to be patched
     * by tb_target_set_jmp_target.  Check early that the indirect
     * load offset is in range, regardless of the direct branch
     * distance, via the assert within tcg_out_opc_pcaddu2i.
     */
    uintptr_t i_addr = get_jmp_target_addr(s, which);
    intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);

    set_jmp_insn_offset(s, which);
    tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);

    /* Finish the load and indirect branch. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t d_addr = tb->jmp_target_addr[n];
    ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
    tcg_insn_unit insn;

    /* Either directly branch, or load slot address for indirect branch. */
    if (d_disp == sextreg(d_disp, 0, 26)) {
        insn = encode_sd10k16_insn(OPC_B, d_disp);
    } else {
        uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
        intptr_t i_disp = i_addr - jmp_rx;
        insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
    }

    qatomic_set((tcg_insn_unit *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
                      0);
        tcg_out_opc_b(s, 0);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;

    case INDEX_op_extrl_i64_i32:
        tcg_out_ext32s(s, a0, a1);
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_srai_d(s, a0, a1, 32);
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
        break;

    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
            tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
        } else {
            tcg_out_opc_nor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_andi(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_andn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_ori(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_orn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_andi(s, a0, a1, a2);
        } else {
            tcg_out_opc_and(s, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
        } else {
            tcg_out_opc_or(s, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_xori(s, a0, a1, a2);
        } else {
            tcg_out_opc_xor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_extract_i32:
        tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
        break;
    case INDEX_op_extract_i64:
        tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;
    case INDEX_op_deposit_i64:
        tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        tcg_out_opc_revb_2h(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext16u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap32_i32:
        /* All 32-bit values are computed sign-extended in the register. */
        a2 = TCG_BSWAP_OS;
        /* fallthrough */
    case INDEX_op_bswap32_i64:
        tcg_out_opc_revb_2w(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext32s(s, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext32u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_opc_revb_d(s, a0, a1);
        break;

    case INDEX_op_clz_i32:
        tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_clz_i64:
        tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
        break;

    case INDEX_op_ctz_i32:
        tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sll_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sll_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_srl_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_srl_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sra_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sra_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_rotl_i32:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
        }
        break;
    case INDEX_op_rotl_i64:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
        }
        break;

    case INDEX_op_rotr_i32:
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_rotr_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_rotr_i64:
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_rotr_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
        } else {
            tcg_out_opc_add_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
        } else {
            tcg_out_opc_add_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_mul_w(s, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_mul_d(s, a0, a1, a2);
        break;

    case INDEX_op_mulsh_i32:
        tcg_out_opc_mulh_w(s, a0, a1, a2);
        break;
    case INDEX_op_mulsh_i64:
        tcg_out_opc_mulh_d(s, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
        tcg_out_opc_mulh_wu(s, a0, a1, a2);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_opc_mulh_du(s, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_div_w(s, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_div_d(s, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_div_wu(s, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_div_du(s, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_mod_w(s, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_mod_d(s, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_mod_wu(s, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_mod_du(s, a0, a1, a2);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
        break;

    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    default:
        g_assert_not_reached();
    }
}

static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i32:
    case INDEX_op_st_i64:
        return C_O0_I2(rZ, r);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(rZ, rZ);

    case INDEX_op_qemu_st_i32:
    case INDEX_op_qemu_st_i64:
        return C_O0_I2(LZ, L);

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
    case INDEX_op_extract_i32:
    case INDEX_op_extract_i64:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i32:
    case INDEX_op_ld_i64:
        return C_O1_I1(r, r);

    case INDEX_op_qemu_ld_i32:
    case INDEX_op_qemu_ld_i64:
        return C_O1_I1(r, L);

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        /*
         * LoongArch insns for these ops don't have reg-imm forms, but we
         * can express using andi/ori if ~constant satisfies
         * TCG_CT_CONST_U12.
         */
        return C_O1_I2(r, r, rC);

    case INDEX_op_shl_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i32:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i32:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i32:
    case INDEX_op_rotr_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_add_i32:
        return C_O1_I2(r, r, ri);
    case INDEX_op_add_i64:
        return C_O1_I2(r, r, rJ);

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        /* LoongArch reg-imm bitops have their imms ZERO-extended */
        return C_O1_I2(r, r, rU);

    case INDEX_op_clz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i32:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, rW);

    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        /* Must deposit into the same register as input */
        return C_O1_I2(r, 0, rZ);

    case INDEX_op_sub_i32:
    case INDEX_op_setcond_i32:
        return C_O1_I2(r, rZ, ri);
    case INDEX_op_sub_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, rZ, rJ);

    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i32:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i32:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i32:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i32:
    case INDEX_op_remu_i64:
        return C_O1_I2(r, rZ, rZ);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, rZ, rJ, rZ, rZ);

    default:
        g_assert_not_reached();
    }
}

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,     /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_RA,     /* should be last for ABI compliance */
};

/* Stack frame parameters. */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation. */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue */
    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    if (USE_GUEST_BASE) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr. Set return value to 0 */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue */
    tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
    tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;

    tcg_target_call_clobber_regs = ALL_GENERAL_REGS;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
}

typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_LOONGARCH

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member. */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 23, 11,                  /* DW_CFA_offset, s0, -88 */
        0x80 + 24, 10,                  /* DW_CFA_offset, s1, -80 */
        0x80 + 25, 9,                   /* DW_CFA_offset, s2, -72 */
        0x80 + 26, 8,                   /* DW_CFA_offset, s3, -64 */
        0x80 + 27, 7,                   /* DW_CFA_offset, s4, -56 */
        0x80 + 28, 6,                   /* DW_CFA_offset, s5, -48 */
        0x80 + 29, 5,                   /* DW_CFA_offset, s6, -40 */
        0x80 + 30, 4,                   /* DW_CFA_offset, s7, -32 */
        0x80 + 31, 3,                   /* DW_CFA_offset, s8, -24 */
        0x80 + 22, 2,                   /* DW_CFA_offset, s9, -16 */
        0x80 + 1,  1,                   /* DW_CFA_offset, ra, -8 */
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}