tcg/loongarch64/tcg-target.c.inc
1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
5 *
6 * Based on tcg/riscv/tcg-target.c.inc
7 *
8 * Copyright (c) 2018 SiFive, Inc
9 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
10 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
11 * Copyright (c) 2008 Fabrice Bellard
12 *
13 * Permission is hereby granted, free of charge, to any person obtaining a copy
14 * of this software and associated documentation files (the "Software"), to deal
15 * in the Software without restriction, including without limitation the rights
16 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17 * copies of the Software, and to permit persons to whom the Software is
18 * furnished to do so, subject to the following conditions:
19 *
20 * The above copyright notice and this permission notice shall be included in
21 * all copies or substantial portions of the Software.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
29 * THE SOFTWARE.
30 */
31
32 #include "../tcg-ldst.c.inc"
33 #include <asm/hwcap.h>
34
35 bool use_lsx_instructions;
36
37 #ifdef CONFIG_DEBUG_TCG
38 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
39 "zero",
40 "ra",
41 "tp",
42 "sp",
43 "a0",
44 "a1",
45 "a2",
46 "a3",
47 "a4",
48 "a5",
49 "a6",
50 "a7",
51 "t0",
52 "t1",
53 "t2",
54 "t3",
55 "t4",
56 "t5",
57 "t6",
58 "t7",
59 "t8",
60 "r21", /* reserved in the LP64* ABI, hence no ABI name */
61 "s9",
62 "s0",
63 "s1",
64 "s2",
65 "s3",
66 "s4",
67 "s5",
68 "s6",
69 "s7",
70 "s8",
71 "vr0",
72 "vr1",
73 "vr2",
74 "vr3",
75 "vr4",
76 "vr5",
77 "vr6",
78 "vr7",
79 "vr8",
80 "vr9",
81 "vr10",
82 "vr11",
83 "vr12",
84 "vr13",
85 "vr14",
86 "vr15",
87 "vr16",
88 "vr17",
89 "vr18",
90 "vr19",
91 "vr20",
92 "vr21",
93 "vr22",
94 "vr23",
95 "vr24",
96 "vr25",
97 "vr26",
98 "vr27",
99 "vr28",
100 "vr29",
101 "vr30",
102 "vr31",
103 };
104 #endif
105
106 static const int tcg_target_reg_alloc_order[] = {
107 /* Registers preserved across calls */
108 /* TCG_REG_S0 reserved for TCG_AREG0 */
109 TCG_REG_S1,
110 TCG_REG_S2,
111 TCG_REG_S3,
112 TCG_REG_S4,
113 TCG_REG_S5,
114 TCG_REG_S6,
115 TCG_REG_S7,
116 TCG_REG_S8,
117 TCG_REG_S9,
118
119 /* Registers (potentially) clobbered across calls */
120 TCG_REG_T0,
121 TCG_REG_T1,
122 TCG_REG_T2,
123 TCG_REG_T3,
124 TCG_REG_T4,
125 TCG_REG_T5,
126 TCG_REG_T6,
127 TCG_REG_T7,
128 TCG_REG_T8,
129
130 /* Argument registers, opposite order of allocation. */
131 TCG_REG_A7,
132 TCG_REG_A6,
133 TCG_REG_A5,
134 TCG_REG_A4,
135 TCG_REG_A3,
136 TCG_REG_A2,
137 TCG_REG_A1,
138 TCG_REG_A0,
139
140 /* Vector registers */
141 TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
142 TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
143 TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
144 TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
145 TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
146 TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
147 /* V24 - V31 are caller-saved, and skipped. */
148 };
149
150 static const int tcg_target_call_iarg_regs[] = {
151 TCG_REG_A0,
152 TCG_REG_A1,
153 TCG_REG_A2,
154 TCG_REG_A3,
155 TCG_REG_A4,
156 TCG_REG_A5,
157 TCG_REG_A6,
158 TCG_REG_A7,
159 };
160
161 static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
162 {
163 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
164 tcg_debug_assert(slot >= 0 && slot <= 1);
165 return TCG_REG_A0 + slot;
166 }
167
168 #ifndef CONFIG_SOFTMMU
169 #define USE_GUEST_BASE (guest_base != 0)
170 #define TCG_GUEST_BASE_REG TCG_REG_S1
171 #endif
172
173 #define TCG_CT_CONST_ZERO 0x100
174 #define TCG_CT_CONST_S12 0x200
175 #define TCG_CT_CONST_S32 0x400
176 #define TCG_CT_CONST_U12 0x800
177 #define TCG_CT_CONST_C12 0x1000
178 #define TCG_CT_CONST_WSZ 0x2000
179 #define TCG_CT_CONST_VCMP 0x4000
180 #define TCG_CT_CONST_VADD 0x8000
181
182 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
183 #define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
184
185 static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
186 {
187 return sextract64(val, pos, len);
188 }
189
190 /* test if a constant matches the constraint */
191 static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
192 {
193 if (ct & TCG_CT_CONST) {
194 return true;
195 }
196 if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
197 return true;
198 }
199 if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
200 return true;
201 }
202 if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
203 return true;
204 }
205 if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
206 return true;
207 }
208 if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
209 return true;
210 }
211 if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
212 return true;
213 }
214 int64_t vec_val = sextract64(val, 0, 8 << vece);
215 if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
216 return true;
217 }
218 if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
219 return true;
220 }
221 return false;
222 }
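
/*
 * Illustrative examples: TCG_CT_CONST_U12 accepts val = 0xfff but not
 * val = 0x1000; TCG_CT_CONST_C12 accepts values whose bitwise complement
 * fits in uimm12, e.g. val = ~(int64_t)0xf00 = 0xfffffffffffff0ff, so
 * that andc/orc with such a constant can be emitted as andi/ori with
 * ~val (see tcg_out_op below).
 */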
223
224 /*
225 * Relocations
226 */
227
228 /*
229 * Relocation records defined in the LoongArch ELF psABI v1.00 are way too
230 * complicated; a whopping stack machine is needed to stuff the fields, and
231 * at the very least one SOP_PUSH and one SOP_POP (of the correct format)
232 * are needed.
233 *
234 * Hence, define our own simpler relocation types. Numbers are chosen so as
235 * not to collide with potential future additions to the true ELF relocation
236 * type enum.
237 */
238
239 /* Field Sk16, shifted right by 2; suitable for conditional jumps */
240 #define R_LOONGARCH_BR_SK16 256
241 /* Field Sd10k16, shifted right by 2; suitable for B and BL */
242 #define R_LOONGARCH_BR_SD10K16 257
243
244 static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
245 {
246 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
247 intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
248
249 tcg_debug_assert((offset & 3) == 0);
250 offset >>= 2;
251 if (offset == sextreg(offset, 0, 16)) {
252 *src_rw = deposit64(*src_rw, 10, 16, offset);
253 return true;
254 }
255
256 return false;
257 }
258
259 static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
260 const tcg_insn_unit *target)
261 {
262 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
263 intptr_t offset = (intptr_t)target - (intptr_t)src_rx;
264
265 tcg_debug_assert((offset & 3) == 0);
266 offset >>= 2;
267 if (offset == sextreg(offset, 0, 26)) {
268 *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
269 *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */
270 return true;
271 }
272
273 return false;
274 }
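
/*
 * Illustrative example of the Sd10k16 split: for a branch to
 * pc + 0x4000000, offset >> 2 = 0x1000000; its low 16 bits (0x0000)
 * land in the k16 slot at insn bits [25:10], and bits [25:16] (0x100)
 * land in the d10 slot at insn bits [9:0].
 */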
275
276 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
277 intptr_t value, intptr_t addend)
278 {
279 tcg_debug_assert(addend == 0);
280 switch (type) {
281 case R_LOONGARCH_BR_SK16:
282 return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
283 case R_LOONGARCH_BR_SD10K16:
284 return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
285 default:
286 g_assert_not_reached();
287 }
288 }
289
290 #include "tcg-insn-defs.c.inc"
291
292 /*
293 * TCG intrinsics
294 */
295
296 static void tcg_out_mb(TCGContext *s, TCGArg a0)
297 {
298 /* Baseline LoongArch only has the full barrier, unfortunately. */
299 tcg_out_opc_dbar(s, 0);
300 }
301
302 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
303 {
304 if (ret == arg) {
305 return true;
306 }
307 switch (type) {
308 case TCG_TYPE_I32:
309 case TCG_TYPE_I64:
310 /*
311 * The conventional register-to-register move on LoongArch is
312 * `or dst, src, zero`.
313 */
314 tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
315 break;
316 default:
317 g_assert_not_reached();
318 }
319 return true;
320 }
321
322 /* Loads a 32-bit immediate into rd, sign-extended. */
323 static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
324 {
325 tcg_target_long lo = sextreg(val, 0, 12);
326 tcg_target_long hi12 = sextreg(val, 12, 20);
327
328 /* Single-instruction cases. */
329 if (hi12 == 0) {
330 /* val fits in uimm12: ori rd, zero, val */
331 tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
332 return;
333 }
334 if (hi12 == sextreg(lo, 12, 20)) {
335 /* val fits in simm12: addi.w rd, zero, val */
336 tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
337 return;
338 }
339
340 /* High bits must be set; load with lu12i.w + optional ori. */
341 tcg_out_opc_lu12i_w(s, rd, hi12);
342 if (lo != 0) {
343 tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
344 }
345 }
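
/*
 * Illustrative examples: val = 0x7ff (hi12 == 0) is a single
 * "ori rd, zero, 0x7ff"; val = -1 (hi12 matching the sign extension
 * of lo) is a single "addi.w rd, zero, -1"; val = 0x12345678 needs
 * the two-insn path:
 *   lu12i.w rd, 0x12345      # rd = 0x12345000
 *   ori     rd, rd, 0x678    # rd = 0x12345678
 */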
346
347 static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
348 tcg_target_long val)
349 {
350 /*
351 * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
352 * with dedicated instructions for filling the respective bitfields
353 * below:
354 *
355 * 6 5 4 3
356 * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
357 * +-----------------------+---------------------------------------+...
358 * | hi52 | hi32 |
359 * +-----------------------+---------------------------------------+...
360 * 3 2 1
361 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
362 * ...+-------------------------------------+-------------------------+
363 * | hi12 | lo |
364 * ...+-------------------------------------+-------------------------+
365 *
366 * Check whether val belongs to one of the several fast cases, before
367 * falling back to the slow path.
368 */
369
370 intptr_t pc_offset;
371 tcg_target_long val_lo, val_hi, pc_hi, offset_hi;
372 tcg_target_long hi12, hi32, hi52;
373
374 /* Value fits in signed i32. */
375 if (type == TCG_TYPE_I32 || val == (int32_t)val) {
376 tcg_out_movi_i32(s, rd, val);
377 return;
378 }
379
380 /* PC-relative cases. */
381 pc_offset = tcg_pcrel_diff(s, (void *)val);
382 if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) {
383 /* Single pcaddu2i. */
384 tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
385 return;
386 }
387
388 if (pc_offset == (int32_t)pc_offset) {
389 /* Offset within 32 bits; load with pcalau12i + ori. */
390 val_lo = sextreg(val, 0, 12);
391 val_hi = val >> 12;
392 pc_hi = (val - pc_offset) >> 12;
393 offset_hi = val_hi - pc_hi;
394
395 tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20));
396 tcg_out_opc_pcalau12i(s, rd, offset_hi);
397 if (val_lo != 0) {
398 tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff);
399 }
400 return;
401 }
402
403 hi12 = sextreg(val, 12, 20);
404 hi32 = sextreg(val, 32, 20);
405 hi52 = sextreg(val, 52, 12);
406
407 /* Single cu52i.d case. */
408 if ((hi52 != 0) && (ctz64(val) >= 52)) {
409 tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
410 return;
411 }
412
413 /* Slow path. Initialize the low 32 bits, then concat high bits. */
414 tcg_out_movi_i32(s, rd, val);
415
416 /* Set hi32 and hi52 explicitly when sign extension hasn't already produced them. */
417 if (hi32 != sextreg(hi12, 20, 20)) {
418 tcg_out_opc_cu32i_d(s, rd, hi32);
419 }
420
421 if (hi52 != sextreg(hi32, 20, 12)) {
422 tcg_out_opc_cu52i_d(s, rd, rd, hi52);
423 }
424 }
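
/*
 * Illustrative worst case (assuming neither PC-relative case applies):
 * val = 0x123456789abcdef0 takes all four steps:
 *   lu12i.w rd, 0x9abcd      # bits 31..12, sign-extended to 64 bits
 *   ori     rd, rd, 0xef0    # bits 11..0
 *   cu32i.d rd, 0x45678      # bits 51..32
 *   cu52i.d rd, rd, 0x123    # bits 63..52
 * whereas val = 0x5000000000000000 (ctz >= 52) is a single
 * "cu52i.d rd, zero, 0x500".
 */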
425
426 static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
427 TCGReg rs, tcg_target_long imm)
428 {
429 tcg_target_long lo12 = sextreg(imm, 0, 12);
430 tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);
431
432 /*
433 * Note that there's a hole in between hi16 and lo12:
434 *
435 * 3 2 1 0
436 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
437 * ...+-------------------------------+-------+-----------------------+
438 * | hi16 | | lo12 |
439 * ...+-------------------------------+-------+-----------------------+
440 *
441 * For bits within that hole, it's more efficient to use LU12I and ADD.
442 */
443 if (imm == (hi16 << 16) + lo12) {
444 if (hi16) {
445 tcg_out_opc_addu16i_d(s, rd, rs, hi16);
446 rs = rd;
447 }
448 if (type == TCG_TYPE_I32) {
449 tcg_out_opc_addi_w(s, rd, rs, lo12);
450 } else if (lo12) {
451 tcg_out_opc_addi_d(s, rd, rs, lo12);
452 } else {
453 tcg_out_mov(s, type, rd, rs);
454 }
455 } else {
456 tcg_out_movi(s, type, TCG_REG_TMP0, imm);
457 if (type == TCG_TYPE_I32) {
458 tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
459 } else {
460 tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
461 }
462 }
463 }
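
/*
 * Illustrative example: imm = 0x12340555 leaves the bit 15..12 hole
 * clear (lo12 = 0x555, hi16 = 0x1234), so the fast path emits
 *   addu16i.d rd, rs, 0x1234
 *   addi.d    rd, rd, 0x555
 * while imm = 0x12345 sets bits inside the hole and goes through the
 * movi + add slow path instead.
 */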
464
465 static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
466 {
467 return false;
468 }
469
470 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
471 tcg_target_long imm)
472 {
473 /* This function is only used for passing structs by reference. */
474 g_assert_not_reached();
475 }
476
477 static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
478 {
479 tcg_out_opc_andi(s, ret, arg, 0xff);
480 }
481
482 static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
483 {
484 tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
485 }
486
487 static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
488 {
489 tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
490 }
491
492 static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
493 {
494 tcg_out_opc_sext_b(s, ret, arg);
495 }
496
497 static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
498 {
499 tcg_out_opc_sext_h(s, ret, arg);
500 }
501
502 static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
503 {
504 tcg_out_opc_addi_w(s, ret, arg, 0);
505 }
506
507 static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
508 {
509 if (ret != arg) {
510 tcg_out_ext32s(s, ret, arg);
511 }
512 }
513
514 static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
515 {
516 tcg_out_ext32u(s, ret, arg);
517 }
518
519 static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
520 {
521 tcg_out_ext32s(s, ret, arg);
522 }
523
524 static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
525 TCGReg a0, TCGReg a1, TCGReg a2,
526 bool c2, bool is_32bit)
527 {
528 if (c2) {
529 /*
530 * Fast path: semantics already satisfied due to constraint and
531 * insn behavior, so a single instruction is enough.
532 */
533 tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
534 /* all clz/ctz insns belong to DJ-format */
535 tcg_out32(s, encode_dj_insn(opc, a0, a1));
536 return;
537 }
538
539 tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
540 /* a0 = a1 ? TCG_REG_TMP0 : a2 */
541 tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
542 tcg_out_opc_masknez(s, a0, a2, a1);
543 tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
544 }
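
/*
 * The tail above is the usual LoongArch select idiom, computing
 * a0 = (a1 != 0) ? clz/ctz result : a2:
 *   maskeqz tmp, tmp, a1     # tmp = (a1 != 0) ? tmp : 0
 *   masknez a0, a2, a1       # a0  = (a1 == 0) ? a2  : 0
 *   or      a0, tmp, a0      # merge the two disjoint halves
 */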
545
546 #define SETCOND_INV TCG_TARGET_NB_REGS
547 #define SETCOND_NEZ (SETCOND_INV << 1)
548 #define SETCOND_FLAGS (SETCOND_INV | SETCOND_NEZ)
549
550 static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
551 TCGReg arg1, tcg_target_long arg2, bool c2)
552 {
553 int flags = 0;
554
555 switch (cond) {
556 case TCG_COND_EQ: /* -> NE */
557 case TCG_COND_GE: /* -> LT */
558 case TCG_COND_GEU: /* -> LTU */
559 case TCG_COND_GT: /* -> LE */
560 case TCG_COND_GTU: /* -> LEU */
561 cond = tcg_invert_cond(cond);
562 flags ^= SETCOND_INV;
563 break;
564 default:
565 break;
566 }
567
568 switch (cond) {
569 case TCG_COND_LE:
570 case TCG_COND_LEU:
571 /*
572 * If we have a constant input, the most efficient way to implement
573 * LE is by adding 1 and using LT. Watch out for wraparound for LEU.
574 * We don't need to worry about this for LE because the constant input
575 * is still constrained to int32_t, and INT32_MAX+1 is representable
576 * in the 64-bit temporary register.
577 */
578 if (c2) {
579 if (cond == TCG_COND_LEU) {
580 /* unsigned <= -1 is true */
581 if (arg2 == -1) {
582 tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
583 return ret;
584 }
585 cond = TCG_COND_LTU;
586 } else {
587 cond = TCG_COND_LT;
588 }
589 arg2 += 1;
590 } else {
591 TCGReg tmp = arg2;
592 arg2 = arg1;
593 arg1 = tmp;
594 cond = tcg_swap_cond(cond); /* LE -> GE */
595 cond = tcg_invert_cond(cond); /* GE -> LT */
596 flags ^= SETCOND_INV;
597 }
598 break;
599 default:
600 break;
601 }
602
603 switch (cond) {
604 case TCG_COND_NE:
605 flags |= SETCOND_NEZ;
606 if (!c2) {
607 tcg_out_opc_xor(s, ret, arg1, arg2);
608 } else if (arg2 == 0) {
609 ret = arg1;
610 } else if (arg2 >= 0 && arg2 <= 0xfff) {
611 tcg_out_opc_xori(s, ret, arg1, arg2);
612 } else {
613 tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
614 }
615 break;
616
617 case TCG_COND_LT:
618 case TCG_COND_LTU:
619 if (c2) {
620 if (arg2 >= -0x800 && arg2 <= 0x7ff) {
621 if (cond == TCG_COND_LT) {
622 tcg_out_opc_slti(s, ret, arg1, arg2);
623 } else {
624 tcg_out_opc_sltui(s, ret, arg1, arg2);
625 }
626 break;
627 }
628 tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
629 arg2 = TCG_REG_TMP0;
630 }
631 if (cond == TCG_COND_LT) {
632 tcg_out_opc_slt(s, ret, arg1, arg2);
633 } else {
634 tcg_out_opc_sltu(s, ret, arg1, arg2);
635 }
636 break;
637
638 default:
639 g_assert_not_reached();
640 break;
641 }
642
643 return ret | flags;
644 }
645
646 static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
647 TCGReg arg1, tcg_target_long arg2, bool c2)
648 {
649 int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);
650
651 if (tmpflags != ret) {
652 TCGReg tmp = tmpflags & ~SETCOND_FLAGS;
653
654 switch (tmpflags & SETCOND_FLAGS) {
655 case SETCOND_INV:
656 /* Intermediate result is boolean: simply invert. */
657 tcg_out_opc_xori(s, ret, tmp, 1);
658 break;
659 case SETCOND_NEZ:
660 /* Intermediate result is zero/non-zero: test != 0. */
661 tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
662 break;
663 case SETCOND_NEZ | SETCOND_INV:
664 /* Intermediate result is zero/non-zero: test == 0. */
665 tcg_out_opc_sltui(s, ret, tmp, 1);
666 break;
667 default:
668 g_assert_not_reached();
669 }
670 }
671 }
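
/*
 * Worked example: setcond GTU ret, arg1, 5 is first inverted to LEU
 * (recording SETCOND_INV), then LEU with a constant becomes LTU with
 * arg2 + 1, giving:
 *   sltui ret, arg1, 6       # ret = (arg1 <= 5) unsigned
 *   xori  ret, ret, 1        # undo the inversion: ret = (arg1 > 5)
 */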
672
673 static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
674 TCGReg c1, tcg_target_long c2, bool const2,
675 TCGReg v1, TCGReg v2)
676 {
677 int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
678 TCGReg t;
679
680 /* Standardize the test below to t != 0. */
681 if (tmpflags & SETCOND_INV) {
682 t = v1, v1 = v2, v2 = t;
683 }
684
685 t = tmpflags & ~SETCOND_FLAGS;
686 if (v1 == TCG_REG_ZERO) {
687 tcg_out_opc_masknez(s, ret, v2, t);
688 } else if (v2 == TCG_REG_ZERO) {
689 tcg_out_opc_maskeqz(s, ret, v1, t);
690 } else {
691 tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
692 tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
693 tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
694 }
695 }
696
697 /*
698 * Branch helpers
699 */
700
701 static const struct {
702 LoongArchInsn op;
703 bool swap;
704 } tcg_brcond_to_loongarch[] = {
705 [TCG_COND_EQ] = { OPC_BEQ, false },
706 [TCG_COND_NE] = { OPC_BNE, false },
707 [TCG_COND_LT] = { OPC_BGT, true },
708 [TCG_COND_GE] = { OPC_BLE, true },
709 [TCG_COND_LE] = { OPC_BLE, false },
710 [TCG_COND_GT] = { OPC_BGT, false },
711 [TCG_COND_LTU] = { OPC_BGTU, true },
712 [TCG_COND_GEU] = { OPC_BLEU, true },
713 [TCG_COND_LEU] = { OPC_BLEU, false },
714 [TCG_COND_GTU] = { OPC_BGTU, false }
715 };
716
717 static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
718 TCGReg arg2, TCGLabel *l)
719 {
720 LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;
721
722 tcg_debug_assert(op != 0);
723
724 if (tcg_brcond_to_loongarch[cond].swap) {
725 TCGReg t = arg1;
726 arg1 = arg2;
727 arg2 = t;
728 }
729
730 /* all conditional branch insns belong to DJSk16-format */
731 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
732 tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
733 }
734
735 static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
736 {
737 TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
738 ptrdiff_t offset = tcg_pcrel_diff(s, arg);
739
740 tcg_debug_assert((offset & 3) == 0);
741 if (offset == sextreg(offset, 0, 28)) {
742 /* short jump: +/- 128MiB */
743 if (tail) {
744 tcg_out_opc_b(s, offset >> 2);
745 } else {
746 tcg_out_opc_bl(s, offset >> 2);
747 }
748 } else if (offset == sextreg(offset, 0, 38)) {
749 /* long jump: +/- 128GiB */
750 tcg_target_long lo = sextreg(offset, 0, 18);
751 tcg_target_long hi = offset - lo;
752 tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
753 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
754 } else {
755 /* far jump: 64-bit */
756 tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
757 tcg_target_long hi = (tcg_target_long)arg - lo;
758 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
759 tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
760 }
761 }
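
/*
 * Illustrative mid-range case: a call displacement of 0x12345678 bytes
 * fits in 38 bits but not 28, so the pair emitted is
 *   pcaddu18i tmp0, 0x48d        # tmp0 = pc + (0x48d << 18)
 *   jirl      ra, tmp0, 0x159e   # branch to tmp0 + (0x159e << 2)
 * which lands exactly at pc + 0x12340000 + 0x5678.
 */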
762
763 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
764 const TCGHelperInfo *info)
765 {
766 tcg_out_call_int(s, arg, false);
767 }
768
769 /*
770 * Load/store helpers
771 */
772
773 static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
774 TCGReg addr, intptr_t offset)
775 {
776 intptr_t imm12 = sextreg(offset, 0, 12);
777
778 if (offset != imm12) {
779 intptr_t diff = tcg_pcrel_diff(s, (void *)offset);
780
781 if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
782 imm12 = sextreg(diff, 0, 12);
783 tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
784 } else {
785 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
786 if (addr != TCG_REG_ZERO) {
787 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
788 }
789 }
790 addr = TCG_REG_TMP2;
791 }
792
793 switch (opc) {
794 case OPC_LD_B:
795 case OPC_LD_BU:
796 case OPC_LD_H:
797 case OPC_LD_HU:
798 case OPC_LD_W:
799 case OPC_LD_WU:
800 case OPC_LD_D:
801 case OPC_ST_B:
802 case OPC_ST_H:
803 case OPC_ST_W:
804 case OPC_ST_D:
805 tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
806 break;
807 default:
808 g_assert_not_reached();
809 }
810 }
811
812 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
813 TCGReg arg1, intptr_t arg2)
814 {
815 bool is_32bit = type == TCG_TYPE_I32;
816 tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2);
817 }
818
819 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
820 TCGReg arg1, intptr_t arg2)
821 {
822 bool is_32bit = type == TCG_TYPE_I32;
823 tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2);
824 }
825
826 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
827 TCGReg base, intptr_t ofs)
828 {
829 if (val == 0) {
830 tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
831 return true;
832 }
833 return false;
834 }
835
836 /*
837 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
838 */
839
840 static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
841 {
842 tcg_out_opc_b(s, 0);
843 return reloc_br_sd10k16(s->code_ptr - 1, target);
844 }
845
846 static const TCGLdstHelperParam ldst_helper_param = {
847 .ntmp = 1, .tmp = { TCG_REG_TMP0 }
848 };
849
850 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
851 {
852 MemOp opc = get_memop(l->oi);
853
854 /* resolve label address */
855 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
856 return false;
857 }
858
859 tcg_out_ld_helper_args(s, l, &ldst_helper_param);
860 tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false);
861 tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param);
862 return tcg_out_goto(s, l->raddr);
863 }
864
865 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
866 {
867 MemOp opc = get_memop(l->oi);
868
869 /* resolve label address */
870 if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
871 return false;
872 }
873
874 tcg_out_st_helper_args(s, l, &ldst_helper_param);
875 tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);
876 return tcg_out_goto(s, l->raddr);
877 }
878
879 typedef struct {
880 TCGReg base;
881 TCGReg index;
882 TCGAtomAlign aa;
883 } HostAddress;
884
885 bool tcg_target_has_memory_bswap(MemOp memop)
886 {
887 return false;
888 }
889
890 /* We expect to use a 12-bit negative offset from ENV. */
891 #define MIN_TLB_MASK_TABLE_OFS -(1 << 11)
892
893 /*
894 * For softmmu, perform the TLB load and compare.
895 * For useronly, perform any required alignment tests.
896 * In both cases, return a TCGLabelQemuLdst structure if the slow path
897 * is required and fill in @h with the host address for the fast path.
898 */
899 static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
900 TCGReg addr_reg, MemOpIdx oi,
901 bool is_ld)
902 {
903 TCGType addr_type = s->addr_type;
904 TCGLabelQemuLdst *ldst = NULL;
905 MemOp opc = get_memop(oi);
906 MemOp a_bits;
907
908 h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
909 a_bits = h->aa.align;
910
911 #ifdef CONFIG_SOFTMMU
912 unsigned s_bits = opc & MO_SIZE;
913 int mem_index = get_mmuidx(oi);
914 int fast_ofs = tlb_mask_table_ofs(s, mem_index);
915 int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
916 int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
917
918 ldst = new_ldst_label(s);
919 ldst->is_ld = is_ld;
920 ldst->oi = oi;
921 ldst->addrlo_reg = addr_reg;
922
923 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
924 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
925
926 tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
927 s->page_bits - CPU_TLB_ENTRY_BITS);
928 tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
929 tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
930
931 /* Load the tlb comparator and the addend. */
932 QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
933 tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
934 is_ld ? offsetof(CPUTLBEntry, addr_read)
935 : offsetof(CPUTLBEntry, addr_write));
936 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
937 offsetof(CPUTLBEntry, addend));
938
939 /*
940 * For aligned accesses, we check the first byte and include the alignment
941 * bits within the address. For unaligned access, we check that we don't
942 * cross pages using the address of the last byte of the access.
943 */
944 if (a_bits < s_bits) {
945 unsigned a_mask = (1u << a_bits) - 1;
946 unsigned s_mask = (1u << s_bits) - 1;
947 tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
948 } else {
949 tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
950 }
951 tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
952 a_bits, s->page_bits - 1);
953
954 /* Compare masked address with the TLB entry. */
955 ldst->label_ptr[0] = s->code_ptr;
956 tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);
957
958 h->index = TCG_REG_TMP2;
959 #else
960 if (a_bits) {
961 ldst = new_ldst_label(s);
962
963 ldst->is_ld = is_ld;
964 ldst->oi = oi;
965 ldst->addrlo_reg = addr_reg;
966
967 /*
968 * Without micro-architecture details, we don't know which of
969 * bstrpick or andi is faster, so use bstrpick as it's not
970 * constrained by imm field width. Not to say alignments >= 2^12
971 * are going to happen any time soon.
972 */
973 tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);
974
975 ldst->label_ptr[0] = s->code_ptr;
976 tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
977 }
978
979 h->index = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
980 #endif
981
982 if (addr_type == TCG_TYPE_I32) {
983 h->base = TCG_REG_TMP0;
984 tcg_out_ext32u(s, h->base, addr_reg);
985 } else {
986 h->base = addr_reg;
987 }
988
989 return ldst;
990 }
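
/*
 * For reference, the softmmu fast path above expands to roughly the
 * following (tmp0/tmp1/tmp2 being TCG_REG_TMP0..TMP2):
 *   ld.d      tmp0, env, mask_ofs    # TLB mask for this mmu_idx
 *   ld.d      tmp1, env, table_ofs   # TLB table base
 *   srli.d    tmp2, addr, page_bits - CPU_TLB_ENTRY_BITS
 *   and       tmp2, tmp2, tmp0       # index into the table
 *   add.d     tmp2, tmp2, tmp1       # address of the TLB entry
 *   ld.d      tmp0, tmp2, <addr_read or addr_write>
 *   ld.d      tmp2, tmp2, <addend>
 *   mov/addi  tmp1, addr             # plus s_mask - a_mask if unaligned
 *   bstrins.d tmp1, zero, a_bits, page_bits - 1
 *   bne       tmp0, tmp1, <slow path>
 * leaving addr + tmp2 (the addend) as the fast-path host address.
 */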
991
992 static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
993 TCGReg rd, HostAddress h)
994 {
995 /* Byte swapping is left to middle-end expansion. */
996 tcg_debug_assert((opc & MO_BSWAP) == 0);
997
998 switch (opc & MO_SSIZE) {
999 case MO_UB:
1000 tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
1001 break;
1002 case MO_SB:
1003 tcg_out_opc_ldx_b(s, rd, h.base, h.index);
1004 break;
1005 case MO_UW:
1006 tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
1007 break;
1008 case MO_SW:
1009 tcg_out_opc_ldx_h(s, rd, h.base, h.index);
1010 break;
1011 case MO_UL:
1012 if (type == TCG_TYPE_I64) {
1013 tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
1014 break;
1015 }
1016 /* fallthrough */
1017 case MO_SL:
1018 tcg_out_opc_ldx_w(s, rd, h.base, h.index);
1019 break;
1020 case MO_UQ:
1021 tcg_out_opc_ldx_d(s, rd, h.base, h.index);
1022 break;
1023 default:
1024 g_assert_not_reached();
1025 }
1026 }
1027
1028 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1029 MemOpIdx oi, TCGType data_type)
1030 {
1031 TCGLabelQemuLdst *ldst;
1032 HostAddress h;
1033
1034 ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
1035 tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);
1036
1037 if (ldst) {
1038 ldst->type = data_type;
1039 ldst->datalo_reg = data_reg;
1040 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1041 }
1042 }
1043
1044 static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
1045 TCGReg rd, HostAddress h)
1046 {
1047 /* Byte swapping is left to middle-end expansion. */
1048 tcg_debug_assert((opc & MO_BSWAP) == 0);
1049
1050 switch (opc & MO_SIZE) {
1051 case MO_8:
1052 tcg_out_opc_stx_b(s, rd, h.base, h.index);
1053 break;
1054 case MO_16:
1055 tcg_out_opc_stx_h(s, rd, h.base, h.index);
1056 break;
1057 case MO_32:
1058 tcg_out_opc_stx_w(s, rd, h.base, h.index);
1059 break;
1060 case MO_64:
1061 tcg_out_opc_stx_d(s, rd, h.base, h.index);
1062 break;
1063 default:
1064 g_assert_not_reached();
1065 }
1066 }
1067
1068 static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
1069 MemOpIdx oi, TCGType data_type)
1070 {
1071 TCGLabelQemuLdst *ldst;
1072 HostAddress h;
1073
1074 ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
1075 tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h);
1076
1077 if (ldst) {
1078 ldst->type = data_type;
1079 ldst->datalo_reg = data_reg;
1080 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1081 }
1082 }
1083
1084 static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
1085 TCGReg addr_reg, MemOpIdx oi, bool is_ld)
1086 {
1087 TCGLabelQemuLdst *ldst;
1088 HostAddress h;
1089
1090 ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
1091
1092 if (h.aa.atom == MO_128) {
1093 /*
1094 * Use VLDX/VSTX when 128-bit atomicity is required.
1095 * If the address is 16-byte aligned, the 128-bit load/store is atomic.
1096 */
1097 if (is_ld) {
1098 tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index);
1099 tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0);
1100 tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1);
1101 } else {
1102 tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0);
1103 tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1);
1104 tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index);
1105 }
1106 } else {
1107 /* Otherwise use a pair of LD/ST. */
1108 tcg_out_opc_add_d(s, TCG_REG_TMP0, h.base, h.index);
1109 if (is_ld) {
1110 tcg_out_opc_ld_d(s, data_lo, TCG_REG_TMP0, 0);
1111 tcg_out_opc_ld_d(s, data_hi, TCG_REG_TMP0, 8);
1112 } else {
1113 tcg_out_opc_st_d(s, data_lo, TCG_REG_TMP0, 0);
1114 tcg_out_opc_st_d(s, data_hi, TCG_REG_TMP0, 8);
1115 }
1116 }
1117
1118 if (ldst) {
1119 ldst->type = TCG_TYPE_I128;
1120 ldst->datalo_reg = data_lo;
1121 ldst->datahi_reg = data_hi;
1122 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
1123 }
1124 }
1125
1126 /*
1127 * Entry-points
1128 */
1129
1130 static const tcg_insn_unit *tb_ret_addr;
1131
1132 static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
1133 {
1134 /* Reuse the zeroing that exists for goto_ptr. */
1135 if (a0 == 0) {
1136 tcg_out_call_int(s, tcg_code_gen_epilogue, true);
1137 } else {
1138 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
1139 tcg_out_call_int(s, tb_ret_addr, true);
1140 }
1141 }
1142
1143 static void tcg_out_goto_tb(TCGContext *s, int which)
1144 {
1145 /*
1146 * Direct branch, or load of the indirect address, to be patched
1147 * by tb_target_set_jmp_target. The indirect load offset is checked
1148 * to be in range early, regardless of the direct branch distance,
1149 * via the assert within tcg_out_opc_pcaddu2i.
1150 */
1151 uintptr_t i_addr = get_jmp_target_addr(s, which);
1152 intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);
1153
1154 set_jmp_insn_offset(s, which);
1155 tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);
1156
1157 /* Finish the load and indirect branch. */
1158 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
1159 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
1160 set_jmp_reset_offset(s, which);
1161 }
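
/*
 * tb_target_set_jmp_target below then patches the first instruction:
 * either to a direct "b <disp>" when the target is within +/- 128MiB,
 * or back to this pcaddu2i + ld.d + jirl indirect sequence.
 */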
1162
1163 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
1164 uintptr_t jmp_rx, uintptr_t jmp_rw)
1165 {
1166 uintptr_t d_addr = tb->jmp_target_addr[n];
1167 ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
1168 tcg_insn_unit insn;
1169
1170 /* Either directly branch, or load slot address for indirect branch. */
1171 if (d_disp == sextreg(d_disp, 0, 26)) {
1172 insn = encode_sd10k16_insn(OPC_B, d_disp);
1173 } else {
1174 uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
1175 intptr_t i_disp = i_addr - jmp_rx;
1176 insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
1177 }
1178
1179 qatomic_set((tcg_insn_unit *)jmp_rw, insn);
1180 flush_idcache_range(jmp_rx, jmp_rw, 4);
1181 }
1182
1183 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
1184 const TCGArg args[TCG_MAX_OP_ARGS],
1185 const int const_args[TCG_MAX_OP_ARGS])
1186 {
1187 TCGArg a0 = args[0];
1188 TCGArg a1 = args[1];
1189 TCGArg a2 = args[2];
1190 TCGArg a3 = args[3];
1191 int c2 = const_args[2];
1192
1193 switch (opc) {
1194 case INDEX_op_mb:
1195 tcg_out_mb(s, a0);
1196 break;
1197
1198 case INDEX_op_goto_ptr:
1199 tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
1200 break;
1201
1202 case INDEX_op_br:
1203 tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
1204 0);
1205 tcg_out_opc_b(s, 0);
1206 break;
1207
1208 case INDEX_op_brcond_i32:
1209 case INDEX_op_brcond_i64:
1210 tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
1211 break;
1212
1213 case INDEX_op_extrh_i64_i32:
1214 tcg_out_opc_srai_d(s, a0, a1, 32);
1215 break;
1216
1217 case INDEX_op_not_i32:
1218 case INDEX_op_not_i64:
1219 tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
1220 break;
1221
1222 case INDEX_op_nor_i32:
1223 case INDEX_op_nor_i64:
1224 if (c2) {
1225 tcg_out_opc_ori(s, a0, a1, a2);
1226 tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
1227 } else {
1228 tcg_out_opc_nor(s, a0, a1, a2);
1229 }
1230 break;
1231
1232 case INDEX_op_andc_i32:
1233 case INDEX_op_andc_i64:
1234 if (c2) {
1235 /* guaranteed to fit due to constraint */
1236 tcg_out_opc_andi(s, a0, a1, ~a2);
1237 } else {
1238 tcg_out_opc_andn(s, a0, a1, a2);
1239 }
1240 break;
1241
1242 case INDEX_op_orc_i32:
1243 case INDEX_op_orc_i64:
1244 if (c2) {
1245 /* guaranteed to fit due to constraint */
1246 tcg_out_opc_ori(s, a0, a1, ~a2);
1247 } else {
1248 tcg_out_opc_orn(s, a0, a1, a2);
1249 }
1250 break;
1251
1252 case INDEX_op_and_i32:
1253 case INDEX_op_and_i64:
1254 if (c2) {
1255 tcg_out_opc_andi(s, a0, a1, a2);
1256 } else {
1257 tcg_out_opc_and(s, a0, a1, a2);
1258 }
1259 break;
1260
1261 case INDEX_op_or_i32:
1262 case INDEX_op_or_i64:
1263 if (c2) {
1264 tcg_out_opc_ori(s, a0, a1, a2);
1265 } else {
1266 tcg_out_opc_or(s, a0, a1, a2);
1267 }
1268 break;
1269
1270 case INDEX_op_xor_i32:
1271 case INDEX_op_xor_i64:
1272 if (c2) {
1273 tcg_out_opc_xori(s, a0, a1, a2);
1274 } else {
1275 tcg_out_opc_xor(s, a0, a1, a2);
1276 }
1277 break;
1278
1279 case INDEX_op_extract_i32:
1280 tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
1281 break;
1282 case INDEX_op_extract_i64:
1283 tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
1284 break;
1285
1286 case INDEX_op_deposit_i32:
1287 tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
1288 break;
1289 case INDEX_op_deposit_i64:
1290 tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
1291 break;
1292
1293 case INDEX_op_bswap16_i32:
1294 case INDEX_op_bswap16_i64:
1295 tcg_out_opc_revb_2h(s, a0, a1);
1296 if (a2 & TCG_BSWAP_OS) {
1297 tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
1298 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1299 tcg_out_ext16u(s, a0, a0);
1300 }
1301 break;
1302
1303 case INDEX_op_bswap32_i32:
1304 /* All 32-bit values are computed sign-extended in the register. */
1305 a2 = TCG_BSWAP_OS;
1306 /* fallthrough */
1307 case INDEX_op_bswap32_i64:
1308 tcg_out_opc_revb_2w(s, a0, a1);
1309 if (a2 & TCG_BSWAP_OS) {
1310 tcg_out_ext32s(s, a0, a0);
1311 } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
1312 tcg_out_ext32u(s, a0, a0);
1313 }
1314 break;
1315
1316 case INDEX_op_bswap64_i64:
1317 tcg_out_opc_revb_d(s, a0, a1);
1318 break;
1319
1320 case INDEX_op_clz_i32:
1321 tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
1322 break;
1323 case INDEX_op_clz_i64:
1324 tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
1325 break;
1326
1327 case INDEX_op_ctz_i32:
1328 tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
1329 break;
1330 case INDEX_op_ctz_i64:
1331 tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
1332 break;
1333
1334 case INDEX_op_shl_i32:
1335 if (c2) {
1336 tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
1337 } else {
1338 tcg_out_opc_sll_w(s, a0, a1, a2);
1339 }
1340 break;
1341 case INDEX_op_shl_i64:
1342 if (c2) {
1343 tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
1344 } else {
1345 tcg_out_opc_sll_d(s, a0, a1, a2);
1346 }
1347 break;
1348
1349 case INDEX_op_shr_i32:
1350 if (c2) {
1351 tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
1352 } else {
1353 tcg_out_opc_srl_w(s, a0, a1, a2);
1354 }
1355 break;
1356 case INDEX_op_shr_i64:
1357 if (c2) {
1358 tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
1359 } else {
1360 tcg_out_opc_srl_d(s, a0, a1, a2);
1361 }
1362 break;
1363
1364 case INDEX_op_sar_i32:
1365 if (c2) {
1366 tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
1367 } else {
1368 tcg_out_opc_sra_w(s, a0, a1, a2);
1369 }
1370 break;
1371 case INDEX_op_sar_i64:
1372 if (c2) {
1373 tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
1374 } else {
1375 tcg_out_opc_sra_d(s, a0, a1, a2);
1376 }
1377 break;
1378
1379 case INDEX_op_rotl_i32:
1380 /* transform into equivalent rotr/rotri */
1381 if (c2) {
1382 tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
1383 } else {
1384 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
1385 tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
1386 }
1387 break;
1388 case INDEX_op_rotl_i64:
1389 /* transform into equivalent rotr/rotri */
1390 if (c2) {
1391 tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
1392 } else {
1393 tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
1394 tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
1395 }
1396 break;
1397
1398 case INDEX_op_rotr_i32:
1399 if (c2) {
1400 tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
1401 } else {
1402 tcg_out_opc_rotr_w(s, a0, a1, a2);
1403 }
1404 break;
1405 case INDEX_op_rotr_i64:
1406 if (c2) {
1407 tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
1408 } else {
1409 tcg_out_opc_rotr_d(s, a0, a1, a2);
1410 }
1411 break;
1412
1413 case INDEX_op_add_i32:
1414 if (c2) {
1415 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
1416 } else {
1417 tcg_out_opc_add_w(s, a0, a1, a2);
1418 }
1419 break;
1420 case INDEX_op_add_i64:
1421 if (c2) {
1422 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
1423 } else {
1424 tcg_out_opc_add_d(s, a0, a1, a2);
1425 }
1426 break;
1427
1428 case INDEX_op_sub_i32:
1429 if (c2) {
1430 tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
1431 } else {
1432 tcg_out_opc_sub_w(s, a0, a1, a2);
1433 }
1434 break;
1435 case INDEX_op_sub_i64:
1436 if (c2) {
1437 tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
1438 } else {
1439 tcg_out_opc_sub_d(s, a0, a1, a2);
1440 }
1441 break;
1442
1443 case INDEX_op_mul_i32:
1444 tcg_out_opc_mul_w(s, a0, a1, a2);
1445 break;
1446 case INDEX_op_mul_i64:
1447 tcg_out_opc_mul_d(s, a0, a1, a2);
1448 break;
1449
1450 case INDEX_op_mulsh_i32:
1451 tcg_out_opc_mulh_w(s, a0, a1, a2);
1452 break;
1453 case INDEX_op_mulsh_i64:
1454 tcg_out_opc_mulh_d(s, a0, a1, a2);
1455 break;
1456
1457 case INDEX_op_muluh_i32:
1458 tcg_out_opc_mulh_wu(s, a0, a1, a2);
1459 break;
1460 case INDEX_op_muluh_i64:
1461 tcg_out_opc_mulh_du(s, a0, a1, a2);
1462 break;
1463
1464 case INDEX_op_div_i32:
1465 tcg_out_opc_div_w(s, a0, a1, a2);
1466 break;
1467 case INDEX_op_div_i64:
1468 tcg_out_opc_div_d(s, a0, a1, a2);
1469 break;
1470
1471 case INDEX_op_divu_i32:
1472 tcg_out_opc_div_wu(s, a0, a1, a2);
1473 break;
1474 case INDEX_op_divu_i64:
1475 tcg_out_opc_div_du(s, a0, a1, a2);
1476 break;
1477
1478 case INDEX_op_rem_i32:
1479 tcg_out_opc_mod_w(s, a0, a1, a2);
1480 break;
1481 case INDEX_op_rem_i64:
1482 tcg_out_opc_mod_d(s, a0, a1, a2);
1483 break;
1484
1485 case INDEX_op_remu_i32:
1486 tcg_out_opc_mod_wu(s, a0, a1, a2);
1487 break;
1488 case INDEX_op_remu_i64:
1489 tcg_out_opc_mod_du(s, a0, a1, a2);
1490 break;
1491
1492 case INDEX_op_setcond_i32:
1493 case INDEX_op_setcond_i64:
1494 tcg_out_setcond(s, args[3], a0, a1, a2, c2);
1495 break;
1496
1497 case INDEX_op_movcond_i32:
1498 case INDEX_op_movcond_i64:
1499 tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
1500 break;
1501
1502 case INDEX_op_ld8s_i32:
1503 case INDEX_op_ld8s_i64:
1504 tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
1505 break;
1506 case INDEX_op_ld8u_i32:
1507 case INDEX_op_ld8u_i64:
1508 tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
1509 break;
1510 case INDEX_op_ld16s_i32:
1511 case INDEX_op_ld16s_i64:
1512 tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
1513 break;
1514 case INDEX_op_ld16u_i32:
1515 case INDEX_op_ld16u_i64:
1516 tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
1517 break;
1518 case INDEX_op_ld_i32:
1519 case INDEX_op_ld32s_i64:
1520 tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
1521 break;
1522 case INDEX_op_ld32u_i64:
1523 tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
1524 break;
1525 case INDEX_op_ld_i64:
1526 tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
1527 break;
1528
1529 case INDEX_op_st8_i32:
1530 case INDEX_op_st8_i64:
1531 tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
1532 break;
1533 case INDEX_op_st16_i32:
1534 case INDEX_op_st16_i64:
1535 tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
1536 break;
1537 case INDEX_op_st_i32:
1538 case INDEX_op_st32_i64:
1539 tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
1540 break;
1541 case INDEX_op_st_i64:
1542 tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
1543 break;
1544
1545 case INDEX_op_qemu_ld_a32_i32:
1546 case INDEX_op_qemu_ld_a64_i32:
1547 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
1548 break;
1549 case INDEX_op_qemu_ld_a32_i64:
1550 case INDEX_op_qemu_ld_a64_i64:
1551 tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
1552 break;
1553 case INDEX_op_qemu_ld_a32_i128:
1554 case INDEX_op_qemu_ld_a64_i128:
1555 tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
1556 break;
1557 case INDEX_op_qemu_st_a32_i32:
1558 case INDEX_op_qemu_st_a64_i32:
1559 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
1560 break;
1561 case INDEX_op_qemu_st_a32_i64:
1562 case INDEX_op_qemu_st_a64_i64:
1563 tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
1564 break;
1565 case INDEX_op_qemu_st_a32_i128:
1566 case INDEX_op_qemu_st_a64_i128:
1567 tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
1568 break;
1569
1570 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
1571 case INDEX_op_mov_i64:
1572 case INDEX_op_call: /* Always emitted via tcg_out_call. */
1573 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
1574 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
1575 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
1576 case INDEX_op_ext8s_i64:
1577 case INDEX_op_ext8u_i32:
1578 case INDEX_op_ext8u_i64:
1579 case INDEX_op_ext16s_i32:
1580 case INDEX_op_ext16s_i64:
1581 case INDEX_op_ext16u_i32:
1582 case INDEX_op_ext16u_i64:
1583 case INDEX_op_ext32s_i64:
1584 case INDEX_op_ext32u_i64:
1585 case INDEX_op_ext_i32_i64:
1586 case INDEX_op_extu_i32_i64:
1587 case INDEX_op_extrl_i64_i32:
1588 default:
1589 g_assert_not_reached();
1590 }
1591 }
1592
1593 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
1594 TCGReg rd, TCGReg rs)
1595 {
1596 switch (vece) {
1597 case MO_8:
1598 tcg_out_opc_vreplgr2vr_b(s, rd, rs);
1599 break;
1600 case MO_16:
1601 tcg_out_opc_vreplgr2vr_h(s, rd, rs);
1602 break;
1603 case MO_32:
1604 tcg_out_opc_vreplgr2vr_w(s, rd, rs);
1605 break;
1606 case MO_64:
1607 tcg_out_opc_vreplgr2vr_d(s, rd, rs);
1608 break;
1609 default:
1610 g_assert_not_reached();
1611 }
1612 return true;
1613 }
1614
1615 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
1616 TCGReg r, TCGReg base, intptr_t offset)
1617 {
1618 /* Handle imm overflow and scaling (the vldrepl.d imm is in 8-byte units) */
1619 if (offset < -0x800 || offset > 0x7ff ||
1620 (offset & ((1 << vece) - 1)) != 0) {
1621 tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
1622 base = TCG_REG_TMP0;
1623 offset = 0;
1624 }
1625 offset >>= vece;
1626
1627 switch (vece) {
1628 case MO_8:
1629 tcg_out_opc_vldrepl_b(s, r, base, offset);
1630 break;
1631 case MO_16:
1632 tcg_out_opc_vldrepl_h(s, r, base, offset);
1633 break;
1634 case MO_32:
1635 tcg_out_opc_vldrepl_w(s, r, base, offset);
1636 break;
1637 case MO_64:
1638 tcg_out_opc_vldrepl_d(s, r, base, offset);
1639 break;
1640 default:
1641 g_assert_not_reached();
1642 }
1643 return true;
1644 }
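
/*
 * Illustrative example: a MO_64 load-and-splat from base + 0x40 becomes
 * "vldrepl.d r, base, 8", the immediate being in units of the element
 * size; base + 0x44 is misaligned for MO_64 and takes the addi +
 * zero-offset path instead.
 */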
1645
1646 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
1647 TCGReg rd, int64_t v64)
1648 {
1649 /* Try vldi if imm can fit */
1650 int64_t value = sextract64(v64, 0, 8 << vece);
1651 if (-0x200 <= value && value <= 0x1FF) {
1652 uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);
1653 tcg_out_opc_vldi(s, rd, imm);
1654 return;
1655 }
1656
1657 /* TODO: vldi patterns when bit 12 of the immediate is set */
1658
1659 /* Fall back to vreplgr2vr */
1660 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
1661 switch (vece) {
1662 case MO_8:
1663 tcg_out_opc_vreplgr2vr_b(s, rd, TCG_REG_TMP0);
1664 break;
1665 case MO_16:
1666 tcg_out_opc_vreplgr2vr_h(s, rd, TCG_REG_TMP0);
1667 break;
1668 case MO_32:
1669 tcg_out_opc_vreplgr2vr_w(s, rd, TCG_REG_TMP0);
1670 break;
1671 case MO_64:
1672 tcg_out_opc_vreplgr2vr_d(s, rd, TCG_REG_TMP0);
1673 break;
1674 default:
1675 g_assert_not_reached();
1676 }
1677 }
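
/*
 * Illustrative example: splatting byte 0x05 at vece == MO_8 gives
 * "vldi vd, 0x005", while the same value at vece == MO_16 gives
 * "vldi vd, 0x405": the vece selector sits just above the 10-bit
 * signed value field.
 */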
1678
1679 static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0,
1680 const TCGArg a1, const TCGArg a2,
1681 bool a2_is_const, bool is_add)
1682 {
1683 static const LoongArchInsn add_vec_insn[4] = {
1684 OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D
1685 };
1686 static const LoongArchInsn add_vec_imm_insn[4] = {
1687 OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU
1688 };
1689 static const LoongArchInsn sub_vec_insn[4] = {
1690 OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D
1691 };
1692 static const LoongArchInsn sub_vec_imm_insn[4] = {
1693 OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
1694 };
1695
1696 if (a2_is_const) {
1697 int64_t value = sextract64(a2, 0, 8 << vece);
1698 if (!is_add) {
1699 value = -value;
1700 }
1701
1702 /* Try vaddi/vsubi */
1703 if (0 <= value && value <= 0x1f) {
1704 tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0,
1705 a1, value));
1706 return;
1707 } else if (-0x1f <= value && value < 0) {
1708 tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0,
1709 a1, -value));
1710 return;
1711 }
1712
1713 /* The TCG_CT_CONST_VADD constraint ensures this is unreachable. */
1714 g_assert_not_reached();
1715 }
1716
1717 if (is_add) {
1718 tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
1719 } else {
1720 tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));
1721 }
1722 }
1723
1724 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
1725 unsigned vecl, unsigned vece,
1726 const TCGArg args[TCG_MAX_OP_ARGS],
1727 const int const_args[TCG_MAX_OP_ARGS])
1728 {
1729 TCGType type = vecl + TCG_TYPE_V64;
1730 TCGArg a0, a1, a2, a3; TCGCond cond;
1731 TCGReg temp = TCG_REG_TMP0;
1732 TCGReg temp_vec = TCG_VEC_TMP0;
1733
1734 static const LoongArchInsn cmp_vec_insn[16][4] = {
1735 [TCG_COND_EQ] = {OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D},
1736 [TCG_COND_LE] = {OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D},
1737 [TCG_COND_LEU] = {OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU},
1738 [TCG_COND_LT] = {OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D},
1739 [TCG_COND_LTU] = {OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU},
1740 };
1741 static const LoongArchInsn cmp_vec_imm_insn[16][4] = {
1742 [TCG_COND_EQ] = {OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D},
1743 [TCG_COND_LE] = {OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D},
1744 [TCG_COND_LEU] = {OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU},
1745 [TCG_COND_LT] = {OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D},
1746 [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
1747 };
1748 LoongArchInsn insn;
1749 static const LoongArchInsn neg_vec_insn[4] = {
1750 OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D
1751 };
1752 static const LoongArchInsn mul_vec_insn[4] = {
1753 OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D
1754 };
1755 static const LoongArchInsn smin_vec_insn[4] = {
1756 OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D
1757 };
1758 static const LoongArchInsn umin_vec_insn[4] = {
1759 OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU
1760 };
1761 static const LoongArchInsn smax_vec_insn[4] = {
1762 OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D
1763 };
1764 static const LoongArchInsn umax_vec_insn[4] = {
1765 OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU
1766 };
1767 static const LoongArchInsn ssadd_vec_insn[4] = {
1768 OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D
1769 };
1770 static const LoongArchInsn usadd_vec_insn[4] = {
1771 OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU
1772 };
1773 static const LoongArchInsn sssub_vec_insn[4] = {
1774 OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D
1775 };
1776 static const LoongArchInsn ussub_vec_insn[4] = {
1777 OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU
1778 };
1779 static const LoongArchInsn shlv_vec_insn[4] = {
1780 OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D
1781 };
1782 static const LoongArchInsn shrv_vec_insn[4] = {
1783 OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D
1784 };
1785 static const LoongArchInsn sarv_vec_insn[4] = {
1786 OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D
1787 };
1788 static const LoongArchInsn shli_vec_insn[4] = {
1789 OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D
1790 };
1791 static const LoongArchInsn shri_vec_insn[4] = {
1792 OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D
1793 };
1794 static const LoongArchInsn sari_vec_insn[4] = {
1795 OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D
1796 };
1797 static const LoongArchInsn rotrv_vec_insn[4] = {
1798 OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D
1799 };
1800
1801 a0 = args[0];
1802 a1 = args[1];
1803 a2 = args[2];
1804 a3 = args[3];
1805
1806 /* Currently only supports V128 */
1807 tcg_debug_assert(type == TCG_TYPE_V128);
1808
1809 switch (opc) {
1810 case INDEX_op_st_vec:
1811 /* Try to fit vst imm */
1812 if (-0x800 <= a2 && a2 <= 0x7ff) {
1813 tcg_out_opc_vst(s, a0, a1, a2);
1814 } else {
1815 tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
1816 tcg_out_opc_vstx(s, a0, a1, temp);
1817 }
1818 break;
1819 case INDEX_op_ld_vec:
1820 /* Try to fit vld imm */
1821 if (-0x800 <= a2 && a2 <= 0x7ff) {
1822 tcg_out_opc_vld(s, a0, a1, a2);
1823 } else {
1824 tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
1825 tcg_out_opc_vldx(s, a0, a1, temp);
1826 }
1827 break;
1828 case INDEX_op_and_vec:
1829 tcg_out_opc_vand_v(s, a0, a1, a2);
1830 break;
1831 case INDEX_op_andc_vec:
1832 /*
1833 * vandn vd, vj, vk: vd = vk & ~vj
1834 * andc_vec vd, vj, vk: vd = vj & ~vk
1835 * vj and vk are swapped
1836 */
1837 tcg_out_opc_vandn_v(s, a0, a2, a1);
1838 break;
1839 case INDEX_op_or_vec:
1840 tcg_out_opc_vor_v(s, a0, a1, a2);
1841 break;
1842 case INDEX_op_orc_vec:
1843 tcg_out_opc_vorn_v(s, a0, a1, a2);
1844 break;
1845 case INDEX_op_xor_vec:
1846 tcg_out_opc_vxor_v(s, a0, a1, a2);
1847 break;
1848 case INDEX_op_nor_vec:
1849 tcg_out_opc_vnor_v(s, a0, a1, a2);
1850 break;
1851 case INDEX_op_not_vec:
1852 tcg_out_opc_vnor_v(s, a0, a1, a1);
1853 break;
1854 case INDEX_op_cmp_vec:
1855 cond = args[3];
1856 if (const_args[2]) {
1857 /*
1858 * cmp_vec dest, src, value
1859 * Try vseqi/vslei/vslti
1860 */
1861 int64_t value = sextract64(a2, 0, 8 << vece);
1862 if ((cond == TCG_COND_EQ || cond == TCG_COND_LE ||
1863 cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) {
1864 tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece],
1865 a0, a1, value));
1866 break;
1867 } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) &&
1868 (0x00 <= value && value <= 0x1f)) {
1869 tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece],
1870 a0, a1, value));
1871 break;
1872 }
1871 break;
1872 }
1873
1874 /*
1875 * Fall back to:
1876 * dupi_vec temp, a2
1877 * cmp_vec a0, a1, temp, cond
1878 */
1879 tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
1880 a2 = temp_vec;
1881 }
1882
1883 insn = cmp_vec_insn[cond][vece];
1884 if (insn == 0) {
1885 TCGArg t;
1886 t = a1, a1 = a2, a2 = t;
1887 cond = tcg_swap_cond(cond);
1888 insn = cmp_vec_insn[cond][vece];
1889 tcg_debug_assert(insn != 0);
1890 }
1891 tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
1892 break;
1893 case INDEX_op_add_vec:
1894 tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], true);
1895 break;
1896 case INDEX_op_sub_vec:
1897 tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false);
1898 break;
1899 case INDEX_op_neg_vec:
1900 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
1901 break;
1902 case INDEX_op_mul_vec:
1903 tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2));
1904 break;
1905 case INDEX_op_smin_vec:
1906 tcg_out32(s, encode_vdvjvk_insn(smin_vec_insn[vece], a0, a1, a2));
1907 break;
1908 case INDEX_op_smax_vec:
1909 tcg_out32(s, encode_vdvjvk_insn(smax_vec_insn[vece], a0, a1, a2));
1910 break;
1911 case INDEX_op_umin_vec:
1912 tcg_out32(s, encode_vdvjvk_insn(umin_vec_insn[vece], a0, a1, a2));
1913 break;
1914 case INDEX_op_umax_vec:
1915 tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2));
1916 break;
1917 case INDEX_op_ssadd_vec:
1918 tcg_out32(s, encode_vdvjvk_insn(ssadd_vec_insn[vece], a0, a1, a2));
1919 break;
1920 case INDEX_op_usadd_vec:
1921 tcg_out32(s, encode_vdvjvk_insn(usadd_vec_insn[vece], a0, a1, a2));
1922 break;
1923 case INDEX_op_sssub_vec:
1924 tcg_out32(s, encode_vdvjvk_insn(sssub_vec_insn[vece], a0, a1, a2));
1925 break;
1926 case INDEX_op_ussub_vec:
1927 tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2));
1928 break;
1929 case INDEX_op_shlv_vec:
1930 tcg_out32(s, encode_vdvjvk_insn(shlv_vec_insn[vece], a0, a1, a2));
1931 break;
1932 case INDEX_op_shrv_vec:
1933 tcg_out32(s, encode_vdvjvk_insn(shrv_vec_insn[vece], a0, a1, a2));
1934 break;
1935 case INDEX_op_sarv_vec:
1936 tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
1937 break;
1938 case INDEX_op_shli_vec:
1939 tcg_out32(s, encode_vdvjuk3_insn(shli_vec_insn[vece], a0, a1, a2));
1940 break;
1941 case INDEX_op_shri_vec:
1942 tcg_out32(s, encode_vdvjuk3_insn(shri_vec_insn[vece], a0, a1, a2));
1943 break;
1944 case INDEX_op_sari_vec:
1945 tcg_out32(s, encode_vdvjuk3_insn(sari_vec_insn[vece], a0, a1, a2));
1946 break;
1947 case INDEX_op_rotrv_vec:
1948 tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, a2));
1949 break;
1950 case INDEX_op_rotlv_vec:
1951 /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
1952 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], temp_vec, a2));
1953 tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1,
1954 temp_vec));
1955 break;
1956 case INDEX_op_rotli_vec:
1957 /* rotli_vec a1, a2 = rotri_vec a1, -a2 */
1958 a2 = extract32(-a2, 0, 3 + vece);
1959 switch (vece) {
1960 case MO_8:
1961 tcg_out_opc_vrotri_b(s, a0, a1, a2);
1962 break;
1963 case MO_16:
1964 tcg_out_opc_vrotri_h(s, a0, a1, a2);
1965 break;
1966 case MO_32:
1967 tcg_out_opc_vrotri_w(s, a0, a1, a2);
1968 break;
1969 case MO_64:
1970 tcg_out_opc_vrotri_d(s, a0, a1, a2);
1971 break;
1972 default:
1973 g_assert_not_reached();
1974 }
1975 break;
1976 case INDEX_op_bitsel_vec:
1977 /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
1978 tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
1979 break;
1980 case INDEX_op_dupm_vec:
1981 tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
1982 break;
1983 default:
1984 g_assert_not_reached();
1985 }
1986 }
1987
1988 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
1989 {
1990 switch (opc) {
1991 case INDEX_op_ld_vec:
1992 case INDEX_op_st_vec:
1993 case INDEX_op_dup_vec:
1994 case INDEX_op_dupm_vec:
1995 case INDEX_op_cmp_vec:
1996 case INDEX_op_add_vec:
1997 case INDEX_op_sub_vec:
1998 case INDEX_op_and_vec:
1999 case INDEX_op_andc_vec:
2000 case INDEX_op_or_vec:
2001 case INDEX_op_orc_vec:
2002 case INDEX_op_xor_vec:
2003 case INDEX_op_nor_vec:
2004 case INDEX_op_not_vec:
2005 case INDEX_op_neg_vec:
2006 case INDEX_op_mul_vec:
2007 case INDEX_op_smin_vec:
2008 case INDEX_op_smax_vec:
2009 case INDEX_op_umin_vec:
2010 case INDEX_op_umax_vec:
2011 case INDEX_op_ssadd_vec:
2012 case INDEX_op_usadd_vec:
2013 case INDEX_op_sssub_vec:
2014 case INDEX_op_ussub_vec:
2015 case INDEX_op_shlv_vec:
2016 case INDEX_op_shrv_vec:
2017 case INDEX_op_sarv_vec:
2018 case INDEX_op_bitsel_vec:
2019 return 1;
2020 default:
2021 return 0;
2022 }
2023 }
2024
2025 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2026 TCGArg a0, ...)
2027 {
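/*
 * Every vector op this backend advertises in tcg_can_emit_vec_op is
 * emitted directly, so no expansion should ever be requested.
 */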
2028 g_assert_not_reached();
2029 }
2030
2031 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2032 {
2033 switch (op) {
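/*
 * Constraint-set names encode outputs then inputs: C_O0_I2(rZ, r),
 * for example, has no outputs and two inputs, the first accepting a
 * general register or constant zero (materialized as $zero), the
 * second any general register.
 */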
2034 case INDEX_op_goto_ptr:
2035 return C_O0_I1(r);
2036
2037 case INDEX_op_st8_i32:
2038 case INDEX_op_st8_i64:
2039 case INDEX_op_st16_i32:
2040 case INDEX_op_st16_i64:
2041 case INDEX_op_st32_i64:
2042 case INDEX_op_st_i32:
2043 case INDEX_op_st_i64:
2044 case INDEX_op_qemu_st_a32_i32:
2045 case INDEX_op_qemu_st_a64_i32:
2046 case INDEX_op_qemu_st_a32_i64:
2047 case INDEX_op_qemu_st_a64_i64:
2048 return C_O0_I2(rZ, r);
2049
2050 case INDEX_op_qemu_ld_a32_i128:
2051 case INDEX_op_qemu_ld_a64_i128:
2052 return C_O2_I1(r, r, r);
2053
2054 case INDEX_op_qemu_st_a32_i128:
2055 case INDEX_op_qemu_st_a64_i128:
2056 return C_O0_I3(r, r, r);
2057
2058 case INDEX_op_brcond_i32:
2059 case INDEX_op_brcond_i64:
2060 return C_O0_I2(rZ, rZ);
2061
2062 case INDEX_op_ext8s_i32:
2063 case INDEX_op_ext8s_i64:
2064 case INDEX_op_ext8u_i32:
2065 case INDEX_op_ext8u_i64:
2066 case INDEX_op_ext16s_i32:
2067 case INDEX_op_ext16s_i64:
2068 case INDEX_op_ext16u_i32:
2069 case INDEX_op_ext16u_i64:
2070 case INDEX_op_ext32s_i64:
2071 case INDEX_op_ext32u_i64:
2072 case INDEX_op_extu_i32_i64:
2073 case INDEX_op_extrl_i64_i32:
2074 case INDEX_op_extrh_i64_i32:
2075 case INDEX_op_ext_i32_i64:
2076 case INDEX_op_not_i32:
2077 case INDEX_op_not_i64:
2078 case INDEX_op_extract_i32:
2079 case INDEX_op_extract_i64:
2080 case INDEX_op_bswap16_i32:
2081 case INDEX_op_bswap16_i64:
2082 case INDEX_op_bswap32_i32:
2083 case INDEX_op_bswap32_i64:
2084 case INDEX_op_bswap64_i64:
2085 case INDEX_op_ld8s_i32:
2086 case INDEX_op_ld8s_i64:
2087 case INDEX_op_ld8u_i32:
2088 case INDEX_op_ld8u_i64:
2089 case INDEX_op_ld16s_i32:
2090 case INDEX_op_ld16s_i64:
2091 case INDEX_op_ld16u_i32:
2092 case INDEX_op_ld16u_i64:
2093 case INDEX_op_ld32s_i64:
2094 case INDEX_op_ld32u_i64:
2095 case INDEX_op_ld_i32:
2096 case INDEX_op_ld_i64:
2097 case INDEX_op_qemu_ld_a32_i32:
2098 case INDEX_op_qemu_ld_a64_i32:
2099 case INDEX_op_qemu_ld_a32_i64:
2100 case INDEX_op_qemu_ld_a64_i64:
2101 return C_O1_I1(r, r);
2102
2103 case INDEX_op_andc_i32:
2104 case INDEX_op_andc_i64:
2105 case INDEX_op_orc_i32:
2106 case INDEX_op_orc_i64:
2107 /*
2108 * LoongArch insns for these ops don't have reg-imm forms, but we
2109 * can express them using andi/ori if ~constant satisfies
2110 * TCG_CT_CONST_U12.
2111 */
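/*
 * Illustrative: andc dst, src, C computes src & ~C, so with
 * C == 0xfffffffffffff0ff the complement 0xf00 fits U12 and the op
 * can be emitted as andi dst, src, 0xf00.
 */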
2112 return C_O1_I2(r, r, rC);
2113
2114 case INDEX_op_shl_i32:
2115 case INDEX_op_shl_i64:
2116 case INDEX_op_shr_i32:
2117 case INDEX_op_shr_i64:
2118 case INDEX_op_sar_i32:
2119 case INDEX_op_sar_i64:
2120 case INDEX_op_rotl_i32:
2121 case INDEX_op_rotl_i64:
2122 case INDEX_op_rotr_i32:
2123 case INDEX_op_rotr_i64:
2124 return C_O1_I2(r, r, ri);
2125
2126 case INDEX_op_add_i32:
2127 return C_O1_I2(r, r, ri);
2128 case INDEX_op_add_i64:
2129 return C_O1_I2(r, r, rJ);
2130
2131 case INDEX_op_and_i32:
2132 case INDEX_op_and_i64:
2133 case INDEX_op_nor_i32:
2134 case INDEX_op_nor_i64:
2135 case INDEX_op_or_i32:
2136 case INDEX_op_or_i64:
2137 case INDEX_op_xor_i32:
2138 case INDEX_op_xor_i64:
2139 /* LoongArch reg-imm bitops zero-extend their immediates. */
2140 return C_O1_I2(r, r, rU);
2141
2142 case INDEX_op_clz_i32:
2143 case INDEX_op_clz_i64:
2144 case INDEX_op_ctz_i32:
2145 case INDEX_op_ctz_i64:
2146 return C_O1_I2(r, r, rW);
2147
2148 case INDEX_op_deposit_i32:
2149 case INDEX_op_deposit_i64:
2150 /* Must deposit into the same register as the input. */
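/* The '0' input constraint ties that operand to output register 0. */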
2151 return C_O1_I2(r, 0, rZ);
2152
2153 case INDEX_op_sub_i32:
2154 case INDEX_op_setcond_i32:
2155 return C_O1_I2(r, rZ, ri);
2156 case INDEX_op_sub_i64:
2157 case INDEX_op_setcond_i64:
2158 return C_O1_I2(r, rZ, rJ);
2159
2160 case INDEX_op_mul_i32:
2161 case INDEX_op_mul_i64:
2162 case INDEX_op_mulsh_i32:
2163 case INDEX_op_mulsh_i64:
2164 case INDEX_op_muluh_i32:
2165 case INDEX_op_muluh_i64:
2166 case INDEX_op_div_i32:
2167 case INDEX_op_div_i64:
2168 case INDEX_op_divu_i32:
2169 case INDEX_op_divu_i64:
2170 case INDEX_op_rem_i32:
2171 case INDEX_op_rem_i64:
2172 case INDEX_op_remu_i32:
2173 case INDEX_op_remu_i64:
2174 return C_O1_I2(r, rZ, rZ);
2175
2176 case INDEX_op_movcond_i32:
2177 case INDEX_op_movcond_i64:
2178 return C_O1_I4(r, rZ, rJ, rZ, rZ);
2179
2180 case INDEX_op_ld_vec:
2181 case INDEX_op_dupm_vec:
2182 case INDEX_op_dup_vec:
2183 return C_O1_I1(w, r);
2184
2185 case INDEX_op_st_vec:
2186 return C_O0_I2(w, r);
2187
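/*
 * 'M' and 'A' below extend 'w' with the small immediates accepted by
 * the vector compare-immediate and vector add/sub-immediate
 * instruction forms (TCG_CT_CONST_VCMP / TCG_CT_CONST_VADD).
 */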
2188 case INDEX_op_cmp_vec:
2189 return C_O1_I2(w, w, wM);
2190
2191 case INDEX_op_add_vec:
2192 case INDEX_op_sub_vec:
2193 return C_O1_I2(w, w, wA);
2194
2195 case INDEX_op_and_vec:
2196 case INDEX_op_andc_vec:
2197 case INDEX_op_or_vec:
2198 case INDEX_op_orc_vec:
2199 case INDEX_op_xor_vec:
2200 case INDEX_op_nor_vec:
2201 case INDEX_op_mul_vec:
2202 case INDEX_op_smin_vec:
2203 case INDEX_op_smax_vec:
2204 case INDEX_op_umin_vec:
2205 case INDEX_op_umax_vec:
2206 case INDEX_op_ssadd_vec:
2207 case INDEX_op_usadd_vec:
2208 case INDEX_op_sssub_vec:
2209 case INDEX_op_ussub_vec:
2210 case INDEX_op_shlv_vec:
2211 case INDEX_op_shrv_vec:
2212 case INDEX_op_sarv_vec:
2213 case INDEX_op_rotrv_vec:
2214 case INDEX_op_rotlv_vec:
2215 return C_O1_I2(w, w, w);
2216
2217 case INDEX_op_not_vec:
2218 case INDEX_op_neg_vec:
2219 case INDEX_op_shli_vec:
2220 case INDEX_op_shri_vec:
2221 case INDEX_op_sari_vec:
2222 case INDEX_op_rotli_vec:
2223 return C_O1_I1(w, w);
2224
2225 case INDEX_op_bitsel_vec:
2226 return C_O1_I3(w, w, w, w);
2227
2228 default:
2229 g_assert_not_reached();
2230 }
2231 }
2232
2233 static const int tcg_target_callee_save_regs[] = {
2234 TCG_REG_S0, /* used for the global env (TCG_AREG0) */
2235 TCG_REG_S1,
2236 TCG_REG_S2,
2237 TCG_REG_S3,
2238 TCG_REG_S4,
2239 TCG_REG_S5,
2240 TCG_REG_S6,
2241 TCG_REG_S7,
2242 TCG_REG_S8,
2243 TCG_REG_S9,
2244 TCG_REG_RA, /* should be last for ABI compliance */
2245 };
2246
2247 /* Stack frame parameters. */
2248 #define REG_SIZE (TCG_TARGET_REG_BITS / 8)
2249 #define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
2250 #define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2251 #define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
2252 + TCG_TARGET_STACK_ALIGN - 1) \
2253 & -TCG_TARGET_STACK_ALIGN)
2254 #define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
2255
2256 /* We're expecting to be able to use an immediate for frame allocation. */
2257 QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
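/*
 * Worked example, assuming TCG_STATIC_CALL_ARGS_SIZE == 128 and
 * CPU_TEMP_BUF_NLONGS == 128: SAVE_SIZE is 11 * 8 == 88, TEMP_SIZE is
 * 1024, and FRAME_SIZE rounds 128 + 1024 + 88 == 1240 up to 1248,
 * well within the signed 12-bit addi.d immediate checked above.
 */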
2258
2259 /* Generate global QEMU prologue and epilogue code */
2260 static void tcg_target_qemu_prologue(TCGContext *s)
2261 {
2262 int i;
2263
2264 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
2265
2266 /* TB prologue */
2267 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
2268 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2269 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2270 TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2271 }
2272
2273 #if !defined(CONFIG_SOFTMMU)
2274 if (USE_GUEST_BASE) {
2275 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
2276 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2277 }
2278 #endif
2279
2280 /* Call generated code */
2281 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
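/*
 * jirl with rd == $zero is an indirect jump that discards the link;
 * the TB entry point arrives in the second argument register.
 */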
2282 tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
2283
2284 /* Return path for goto_ptr. Set return value to 0 */
2285 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2286 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);
2287
2288 /* TB epilogue */
2289 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
2290 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2291 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2292 TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2293 }
2294
2295 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
2296 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
2297 }
2298
2299 static void tcg_out_tb_start(TCGContext *s)
2300 {
2301 /* nothing to do */
2302 }
2303
2304 static void tcg_target_init(TCGContext *s)
2305 {
2306 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2307
2308 /* Server- and desktop-class CPUs have UAL; embedded CPUs do not. */
2309 if (!(hwcap & HWCAP_LOONGARCH_UAL)) {
2310 error_report("TCG: unaligned access support required; exiting");
2311 exit(EXIT_FAILURE);
2312 }
2313
2314 if (hwcap & HWCAP_LOONGARCH_LSX) {
2315 use_lsx_instructions = 1;
2316 }
2317
2318 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2319 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
2320
2321 tcg_target_call_clobber_regs = ALL_GENERAL_REGS;
2322 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
2323 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
2324 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
2325 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
2326 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
2327 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
2328 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
2329 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
2330 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
2331 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
2332
2333 if (use_lsx_instructions) {
2334 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2335 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
2336 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
2337 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
2338 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
2339 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
2340 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
2341 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
2342 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
2343 }
2344
2345 s->reserved_regs = 0;
2346 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
2347 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
2348 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
2349 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
2350 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
2351 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
2352 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
2353 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
2354 }
2355
2356 typedef struct {
2357 DebugFrameHeader h;
2358 uint8_t fde_def_cfa[4];
2359 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
2360 } DebugFrame;
2361
2362 #define ELF_HOST_MACHINE EM_LOONGARCH
2363
2364 static const DebugFrame debug_frame = {
2365 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
2366 .h.cie.id = -1,
2367 .h.cie.version = 1,
2368 .h.cie.code_align = 1,
2369 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
2370 .h.cie.return_column = TCG_REG_RA,
2371
2372 /* Total FDE size does not include the "len" member. */
2373 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2374
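/*
 * FRAME_SIZE is emitted as a two-byte uleb128: the low seven bits with
 * the continuation bit set, then the remaining high bits.
 * Illustrative, assuming FRAME_SIZE == 1248 (0x4e0): bytes 0xe0, 0x09.
 */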
2375 .fde_def_cfa = {
2376 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */
2377 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2378 (FRAME_SIZE >> 7)
2379 },
2380 .fde_reg_ofs = {
2381 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */
2382 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */
2383 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */
2384 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */
2385 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */
2386 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */
2387 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */
2388 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */
2389 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */
2390 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */
2391 0x80 + 1, 1, /* DW_CFA_offset, ra, -8 */
2392 }
2393 };
2394
2395 void tcg_register_jit(const void *buf, size_t buf_size)
2396 {
2397 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2398 }