/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2021 WANG Xuerui <git@xen0n.name>
 *
 * Based on tcg/riscv/tcg-target.c.inc
 *
 * Copyright (c) 2018 SiFive, Inc
 * Copyright (c) 2008-2009 Arnaud Patard <arnaud.patard@rtp-net.org>
 * Copyright (c) 2009 Aurelien Jarno <aurelien@aurel32.net>
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "../tcg-ldst.c.inc"
#include <asm/hwcap.h>

#ifdef CONFIG_DEBUG_TCG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero",
    "ra",
    "tp",
    "sp",
    "a0",
    "a1",
    "a2",
    "a3",
    "a4",
    "a5",
    "a6",
    "a7",
    "t0",
    "t1",
    "t2",
    "t3",
    "t4",
    "t5",
    "t6",
    "t7",
    "t8",
    "r21", /* reserved in the LP64* ABI, hence no ABI name */
    "s9",
    "s0",
    "s1",
    "s2",
    "s3",
    "s4",
    "s5",
    "s6",
    "s7",
    "s8",
    "vr0",
    "vr1",
    "vr2",
    "vr3",
    "vr4",
    "vr5",
    "vr6",
    "vr7",
    "vr8",
    "vr9",
    "vr10",
    "vr11",
    "vr12",
    "vr13",
    "vr14",
    "vr15",
    "vr16",
    "vr17",
    "vr18",
    "vr19",
    "vr20",
    "vr21",
    "vr22",
    "vr23",
    "vr24",
    "vr25",
    "vr26",
    "vr27",
    "vr28",
    "vr29",
    "vr30",
    "vr31",
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    /* Registers preserved across calls */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,

    /* Registers (potentially) clobbered across calls */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,
    TCG_REG_T7,
    TCG_REG_T8,

    /* Argument registers, opposite order of allocation. */
    TCG_REG_A7,
    TCG_REG_A6,
    TCG_REG_A5,
    TCG_REG_A4,
    TCG_REG_A3,
    TCG_REG_A2,
    TCG_REG_A1,
    TCG_REG_A0,

    /* Vector registers */
    TCG_REG_V0, TCG_REG_V1, TCG_REG_V2, TCG_REG_V3,
    TCG_REG_V4, TCG_REG_V5, TCG_REG_V6, TCG_REG_V7,
    TCG_REG_V8, TCG_REG_V9, TCG_REG_V10, TCG_REG_V11,
    TCG_REG_V12, TCG_REG_V13, TCG_REG_V14, TCG_REG_V15,
    TCG_REG_V16, TCG_REG_V17, TCG_REG_V18, TCG_REG_V19,
    TCG_REG_V20, TCG_REG_V21, TCG_REG_V22, TCG_REG_V23,
    /* V24 - V31 are caller-saved, and skipped. */
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
{
    tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
    tcg_debug_assert(slot >= 0 && slot <= 1);
    return TCG_REG_A0 + slot;
}

#define TCG_GUEST_BASE_REG TCG_REG_S1

#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_S32   0x400
#define TCG_CT_CONST_U12   0x800
#define TCG_CT_CONST_C12   0x1000
#define TCG_CT_CONST_WSZ   0x2000
#define TCG_CT_CONST_VCMP  0x4000
#define TCG_CT_CONST_VADD  0x8000

#define ALL_GENERAL_REGS   MAKE_64BIT_MASK(0, 32)
#define ALL_VECTOR_REGS    MAKE_64BIT_MASK(32, 32)

static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    return sextract64(val, pos, len);
}

/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct, int vece)
{
    if (ct & TCG_CT_CONST) {
        return true;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return true;
    }
    if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return true;
    }
    if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) {
        return true;
    }
    if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return true;
    }
    int64_t vec_val = sextract64(val, 0, 8 << vece);
    if ((ct & TCG_CT_CONST_VCMP) && -0x10 <= vec_val && vec_val <= 0x1f) {
        return true;
    }
    if ((ct & TCG_CT_CONST_VADD) && -0x1f <= vec_val && vec_val <= 0x1f) {
        return true;
    }
    return false;
}

/*
 * Relocations
 */

/*
 * The relocation records defined in the LoongArch ELF psABI v1.00 are way
 * too complicated: a whopping stack machine is needed to stuff the fields,
 * and at the very least one SOP_PUSH and one SOP_POP (of the correct
 * format) are needed.
 *
 * Hence, define our own simpler relocation types. Numbers are chosen so as
 * not to collide with potential future additions to the true ELF relocation
 * type enum.
 */

/* Field Sk16, shifted right by 2; suitable for conditional jumps */
#define R_LOONGARCH_BR_SK16     256
/* Field Sd10k16, shifted right by 2; suitable for B and BL */
#define R_LOONGARCH_BR_SD10K16  257

static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 16)) {
        *src_rw = deposit64(*src_rw, 10, 16, offset);
        return true;
    }

    return false;
}
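
/*
 * Editorial worked example (not part of the upstream file): patching a
 * conditional branch at rx address 0x1000 toward a target at 0x1040 gives
 * offset = 0x40; after the >> 2 scaling, the value 0x10 is deposited into
 * the 16-bit Sk16 field at bit 10 of the DJSk16-format instruction.  If
 * the scaled offset does not fit in 16 signed bits, the function returns
 * false and the caller must arrange a longer branch sequence instead.
 */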

static bool reloc_br_sd10k16(tcg_insn_unit *src_rw,
                             const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    intptr_t offset = (intptr_t)target - (intptr_t)src_rx;

    tcg_debug_assert((offset & 3) == 0);
    offset >>= 2;
    if (offset == sextreg(offset, 0, 26)) {
        *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */
        *src_rw = deposit64(*src_rw, 10, 16, offset);      /* slot k16 */
        return true;
    }

    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    tcg_debug_assert(addend == 0);
    switch (type) {
    case R_LOONGARCH_BR_SK16:
        return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value);
    case R_LOONGARCH_BR_SD10K16:
        return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value);
    default:
        g_assert_not_reached();
    }
}

#include "tcg-insn-defs.c.inc"

/*
 * TCG intrinsics
 */

static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    /* Baseline LoongArch only has the full barrier, unfortunately. */
    tcg_out_opc_dbar(s, 0);
}

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        /*
         * Conventional register-register move used in LoongArch is
         * `or dst, src, zero`.
         */
        tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO);
        break;
    case TCG_TYPE_V128:
        tcg_out_opc_vori_b(s, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

/* Loads a 32-bit immediate into rd, sign-extended. */
static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val)
{
    tcg_target_long lo = sextreg(val, 0, 12);
    tcg_target_long hi12 = sextreg(val, 12, 20);

    /* Single-instruction cases. */
    if (hi12 == 0) {
        /* val fits in uimm12: ori rd, zero, val */
        tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val);
        return;
    }
    if (hi12 == sextreg(lo, 12, 20)) {
        /* val fits in simm12: addi.w rd, zero, val */
        tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val);
        return;
    }

    /* High bits must be set; load with lu12i.w + optional ori. */
    tcg_out_opc_lu12i_w(s, rd, hi12);
    if (lo != 0) {
        tcg_out_opc_ori(s, rd, rd, lo & 0xfff);
    }
}
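
/*
 * Editorial worked example (not part of the upstream file): for
 * val = 0x12345678, lo = sextreg(val, 0, 12) = 0x678 and
 * hi12 = sextreg(val, 12, 20) = 0x12345, so neither single-instruction
 * case applies and the emitted sequence is:
 *
 *     lu12i.w  rd, 0x12345      # rd = 0x12345000
 *     ori      rd, rd, 0x678    # rd = 0x12345678
 */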

static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    /*
     * LoongArch conventionally loads 64-bit immediates in at most 4 steps,
     * with dedicated instructions for filling the respective bitfields
     * below:
     *
     *  6                   5                   4               3
     *  3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
     * +-----------------------+---------------------------------------+...
     * |          hi52         |                  hi32                 |
     * +-----------------------+---------------------------------------+...
     *       3                   2                   1
     *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------------+-------------------------+
     *    |                 hi12                |            lo           |
     * ...+-------------------------------------+-------------------------+
     *
     * Check if val belongs to one of the several fast cases, before falling
     * back to the slow path.
     */

    intptr_t pc_offset;
    tcg_target_long val_lo, val_hi, pc_hi, offset_hi;
    tcg_target_long hi12, hi32, hi52;

    /* Value fits in signed i32. */
    if (type == TCG_TYPE_I32 || val == (int32_t)val) {
        tcg_out_movi_i32(s, rd, val);
        return;
    }

    /* PC-relative cases. */
    pc_offset = tcg_pcrel_diff(s, (void *)val);
    if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) {
        /* Single pcaddu2i. */
        tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2);
        return;
    }

    if (pc_offset == (int32_t)pc_offset) {
        /* Offset within 32 bits; load with pcalau12i + ori. */
        val_lo = sextreg(val, 0, 12);
        val_hi = val >> 12;
        pc_hi = (val - pc_offset) >> 12;
        offset_hi = val_hi - pc_hi;

        tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20));
        tcg_out_opc_pcalau12i(s, rd, offset_hi);
        if (val_lo != 0) {
            tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff);
        }
        return;
    }

    hi12 = sextreg(val, 12, 20);
    hi32 = sextreg(val, 32, 20);
    hi52 = sextreg(val, 52, 12);

    /* Single cu52i.d case. */
    if ((hi52 != 0) && (ctz64(val) >= 52)) {
        tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52);
        return;
    }

    /* Slow path. Initialize the low 32 bits, then concat high bits. */
    tcg_out_movi_i32(s, rd, val);

    /* Load hi32 and hi52 explicitly when they are unexpected values. */
    if (hi32 != sextreg(hi12, 20, 20)) {
        tcg_out_opc_cu32i_d(s, rd, hi32);
    }

    if (hi52 != sextreg(hi32, 20, 12)) {
        tcg_out_opc_cu52i_d(s, rd, rd, hi52);
    }
}
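
/*
 * Editorial worked example (not part of the upstream file): a full 64-bit
 * constant such as 0x123456789abcdef0, assuming it matches none of the
 * fast cases above, takes the slow path and fills the bitfields from low
 * to high:
 *
 *     lu12i.w  rd, 0x9abcd      # bits 31..12, sign-extended upward
 *     ori      rd, rd, 0xef0    # bits 11..0
 *     cu32i.d  rd, 0x45678      # replaces bits 63..32
 *     cu52i.d  rd, rd, 0x123    # replaces bits 63..52
 *
 * The sextreg() comparisons above skip cu32i.d/cu52i.d whenever the
 * sign-extension of the lower field already produced the right bits.
 */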

static void tcg_out_addi(TCGContext *s, TCGType type, TCGReg rd,
                         TCGReg rs, tcg_target_long imm)
{
    tcg_target_long lo12 = sextreg(imm, 0, 12);
    tcg_target_long hi16 = sextreg(imm - lo12, 16, 16);

    /*
     * Note that there's a hole in between hi16 and lo12:
     *
     *   3                   2                   1                   0
     *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
     * ...+-------------------------------+-------+-----------------------+
     *    |             hi16              |       |          lo12         |
     * ...+-------------------------------+-------+-----------------------+
     *
     * For bits within that hole, it's more efficient to use LU12I and ADD.
     */
    if (imm == (hi16 << 16) + lo12) {
        if (hi16) {
            tcg_out_opc_addu16i_d(s, rd, rs, hi16);
            rs = rd;
        }
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_addi_w(s, rd, rs, lo12);
        } else if (lo12) {
            tcg_out_opc_addi_d(s, rd, rs, lo12);
        } else {
            tcg_out_mov(s, type, rd, rs);
        }
    } else {
        tcg_out_movi(s, type, TCG_REG_TMP0, imm);
        if (type == TCG_TYPE_I32) {
            tcg_out_opc_add_w(s, rd, rs, TCG_REG_TMP0);
        } else {
            tcg_out_opc_add_d(s, rd, rs, TCG_REG_TMP0);
        }
    }
}
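
/*
 * Editorial worked example (not part of the upstream file): for
 * imm = 0x30123, lo12 = 0x123 and hi16 = sextreg(imm - lo12, 16, 16) = 0x3,
 * so imm == (hi16 << 16) + lo12 holds and two adds suffice:
 *
 *     addu16i.d  rd, rs, 0x3      # rd = rs + 0x30000
 *     addi.d     rd, rd, 0x123    # rd = rs + 0x30123
 *
 * An immediate with bits set inside the hole, e.g. 0x12345 (bit 13 set),
 * fails the decomposition check and instead goes through tcg_out_movi
 * plus a register-register add.
 */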

static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
{
    return false;
}

static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
                             tcg_target_long imm)
{
    /* This function is only used for passing structs by reference. */
    g_assert_not_reached();
}

static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_andi(s, ret, arg, 0xff);
}

static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15);
}

static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31);
}

static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_b(s, ret, arg);
}

static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_sext_h(s, ret, arg);
}

static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_addi_w(s, ret, arg, 0);
}

static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out_ext32s(s, ret, arg);
    }
}

static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32u(s, ret, arg);
}

static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_ext32s(s, ret, arg);
}

static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc,
                           TCGReg a0, TCGReg a1, TCGReg a2,
                           bool c2, bool is_32bit)
{
    if (c2) {
        /*
         * Fast path: semantics already satisfied due to constraint and
         * insn behavior, single instruction is enough.
         */
        tcg_debug_assert(a2 == (is_32bit ? 32 : 64));
        /* all clz/ctz insns belong to DJ-format */
        tcg_out32(s, encode_dj_insn(opc, a0, a1));
        return;
    }

    tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1));
    /* a0 = a1 ? REG_TMP0 : a2 */
    tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1);
    tcg_out_opc_masknez(s, a0, a2, a1);
    tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0);
}
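
/*
 * Editorial note on the non-constant fallback above (not part of the
 * upstream file): maskeqz/masknez implement a branchless select.
 * maskeqz(x, c) yields x when c != 0 and 0 when c == 0; masknez(x, c) is
 * the complement, yielding x only when c == 0.  ORing the two results
 * therefore computes a0 = (a1 != 0) ? clz/ctz(a1) : a2, which is exactly
 * the TCG clz/ctz semantics with a fallback value.
 */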

#define SETCOND_INV    TCG_TARGET_NB_REGS
#define SETCOND_NEZ    (SETCOND_INV << 1)
#define SETCOND_FLAGS  (SETCOND_INV | SETCOND_NEZ)

static int tcg_out_setcond_int(TCGContext *s, TCGCond cond, TCGReg ret,
                               TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int flags = 0;

    switch (cond) {
    case TCG_COND_EQ:    /* -> NE  */
    case TCG_COND_GE:    /* -> LT  */
    case TCG_COND_GEU:   /* -> LTU */
    case TCG_COND_GT:    /* -> LE  */
    case TCG_COND_GTU:   /* -> LEU */
        cond = tcg_invert_cond(cond);
        flags ^= SETCOND_INV;
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_LE:
    case TCG_COND_LEU:
        /*
         * If we have a constant input, the most efficient way to implement
         * LE is by adding 1 and using LT. Watch out for wrap around for LEU.
         * We don't need to care for this for LE because the constant input
         * is still constrained to int32_t, and INT32_MAX+1 is representable
         * in the 64-bit temporary register.
         */
        if (c2) {
            if (cond == TCG_COND_LEU) {
                /* unsigned <= -1 is true */
                if (arg2 == -1) {
                    tcg_out_movi(s, TCG_TYPE_REG, ret, !(flags & SETCOND_INV));
                    return ret;
                }
                cond = TCG_COND_LTU;
            } else {
                cond = TCG_COND_LT;
            }
            arg2 += 1;
        } else {
            TCGReg tmp = arg2;
            arg2 = arg1;
            arg1 = tmp;
            cond = tcg_swap_cond(cond);    /* LE -> GE */
            cond = tcg_invert_cond(cond);  /* GE -> LT */
            flags ^= SETCOND_INV;
        }
        break;
    default:
        break;
    }

    switch (cond) {
    case TCG_COND_NE:
        flags |= SETCOND_NEZ;
        if (!c2) {
            tcg_out_opc_xor(s, ret, arg1, arg2);
        } else if (arg2 == 0) {
            ret = arg1;
        } else if (arg2 >= 0 && arg2 <= 0xfff) {
            tcg_out_opc_xori(s, ret, arg1, arg2);
        } else {
            tcg_out_addi(s, TCG_TYPE_REG, ret, arg1, -arg2);
        }
        break;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        if (c2) {
            if (arg2 >= -0x800 && arg2 <= 0x7ff) {
                if (cond == TCG_COND_LT) {
                    tcg_out_opc_slti(s, ret, arg1, arg2);
                } else {
                    tcg_out_opc_sltui(s, ret, arg1, arg2);
                }
                break;
            }
            tcg_out_movi(s, TCG_TYPE_REG, TCG_REG_TMP0, arg2);
            arg2 = TCG_REG_TMP0;
        }
        if (cond == TCG_COND_LT) {
            tcg_out_opc_slt(s, ret, arg1, arg2);
        } else {
            tcg_out_opc_sltu(s, ret, arg1, arg2);
        }
        break;

    default:
        g_assert_not_reached();
        break;
    }

    return ret | flags;
}

static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg arg1, tcg_target_long arg2, bool c2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, ret, arg1, arg2, c2);

    if (tmpflags != ret) {
        TCGReg tmp = tmpflags & ~SETCOND_FLAGS;

        switch (tmpflags & SETCOND_FLAGS) {
        case SETCOND_INV:
            /* Intermediate result is boolean: simply invert. */
            tcg_out_opc_xori(s, ret, tmp, 1);
            break;
        case SETCOND_NEZ:
            /* Intermediate result is zero/non-zero: test != 0. */
            tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp);
            break;
        case SETCOND_NEZ | SETCOND_INV:
            /* Intermediate result is zero/non-zero: test == 0. */
            tcg_out_opc_sltui(s, ret, tmp, 1);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
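
/*
 * Editorial worked example (not part of the upstream file):
 * tcg_out_setcond_int() returns the intermediate result register with the
 * two flag bits OR'ed in above TCG_TARGET_NB_REGS.  For setcond(EQ, r, a, b)
 * the condition is first inverted to NE and computed as "xor r, a, b",
 * returning r | SETCOND_NEZ | SETCOND_INV: the XOR result is zero/non-zero
 * rather than boolean, and the pending inversion turns the "!= 0" test
 * into "== 0", which the final "sltui r, r, 1" above realizes.  When no
 * flags are set and the returned register equals ret, no fixup is needed.
 */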

static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
                            TCGReg c1, tcg_target_long c2, bool const2,
                            TCGReg v1, TCGReg v2)
{
    int tmpflags = tcg_out_setcond_int(s, cond, TCG_REG_TMP0, c1, c2, const2);
    TCGReg t;

    /* Standardize the test below to t != 0. */
    if (tmpflags & SETCOND_INV) {
        t = v1, v1 = v2, v2 = t;
    }

    t = tmpflags & ~SETCOND_FLAGS;
    if (v1 == TCG_REG_ZERO) {
        tcg_out_opc_masknez(s, ret, v2, t);
    } else if (v2 == TCG_REG_ZERO) {
        tcg_out_opc_maskeqz(s, ret, v1, t);
    } else {
        tcg_out_opc_masknez(s, TCG_REG_TMP2, v2, t); /* t ? 0 : v2 */
        tcg_out_opc_maskeqz(s, TCG_REG_TMP1, v1, t); /* t ? v1 : 0 */
        tcg_out_opc_or(s, ret, TCG_REG_TMP1, TCG_REG_TMP2);
    }
}

/*
 * Branch helpers
 */

static const struct {
    LoongArchInsn op;
    bool swap;
} tcg_brcond_to_loongarch[] = {
    [TCG_COND_EQ]  = { OPC_BEQ,  false },
    [TCG_COND_NE]  = { OPC_BNE,  false },
    [TCG_COND_LT]  = { OPC_BGT,  true  },
    [TCG_COND_GE]  = { OPC_BLE,  true  },
    [TCG_COND_LE]  = { OPC_BLE,  false },
    [TCG_COND_GT]  = { OPC_BGT,  false },
    [TCG_COND_LTU] = { OPC_BGTU, true  },
    [TCG_COND_GEU] = { OPC_BLEU, true  },
    [TCG_COND_LEU] = { OPC_BLEU, false },
    [TCG_COND_GTU] = { OPC_BGTU, false }
};

static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1,
                           TCGReg arg2, TCGLabel *l)
{
    LoongArchInsn op = tcg_brcond_to_loongarch[cond].op;

    tcg_debug_assert(op != 0);

    if (tcg_brcond_to_loongarch[cond].swap) {
        TCGReg t = arg1;
        arg1 = arg2;
        arg2 = t;
    }

    /* all conditional branch insns belong to DJSk16-format */
    tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0);
    tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0));
}

static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
{
    TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
    ptrdiff_t offset = tcg_pcrel_diff(s, arg);

    tcg_debug_assert((offset & 3) == 0);
    if (offset == sextreg(offset, 0, 28)) {
        /* short jump: +/- 256MiB */
        if (tail) {
            tcg_out_opc_b(s, offset >> 2);
        } else {
            tcg_out_opc_bl(s, offset >> 2);
        }
    } else if (offset == sextreg(offset, 0, 38)) {
        /* long jump: +/- 256GiB */
        tcg_target_long lo = sextreg(offset, 0, 18);
        tcg_target_long hi = offset - lo;
        tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    } else {
        /* far jump: 64-bit */
        tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18);
        tcg_target_long hi = (tcg_target_long)arg - lo;
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi);
        tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2);
    }
}
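
/*
 * Editorial worked example (not part of the upstream file): a "long jump"
 * with offset = 0x10012340 (past the +/- 256 MiB range of B/BL) splits
 * into lo = sextreg(offset, 0, 18) = 0x12340 and hi = offset - lo =
 * 0x10000000, emitting:
 *
 *     pcaddu18i  tmp0, 0x400        # tmp0 = pc + (0x400 << 18) = pc + hi
 *     jirl       ra, tmp0, 0x48d0   # jump to tmp0 + (0x48d0 << 2) = pc + offset
 *
 * jirl's 16-bit immediate is a word offset, hence the lo >> 2 above; the
 * tail-call variant links to the zero register instead of ra.
 */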

static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, arg, false);
}

/*
 * Load/store helpers
 */

static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = tcg_pcrel_diff(s, (void *)offset);

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_LD_B:
    case OPC_LD_BU:
    case OPC_LD_H:
    case OPC_LD_HU:
    case OPC_LD_W:
    case OPC_LD_WU:
    case OPC_LD_D:
    case OPC_ST_B:
    case OPC_ST_H:
    case OPC_ST_W:
    case OPC_ST_D:
        tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12));
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is_32bit = type == TCG_TYPE_I32;
    tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is_32bit = type == TCG_TYPE_I32;
    tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2);
}

static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

/*
 * Load/store helpers for SoftMMU, and qemu_ld/st implementations
 */

static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
    tcg_out_opc_b(s, 0);
    return reloc_br_sd10k16(s->code_ptr - 1, target);
}

static const TCGLdstHelperParam ldst_helper_param = {
    .ntmp = 1, .tmp = { TCG_REG_TMP0 }
};

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_ld_helpers[opc & MO_SIZE], false);
    tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param);
    return tcg_out_goto(s, l->raddr);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    MemOp opc = get_memop(l->oi);

    /* resolve label address */
    if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_st_helper_args(s, l, &ldst_helper_param);
    tcg_out_call_int(s, qemu_st_helpers[opc & MO_SIZE], false);
    return tcg_out_goto(s, l->raddr);
}

typedef struct {
    TCGReg base;
    TCGReg index;
    TCGAtomAlign aa;
} HostAddress;

bool tcg_target_has_memory_bswap(MemOp memop)
{
    return false;
}

/* We expect to use a 12-bit negative offset from ENV. */
#define MIN_TLB_MASK_TABLE_OFS  -(1 << 11)

/*
 * For system-mode, perform the TLB load and compare.
 * For user-mode, perform any required alignment tests.
 * In both cases, return a TCGLabelQemuLdst structure if the slow path
 * is required and fill in @h with the host address for the fast path.
 */
static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
                                           TCGReg addr_reg, MemOpIdx oi,
                                           bool is_ld)
{
    TCGType addr_type = s->addr_type;
    TCGLabelQemuLdst *ldst = NULL;
    MemOp opc = get_memop(oi);
    MemOp a_bits;

    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
    a_bits = h->aa.align;

    if (tcg_use_softmmu) {
        unsigned s_bits = opc & MO_SIZE;
        int mem_index = get_mmuidx(oi);
        int fast_ofs = tlb_mask_table_ofs(s, mem_index);
        int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
        int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);

        ldst = new_ldst_label(s);
        ldst->is_ld = is_ld;
        ldst->oi = oi;
        ldst->addrlo_reg = addr_reg;

        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);

        tcg_out_opc_srli_d(s, TCG_REG_TMP2, addr_reg,
                           s->page_bits - CPU_TLB_ENTRY_BITS);
        tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
        tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

        /* Load the tlb comparator and the addend. */
        QEMU_BUILD_BUG_ON(HOST_BIG_ENDIAN);
        tcg_out_ld(s, addr_type, TCG_REG_TMP0, TCG_REG_TMP2,
                   is_ld ? offsetof(CPUTLBEntry, addr_read)
                         : offsetof(CPUTLBEntry, addr_write));
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
                   offsetof(CPUTLBEntry, addend));

        /*
         * For aligned accesses, we check the first byte and include the
         * alignment bits within the address. For unaligned access, we
         * check that we don't cross pages using the address of the last
         * byte of the access.
         */
        if (a_bits < s_bits) {
            unsigned a_mask = (1u << a_bits) - 1;
            unsigned s_mask = (1u << s_bits) - 1;
            tcg_out_addi(s, addr_type, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
        } else {
            tcg_out_mov(s, addr_type, TCG_REG_TMP1, addr_reg);
        }
        tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
                              a_bits, s->page_bits - 1);

        /* Compare masked address with the TLB entry. */
        ldst->label_ptr[0] = s->code_ptr;
        tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0);

        h->index = TCG_REG_TMP2;
    } else {
        if (a_bits) {
            ldst = new_ldst_label(s);

            ldst->is_ld = is_ld;
            ldst->oi = oi;
            ldst->addrlo_reg = addr_reg;

            /*
             * Without micro-architecture details, we don't know which of
             * bstrpick or andi is faster, so use bstrpick as it's not
             * constrained by imm field width. Not to say alignments >= 2^12
             * are going to happen any time soon.
             */
            tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1);

            ldst->label_ptr[0] = s->code_ptr;
            tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0);
        }

        h->index = guest_base ? TCG_GUEST_BASE_REG : TCG_REG_ZERO;
    }

    if (addr_type == TCG_TYPE_I32) {
        h->base = TCG_REG_TMP0;
        tcg_out_ext32u(s, h->base, addr_reg);
    } else {
        h->base = addr_reg;
    }

    return ldst;
}
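
/*
 * Editorial sketch of the softmmu fast path above (not part of the
 * upstream file; assumes 4 KiB guest pages, i.e. page_bits == 12, and a
 * 32-byte CPUTLBEntry, i.e. CPU_TLB_ENTRY_BITS == 5): for a guest address
 * A, the srli.d/and pair computes (A >> 7) & mask, where the mask is
 * pre-scaled by the entry size so the result is a byte offset into the
 * TLB array; add.d then forms the entry address.  TMP1 receives A with
 * the low alignment bits cleared through bstrins.d (or the last-byte
 * address for unaligned accesses), and the bne against the loaded
 * comparator falls through only on a TLB hit, leaving addend + A as the
 * base/index pair for the fast-path access.
 */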

static void tcg_out_qemu_ld_indexed(TCGContext *s, MemOp opc, TCGType type,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SSIZE) {
    case MO_UB:
        tcg_out_opc_ldx_bu(s, rd, h.base, h.index);
        break;
    case MO_SB:
        tcg_out_opc_ldx_b(s, rd, h.base, h.index);
        break;
    case MO_UW:
        tcg_out_opc_ldx_hu(s, rd, h.base, h.index);
        break;
    case MO_SW:
        tcg_out_opc_ldx_h(s, rd, h.base, h.index);
        break;
    case MO_UL:
        if (type == TCG_TYPE_I64) {
            tcg_out_opc_ldx_wu(s, rd, h.base, h.index);
            break;
        }
        /* fallthrough */
    case MO_SL:
        tcg_out_opc_ldx_w(s, rd, h.base, h.index);
        break;
    case MO_UQ:
        tcg_out_opc_ldx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, true);
    tcg_out_qemu_ld_indexed(s, get_memop(oi), data_type, data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_st_indexed(TCGContext *s, MemOp opc,
                                    TCGReg rd, HostAddress h)
{
    /* Byte swapping is left to middle-end expansion. */
    tcg_debug_assert((opc & MO_BSWAP) == 0);

    switch (opc & MO_SIZE) {
    case MO_8:
        tcg_out_opc_stx_b(s, rd, h.base, h.index);
        break;
    case MO_16:
        tcg_out_opc_stx_h(s, rd, h.base, h.index);
        break;
    case MO_32:
        tcg_out_opc_stx_w(s, rd, h.base, h.index);
        break;
    case MO_64:
        tcg_out_opc_stx_d(s, rd, h.base, h.index);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                            MemOpIdx oi, TCGType data_type)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, false);
    tcg_out_qemu_st_indexed(s, get_memop(oi), data_reg, h);

    if (ldst) {
        ldst->type = data_type;
        ldst->datalo_reg = data_reg;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg data_lo, TCGReg data_hi,
                                   TCGReg addr_reg, MemOpIdx oi, bool is_ld)
{
    TCGLabelQemuLdst *ldst;
    HostAddress h;

    ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);

    if (h.aa.atom == MO_128) {
        /*
         * Use VLDX/VSTX when 128-bit atomicity is required.
         * If address is aligned to 16-bytes, the 128-bit load/store is atomic.
         */
        if (is_ld) {
            tcg_out_opc_vldx(s, TCG_VEC_TMP0, h.base, h.index);
            tcg_out_opc_vpickve2gr_d(s, data_lo, TCG_VEC_TMP0, 0);
            tcg_out_opc_vpickve2gr_d(s, data_hi, TCG_VEC_TMP0, 1);
        } else {
            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_lo, 0);
            tcg_out_opc_vinsgr2vr_d(s, TCG_VEC_TMP0, data_hi, 1);
            tcg_out_opc_vstx(s, TCG_VEC_TMP0, h.base, h.index);
        }
    } else {
        /* Otherwise use a pair of LD/ST. */
        TCGReg base = h.base;
        if (h.index != TCG_REG_ZERO) {
            base = TCG_REG_TMP0;
            tcg_out_opc_add_d(s, base, h.base, h.index);
        }
        if (is_ld) {
            tcg_debug_assert(base != data_lo);
            tcg_out_opc_ld_d(s, data_lo, base, 0);
            tcg_out_opc_ld_d(s, data_hi, base, 8);
        } else {
            tcg_out_opc_st_d(s, data_lo, base, 0);
            tcg_out_opc_st_d(s, data_hi, base, 8);
        }
    }

    if (ldst) {
        ldst->type = TCG_TYPE_I128;
        ldst->datalo_reg = data_lo;
        ldst->datahi_reg = data_hi;
        ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
    }
}

/*
 * Entry-points
 */

static const tcg_insn_unit *tb_ret_addr;

static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
{
    /* Reuse the zeroing that exists for goto_ptr. */
    if (a0 == 0) {
        tcg_out_call_int(s, tcg_code_gen_epilogue, true);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
        tcg_out_call_int(s, tb_ret_addr, true);
    }
}

static void tcg_out_goto_tb(TCGContext *s, int which)
{
    /*
     * Direct branch, or load indirect address, to be patched
     * by tb_target_set_jmp_target. Check indirect load offset
     * in range early, regardless of direct branch distance,
     * via assert within tcg_out_opc_pcaddu2i.
     */
    uintptr_t i_addr = get_jmp_target_addr(s, which);
    intptr_t i_disp = tcg_pcrel_diff(s, (void *)i_addr);

    set_jmp_insn_offset(s, which);
    tcg_out_opc_pcaddu2i(s, TCG_REG_TMP0, i_disp >> 2);

    /* Finish the load and indirect branch. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_TMP0, 0);
    tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0);
    set_jmp_reset_offset(s, which);
}

void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
                              uintptr_t jmp_rx, uintptr_t jmp_rw)
{
    uintptr_t d_addr = tb->jmp_target_addr[n];
    ptrdiff_t d_disp = (ptrdiff_t)(d_addr - jmp_rx) >> 2;
    tcg_insn_unit insn;

    /* Either directly branch, or load slot address for indirect branch. */
    if (d_disp == sextreg(d_disp, 0, 26)) {
        insn = encode_sd10k16_insn(OPC_B, d_disp);
    } else {
        uintptr_t i_addr = (uintptr_t)&tb->jmp_target_addr[n];
        intptr_t i_disp = i_addr - jmp_rx;
        insn = encode_dsj20_insn(OPC_PCADDU2I, TCG_REG_TMP0, i_disp >> 2);
    }

    qatomic_set((tcg_insn_unit *)jmp_rw, insn);
    flush_idcache_range(jmp_rx, jmp_rw, 4);
}
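
/*
 * Editorial note (not part of the upstream file): tcg_out_goto_tb always
 * reserves exactly one patchable instruction slot.  When the new
 * destination's word displacement fits in the 26-bit Sd10k16 field
 * (+/- 128 MiB), tb_target_set_jmp_target rewrites the slot as a direct
 * "b d_disp" that skips the following ld.d/jirl; otherwise the slot
 * becomes "pcaddu2i tmp0, i_disp >> 2" pointing at the jmp_target_addr
 * slot, so the ld.d/jirl pair performs an indirect branch.  The 4-byte
 * qatomic_set plus icache flush keeps concurrent execution safe.
 */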

static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    TCGArg a3 = args[3];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0),
                      0);
        tcg_out_opc_b(s, 0);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;

    case INDEX_op_extrh_i64_i32:
        tcg_out_opc_srai_d(s, a0, a1, 32);
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO);
        break;

    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
            tcg_out_opc_nor(s, a0, a0, TCG_REG_ZERO);
        } else {
            tcg_out_opc_nor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_andc_i32:
    case INDEX_op_andc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_andi(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_andn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_orc_i32:
    case INDEX_op_orc_i64:
        if (c2) {
            /* guaranteed to fit due to constraint */
            tcg_out_opc_ori(s, a0, a1, ~a2);
        } else {
            tcg_out_opc_orn(s, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_andi(s, a0, a1, a2);
        } else {
            tcg_out_opc_and(s, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_ori(s, a0, a1, a2);
        } else {
            tcg_out_opc_or(s, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_xori(s, a0, a1, a2);
        } else {
            tcg_out_opc_xor(s, a0, a1, a2);
        }
        break;

    case INDEX_op_extract_i32:
        tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1);
        break;
    case INDEX_op_extract_i64:
        tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1);
        break;

    case INDEX_op_deposit_i32:
        tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;
    case INDEX_op_deposit_i64:
        tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        tcg_out_opc_revb_2h(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext16s(s, TCG_TYPE_REG, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext16u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap32_i32:
        /* All 32-bit values are computed sign-extended in the register. */
        a2 = TCG_BSWAP_OS;
        /* fallthrough */
    case INDEX_op_bswap32_i64:
        tcg_out_opc_revb_2w(s, a0, a1);
        if (a2 & TCG_BSWAP_OS) {
            tcg_out_ext32s(s, a0, a0);
        } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext32u(s, a0, a0);
        }
        break;

    case INDEX_op_bswap64_i64:
        tcg_out_opc_revb_d(s, a0, a1);
        break;

    case INDEX_op_clz_i32:
        tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_clz_i64:
        tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false);
        break;

    case INDEX_op_ctz_i32:
        tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sll_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sll_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_srl_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_srl_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_sra_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_sra_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_rotl_i32:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0);
        }
        break;
    case INDEX_op_rotl_i64:
        /* transform into equivalent rotr/rotri */
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f);
        } else {
            tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2);
            tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0);
        }
        break;

    case INDEX_op_rotr_i32:
        if (c2) {
            tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f);
        } else {
            tcg_out_opc_rotr_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_rotr_i64:
        if (c2) {
            tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f);
        } else {
            tcg_out_opc_rotr_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, a2);
        } else {
            tcg_out_opc_add_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, a2);
        } else {
            tcg_out_opc_add_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I32, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_w(s, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_addi(s, TCG_TYPE_I64, a0, a1, -a2);
        } else {
            tcg_out_opc_sub_d(s, a0, a1, a2);
        }
        break;

    case INDEX_op_neg_i32:
        tcg_out_opc_sub_w(s, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_sub_d(s, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_mul_w(s, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_mul_d(s, a0, a1, a2);
        break;

    case INDEX_op_mulsh_i32:
        tcg_out_opc_mulh_w(s, a0, a1, a2);
        break;
    case INDEX_op_mulsh_i64:
        tcg_out_opc_mulh_d(s, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
        tcg_out_opc_mulh_wu(s, a0, a1, a2);
        break;
    case INDEX_op_muluh_i64:
        tcg_out_opc_mulh_du(s, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_div_w(s, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_div_d(s, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_div_wu(s, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_div_du(s, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_mod_w(s, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_mod_d(s, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_mod_wu(s, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_mod_du(s, a0, a1, a2);
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2, c2);
        break;

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, args[5], a0, a1, a2, c2, args[3], args[4]);
        break;

    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LD_B, a0, a1, a2);
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LD_H, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LD_W, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD_D, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_ST_B, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_ST_H, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_ST_W, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_ST_D, a0, a1, a2);
        break;

    case INDEX_op_qemu_ld_a32_i32:
    case INDEX_op_qemu_ld_a64_i32:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_ld_a32_i64:
    case INDEX_op_qemu_ld_a64_i64:
        tcg_out_qemu_ld(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_ld_a32_i128:
    case INDEX_op_qemu_ld_a64_i128:
        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, true);
        break;
    case INDEX_op_qemu_st_a32_i32:
    case INDEX_op_qemu_st_a64_i32:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I32);
        break;
    case INDEX_op_qemu_st_a32_i64:
    case INDEX_op_qemu_st_a64_i64:
        tcg_out_qemu_st(s, a0, a1, a2, TCG_TYPE_I64);
        break;
    case INDEX_op_qemu_st_a32_i128:
    case INDEX_op_qemu_st_a64_i128:
        tcg_out_qemu_ldst_i128(s, a0, a1, a2, a3, false);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:  /* Always emitted via tcg_out_exit_tb. */
    case INDEX_op_goto_tb:  /* Always emitted via tcg_out_goto_tb. */
    case INDEX_op_ext8s_i32:  /* Always emitted via tcg_reg_alloc_op. */
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    default:
        g_assert_not_reached();
    }
}

static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg rd, TCGReg rs)
{
    switch (vece) {
    case MO_8:
        tcg_out_opc_vreplgr2vr_b(s, rd, rs);
        break;
    case MO_16:
        tcg_out_opc_vreplgr2vr_h(s, rd, rs);
        break;
    case MO_32:
        tcg_out_opc_vreplgr2vr_w(s, rd, rs);
        break;
    case MO_64:
        tcg_out_opc_vreplgr2vr_d(s, rd, rs);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg r, TCGReg base, intptr_t offset)
{
    /* Handle imm overflow and division (vldrepl.d imm is divided by 8) */
    if (offset < -0x800 || offset > 0x7ff ||
        (offset & ((1 << vece) - 1)) != 0) {
        tcg_out_addi(s, TCG_TYPE_I64, TCG_REG_TMP0, base, offset);
        base = TCG_REG_TMP0;
        offset = 0;
    }
    offset >>= vece;

    switch (vece) {
    case MO_8:
        tcg_out_opc_vldrepl_b(s, r, base, offset);
        break;
    case MO_16:
        tcg_out_opc_vldrepl_h(s, r, base, offset);
        break;
    case MO_32:
        tcg_out_opc_vldrepl_w(s, r, base, offset);
        break;
    case MO_64:
        tcg_out_opc_vldrepl_d(s, r, base, offset);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}

static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg rd, int64_t v64)
{
    /* Try vldi if imm can fit */
    int64_t value = sextract64(v64, 0, 8 << vece);
    if (-0x200 <= value && value <= 0x1FF) {
        uint32_t imm = (vece << 10) | ((uint32_t)v64 & 0x3FF);
        tcg_out_opc_vldi(s, rd, imm);
        return;
    }

    /* TODO: vldi patterns when imm 12 is set */

    /* Fallback to vreplgr2vr */
    tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP0, value);
    switch (vece) {
    case MO_8:
        tcg_out_opc_vreplgr2vr_b(s, rd, TCG_REG_TMP0);
        break;
    case MO_16:
        tcg_out_opc_vreplgr2vr_h(s, rd, TCG_REG_TMP0);
        break;
    case MO_32:
        tcg_out_opc_vreplgr2vr_w(s, rd, TCG_REG_TMP0);
        break;
    case MO_64:
        tcg_out_opc_vreplgr2vr_d(s, rd, TCG_REG_TMP0);
        break;
    default:
        g_assert_not_reached();
    }
}
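
/*
 * Editorial worked example (not part of the upstream file): vldi's 13-bit
 * immediate here packs the element size into bits 11:10 above a 10-bit
 * signed payload (bit 12 selects the extended pattern modes, unused by
 * this code, per the TODO above).  For a MO_16 splat of 0x0012 the value
 * fits in [-0x200, 0x1ff], so imm = (MO_16 << 10) | 0x012 and a single
 * "vldi vd, imm" replicates the constant; out-of-range values fall back
 * to loading TMP0 and vreplgr2vr.h as above.
 */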

static void tcg_out_addsub_vec(TCGContext *s, unsigned vece, const TCGArg a0,
                               const TCGArg a1, const TCGArg a2,
                               bool a2_is_const, bool is_add)
{
    static const LoongArchInsn add_vec_insn[4] = {
        OPC_VADD_B, OPC_VADD_H, OPC_VADD_W, OPC_VADD_D
    };
    static const LoongArchInsn add_vec_imm_insn[4] = {
        OPC_VADDI_BU, OPC_VADDI_HU, OPC_VADDI_WU, OPC_VADDI_DU
    };
    static const LoongArchInsn sub_vec_insn[4] = {
        OPC_VSUB_B, OPC_VSUB_H, OPC_VSUB_W, OPC_VSUB_D
    };
    static const LoongArchInsn sub_vec_imm_insn[4] = {
        OPC_VSUBI_BU, OPC_VSUBI_HU, OPC_VSUBI_WU, OPC_VSUBI_DU
    };

    if (a2_is_const) {
        int64_t value = sextract64(a2, 0, 8 << vece);
        if (!is_add) {
            value = -value;
        }

        /* Try vaddi/vsubi */
        if (0 <= value && value <= 0x1f) {
            tcg_out32(s, encode_vdvjuk5_insn(add_vec_imm_insn[vece], a0,
                                             a1, value));
            return;
        } else if (-0x1f <= value && value < 0) {
            tcg_out32(s, encode_vdvjuk5_insn(sub_vec_imm_insn[vece], a0,
                                             a1, -value));
            return;
        }

        /* constraint TCG_CT_CONST_VADD ensures unreachable */
        g_assert_not_reached();
    }

    if (is_add) {
        tcg_out32(s, encode_vdvjvk_insn(add_vec_insn[vece], a0, a1, a2));
    } else {
        tcg_out32(s, encode_vdvjvk_insn(sub_vec_insn[vece], a0, a1, a2));
    }
}

static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0, a1, a2, a3;
    TCGReg temp = TCG_REG_TMP0;
    TCGReg temp_vec = TCG_VEC_TMP0;

    static const LoongArchInsn cmp_vec_insn[16][4] = {
        [TCG_COND_EQ]  = {OPC_VSEQ_B, OPC_VSEQ_H, OPC_VSEQ_W, OPC_VSEQ_D},
        [TCG_COND_LE]  = {OPC_VSLE_B, OPC_VSLE_H, OPC_VSLE_W, OPC_VSLE_D},
        [TCG_COND_LEU] = {OPC_VSLE_BU, OPC_VSLE_HU, OPC_VSLE_WU, OPC_VSLE_DU},
        [TCG_COND_LT]  = {OPC_VSLT_B, OPC_VSLT_H, OPC_VSLT_W, OPC_VSLT_D},
        [TCG_COND_LTU] = {OPC_VSLT_BU, OPC_VSLT_HU, OPC_VSLT_WU, OPC_VSLT_DU},
    };
    static const LoongArchInsn cmp_vec_imm_insn[16][4] = {
        [TCG_COND_EQ]  = {OPC_VSEQI_B, OPC_VSEQI_H, OPC_VSEQI_W, OPC_VSEQI_D},
        [TCG_COND_LE]  = {OPC_VSLEI_B, OPC_VSLEI_H, OPC_VSLEI_W, OPC_VSLEI_D},
        [TCG_COND_LEU] = {OPC_VSLEI_BU, OPC_VSLEI_HU, OPC_VSLEI_WU, OPC_VSLEI_DU},
        [TCG_COND_LT]  = {OPC_VSLTI_B, OPC_VSLTI_H, OPC_VSLTI_W, OPC_VSLTI_D},
        [TCG_COND_LTU] = {OPC_VSLTI_BU, OPC_VSLTI_HU, OPC_VSLTI_WU, OPC_VSLTI_DU},
    };
    LoongArchInsn insn;
    static const LoongArchInsn neg_vec_insn[4] = {
        OPC_VNEG_B, OPC_VNEG_H, OPC_VNEG_W, OPC_VNEG_D
    };
    static const LoongArchInsn mul_vec_insn[4] = {
        OPC_VMUL_B, OPC_VMUL_H, OPC_VMUL_W, OPC_VMUL_D
    };
    static const LoongArchInsn smin_vec_insn[4] = {
        OPC_VMIN_B, OPC_VMIN_H, OPC_VMIN_W, OPC_VMIN_D
    };
    static const LoongArchInsn umin_vec_insn[4] = {
        OPC_VMIN_BU, OPC_VMIN_HU, OPC_VMIN_WU, OPC_VMIN_DU
    };
    static const LoongArchInsn smax_vec_insn[4] = {
        OPC_VMAX_B, OPC_VMAX_H, OPC_VMAX_W, OPC_VMAX_D
    };
    static const LoongArchInsn umax_vec_insn[4] = {
        OPC_VMAX_BU, OPC_VMAX_HU, OPC_VMAX_WU, OPC_VMAX_DU
    };
    static const LoongArchInsn ssadd_vec_insn[4] = {
        OPC_VSADD_B, OPC_VSADD_H, OPC_VSADD_W, OPC_VSADD_D
    };
    static const LoongArchInsn usadd_vec_insn[4] = {
        OPC_VSADD_BU, OPC_VSADD_HU, OPC_VSADD_WU, OPC_VSADD_DU
    };
    static const LoongArchInsn sssub_vec_insn[4] = {
        OPC_VSSUB_B, OPC_VSSUB_H, OPC_VSSUB_W, OPC_VSSUB_D
    };
    static const LoongArchInsn ussub_vec_insn[4] = {
        OPC_VSSUB_BU, OPC_VSSUB_HU, OPC_VSSUB_WU, OPC_VSSUB_DU
    };
    static const LoongArchInsn shlv_vec_insn[4] = {
        OPC_VSLL_B, OPC_VSLL_H, OPC_VSLL_W, OPC_VSLL_D
    };
    static const LoongArchInsn shrv_vec_insn[4] = {
        OPC_VSRL_B, OPC_VSRL_H, OPC_VSRL_W, OPC_VSRL_D
    };
    static const LoongArchInsn sarv_vec_insn[4] = {
        OPC_VSRA_B, OPC_VSRA_H, OPC_VSRA_W, OPC_VSRA_D
    };
    static const LoongArchInsn shli_vec_insn[4] = {
        OPC_VSLLI_B, OPC_VSLLI_H, OPC_VSLLI_W, OPC_VSLLI_D
    };
    static const LoongArchInsn shri_vec_insn[4] = {
        OPC_VSRLI_B, OPC_VSRLI_H, OPC_VSRLI_W, OPC_VSRLI_D
    };
    static const LoongArchInsn sari_vec_insn[4] = {
        OPC_VSRAI_B, OPC_VSRAI_H, OPC_VSRAI_W, OPC_VSRAI_D
    };
    static const LoongArchInsn rotrv_vec_insn[4] = {
        OPC_VROTR_B, OPC_VROTR_H, OPC_VROTR_W, OPC_VROTR_D
    };

    a0 = args[0];
    a1 = args[1];
    a2 = args[2];
    a3 = args[3];

    /* Currently only supports V128 */
    tcg_debug_assert(type == TCG_TYPE_V128);

    switch (opc) {
    case INDEX_op_st_vec:
        /* Try to fit vst imm */
        if (-0x800 <= a2 && a2 <= 0x7ff) {
            tcg_out_opc_vst(s, a0, a1, a2);
        } else {
            tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
            tcg_out_opc_vstx(s, a0, a1, temp);
        }
        break;
    case INDEX_op_ld_vec:
        /* Try to fit vld imm */
        if (-0x800 <= a2 && a2 <= 0x7ff) {
            tcg_out_opc_vld(s, a0, a1, a2);
        } else {
            tcg_out_movi(s, TCG_TYPE_I64, temp, a2);
            tcg_out_opc_vldx(s, a0, a1, temp);
        }
        break;
    case INDEX_op_and_vec:
        tcg_out_opc_vand_v(s, a0, a1, a2);
        break;
    case INDEX_op_andc_vec:
        /*
         * vandn vd, vj, vk: vd = vk & ~vj
         * andc_vec vd, vj, vk: vd = vj & ~vk
         * vj and vk are swapped
         */
        tcg_out_opc_vandn_v(s, a0, a2, a1);
        break;
1850 case INDEX_op_or_vec:
1851 tcg_out_opc_vor_v(s, a0, a1, a2);
1852 break;
1853 case INDEX_op_orc_vec:
1854 tcg_out_opc_vorn_v(s, a0, a1, a2);
1855 break;
1856 case INDEX_op_xor_vec:
1857 tcg_out_opc_vxor_v(s, a0, a1, a2);
1858 break;
1859 case INDEX_op_nor_vec:
1860 tcg_out_opc_vnor_v(s, a0, a1, a2);
1861 break;
1862 case INDEX_op_not_vec:
1863 tcg_out_opc_vnor_v(s, a0, a1, a1);
1864 break;
1865 case INDEX_op_cmp_vec:
1866 {
1867 TCGCond cond = args[3];
1868 if (const_args[2]) {
1869 /*
1870 * cmp_vec dest, src, value
1871 * Try vseqi/vslei/vslti
1872 */
1873 int64_t value = sextract64(a2, 0, 8 << vece);
1874 if ((cond == TCG_COND_EQ || cond == TCG_COND_LE || \
1875 cond == TCG_COND_LT) && (-0x10 <= value && value <= 0x0f)) {
1876 tcg_out32(s, encode_vdvjsk5_insn(cmp_vec_imm_insn[cond][vece], \
1877 a0, a1, value));
1878 break;
1879 } else if ((cond == TCG_COND_LEU || cond == TCG_COND_LTU) &&
1880 (0x00 <= value && value <= 0x1f)) {
1881 tcg_out32(s, encode_vdvjuk5_insn(cmp_vec_imm_insn[cond][vece], \
1882 a0, a1, value));
1883 break;
1884 }
1885
1886 /*
1887 * Fallback to:
1888 * dupi_vec temp, a2
1889 * cmp_vec a0, a1, temp, cond
1890 */
1891 tcg_out_dupi_vec(s, type, vece, temp_vec, a2);
1892 a2 = temp_vec;
1893 }
1894
1895 insn = cmp_vec_insn[cond][vece];
1896 if (insn == 0) {
1897 TCGArg t;
1898 t = a1, a1 = a2, a2 = t;
1899 cond = tcg_swap_cond(cond);
1900 insn = cmp_vec_insn[cond][vece];
1901 tcg_debug_assert(insn != 0);
1902 }
1903 tcg_out32(s, encode_vdvjvk_insn(insn, a0, a1, a2));
1904 }
1905 break;
1906 case INDEX_op_add_vec:
1907 tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], true);
1908 break;
1909 case INDEX_op_sub_vec:
1910 tcg_out_addsub_vec(s, vece, a0, a1, a2, const_args[2], false);
1911 break;
1912 case INDEX_op_neg_vec:
1913 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], a0, a1));
1914 break;
1915 case INDEX_op_mul_vec:
1916 tcg_out32(s, encode_vdvjvk_insn(mul_vec_insn[vece], a0, a1, a2));
1917 break;
1918 case INDEX_op_smin_vec:
1919 tcg_out32(s, encode_vdvjvk_insn(smin_vec_insn[vece], a0, a1, a2));
1920 break;
1921 case INDEX_op_smax_vec:
1922 tcg_out32(s, encode_vdvjvk_insn(smax_vec_insn[vece], a0, a1, a2));
1923 break;
1924 case INDEX_op_umin_vec:
1925 tcg_out32(s, encode_vdvjvk_insn(umin_vec_insn[vece], a0, a1, a2));
1926 break;
1927 case INDEX_op_umax_vec:
1928 tcg_out32(s, encode_vdvjvk_insn(umax_vec_insn[vece], a0, a1, a2));
1929 break;
1930 case INDEX_op_ssadd_vec:
1931 tcg_out32(s, encode_vdvjvk_insn(ssadd_vec_insn[vece], a0, a1, a2));
1932 break;
1933 case INDEX_op_usadd_vec:
1934 tcg_out32(s, encode_vdvjvk_insn(usadd_vec_insn[vece], a0, a1, a2));
1935 break;
1936 case INDEX_op_sssub_vec:
1937 tcg_out32(s, encode_vdvjvk_insn(sssub_vec_insn[vece], a0, a1, a2));
1938 break;
1939 case INDEX_op_ussub_vec:
1940 tcg_out32(s, encode_vdvjvk_insn(ussub_vec_insn[vece], a0, a1, a2));
1941 break;
1942 case INDEX_op_shlv_vec:
1943 tcg_out32(s, encode_vdvjvk_insn(shlv_vec_insn[vece], a0, a1, a2));
1944 break;
1945 case INDEX_op_shrv_vec:
1946 tcg_out32(s, encode_vdvjvk_insn(shrv_vec_insn[vece], a0, a1, a2));
1947 break;
1948 case INDEX_op_sarv_vec:
1949 tcg_out32(s, encode_vdvjvk_insn(sarv_vec_insn[vece], a0, a1, a2));
1950 break;
1951 case INDEX_op_shli_vec:
1952 tcg_out32(s, encode_vdvjuk3_insn(shli_vec_insn[vece], a0, a1, a2));
1953 break;
1954 case INDEX_op_shri_vec:
1955 tcg_out32(s, encode_vdvjuk3_insn(shri_vec_insn[vece], a0, a1, a2));
1956 break;
1957 case INDEX_op_sari_vec:
1958 tcg_out32(s, encode_vdvjuk3_insn(sari_vec_insn[vece], a0, a1, a2));
1959 break;
1960 case INDEX_op_rotrv_vec:
1961 tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1, a2));
1962 break;
1963 case INDEX_op_rotlv_vec:
1964 /* rotlv_vec a1, a2 = rotrv_vec a1, -a2 */
1965 tcg_out32(s, encode_vdvj_insn(neg_vec_insn[vece], temp_vec, a2));
1966 tcg_out32(s, encode_vdvjvk_insn(rotrv_vec_insn[vece], a0, a1,
1967 temp_vec));
1968 break;
1969 case INDEX_op_rotli_vec:
1970 /* rotli_vec a1, a2 = rotri_vec a1, -a2 */
1971 a2 = extract32(-a2, 0, 3 + vece);
1972 switch (vece) {
1973 case MO_8:
1974 tcg_out_opc_vrotri_b(s, a0, a1, a2);
1975 break;
1976 case MO_16:
1977 tcg_out_opc_vrotri_h(s, a0, a1, a2);
1978 break;
1979 case MO_32:
1980 tcg_out_opc_vrotri_w(s, a0, a1, a2);
1981 break;
1982 case MO_64:
1983 tcg_out_opc_vrotri_d(s, a0, a1, a2);
1984 break;
1985 default:
1986 g_assert_not_reached();
1987 }
1988 break;
1989 case INDEX_op_bitsel_vec:
1990 /* vbitsel vd, vj, vk, va = bitsel_vec vd, va, vk, vj */
1991 tcg_out_opc_vbitsel_v(s, a0, a3, a2, a1);
1992 break;
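        /*
         * Illustrative mapping: TCG's bitsel_vec computes
         * a0 = (a2 & a1) | (a3 & ~a1), while LSX's vbitsel.v computes
         * vd = (vk & va) | (vj & ~va); passing vj=a3, vk=a2, va=a1
         * makes the two agree.
         */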
1993 case INDEX_op_dupm_vec:
1994 tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
1995 break;
1996 default:
1997 g_assert_not_reached();
1998 }
1999 }
2000
2001 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
2002 {
2003 switch (opc) {
2004 case INDEX_op_ld_vec:
2005 case INDEX_op_st_vec:
2006 case INDEX_op_dup_vec:
2007 case INDEX_op_dupm_vec:
2008 case INDEX_op_cmp_vec:
2009 case INDEX_op_add_vec:
2010 case INDEX_op_sub_vec:
2011 case INDEX_op_and_vec:
2012 case INDEX_op_andc_vec:
2013 case INDEX_op_or_vec:
2014 case INDEX_op_orc_vec:
2015 case INDEX_op_xor_vec:
2016 case INDEX_op_nor_vec:
2017 case INDEX_op_not_vec:
2018 case INDEX_op_neg_vec:
2019 case INDEX_op_mul_vec:
2020 case INDEX_op_smin_vec:
2021 case INDEX_op_smax_vec:
2022 case INDEX_op_umin_vec:
2023 case INDEX_op_umax_vec:
2024 case INDEX_op_ssadd_vec:
2025 case INDEX_op_usadd_vec:
2026 case INDEX_op_sssub_vec:
2027 case INDEX_op_ussub_vec:
2028 case INDEX_op_shlv_vec:
2029 case INDEX_op_shrv_vec:
2030 case INDEX_op_sarv_vec:
2031 case INDEX_op_bitsel_vec:
2032 return 1;
2033 default:
2034 return 0;
2035 }
2036 }
2037
2038 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
2039 TCGArg a0, ...)
2040 {
2041 g_assert_not_reached();
2042 }
2043
2044 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
2045 {
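    /*
     * Reminder of the constraint letters used below (illustrative
     * summary; tcg-target-con-str.h is authoritative): "r" general
     * register, "w" vector register, "Z" additionally accepts constant
     * zero via $zero, "U" a zero-extended 12-bit immediate, "C" a
     * constant whose inversion fits "U", "W" a constant equal to the
     * operation's word size, "J" a signed immediate (see the
     * CONST('J', ...) entry), "M"/"A" vector compare/add-sub immediates.
     */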
2046 switch (op) {
2047 case INDEX_op_goto_ptr:
2048 return C_O0_I1(r);
2049
2050 case INDEX_op_st8_i32:
2051 case INDEX_op_st8_i64:
2052 case INDEX_op_st16_i32:
2053 case INDEX_op_st16_i64:
2054 case INDEX_op_st32_i64:
2055 case INDEX_op_st_i32:
2056 case INDEX_op_st_i64:
2057 case INDEX_op_qemu_st_a32_i32:
2058 case INDEX_op_qemu_st_a64_i32:
2059 case INDEX_op_qemu_st_a32_i64:
2060 case INDEX_op_qemu_st_a64_i64:
2061 return C_O0_I2(rZ, r);
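        /*
         * Note (illustrative): "rZ" on the stored value lets a constant
         * zero be passed directly as $zero, avoiding a movi into a
         * scratch register.
         */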
2062
2063 case INDEX_op_qemu_ld_a32_i128:
2064 case INDEX_op_qemu_ld_a64_i128:
2065 return C_N2_I1(r, r, r);
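        /*
         * Note (illustrative): C_N2_I1 requests outputs that do not
         * overlap the input, so the address register is still intact
         * when the second half of the 128-bit value is loaded.
         */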
2066
2067 case INDEX_op_qemu_st_a32_i128:
2068 case INDEX_op_qemu_st_a64_i128:
2069 return C_O0_I3(r, r, r);
2070
2071 case INDEX_op_brcond_i32:
2072 case INDEX_op_brcond_i64:
2073 return C_O0_I2(rZ, rZ);
2074
2075 case INDEX_op_ext8s_i32:
2076 case INDEX_op_ext8s_i64:
2077 case INDEX_op_ext8u_i32:
2078 case INDEX_op_ext8u_i64:
2079 case INDEX_op_ext16s_i32:
2080 case INDEX_op_ext16s_i64:
2081 case INDEX_op_ext16u_i32:
2082 case INDEX_op_ext16u_i64:
2083 case INDEX_op_ext32s_i64:
2084 case INDEX_op_ext32u_i64:
2085 case INDEX_op_extu_i32_i64:
2086 case INDEX_op_extrl_i64_i32:
2087 case INDEX_op_extrh_i64_i32:
2088 case INDEX_op_ext_i32_i64:
2089 case INDEX_op_neg_i32:
2090 case INDEX_op_neg_i64:
2091 case INDEX_op_not_i32:
2092 case INDEX_op_not_i64:
2093 case INDEX_op_extract_i32:
2094 case INDEX_op_extract_i64:
2095 case INDEX_op_bswap16_i32:
2096 case INDEX_op_bswap16_i64:
2097 case INDEX_op_bswap32_i32:
2098 case INDEX_op_bswap32_i64:
2099 case INDEX_op_bswap64_i64:
2100 case INDEX_op_ld8s_i32:
2101 case INDEX_op_ld8s_i64:
2102 case INDEX_op_ld8u_i32:
2103 case INDEX_op_ld8u_i64:
2104 case INDEX_op_ld16s_i32:
2105 case INDEX_op_ld16s_i64:
2106 case INDEX_op_ld16u_i32:
2107 case INDEX_op_ld16u_i64:
2108 case INDEX_op_ld32s_i64:
2109 case INDEX_op_ld32u_i64:
2110 case INDEX_op_ld_i32:
2111 case INDEX_op_ld_i64:
2112 case INDEX_op_qemu_ld_a32_i32:
2113 case INDEX_op_qemu_ld_a64_i32:
2114 case INDEX_op_qemu_ld_a32_i64:
2115 case INDEX_op_qemu_ld_a64_i64:
2116 return C_O1_I1(r, r);
2117
2118 case INDEX_op_andc_i32:
2119 case INDEX_op_andc_i64:
2120 case INDEX_op_orc_i32:
2121 case INDEX_op_orc_i64:
2122 /*
2123 * LoongArch insns for these ops don't have reg-imm forms, but we
2124 * can express them using andi/ori if ~constant satisfies
2125 * TCG_CT_CONST_U12.
2126 */
2127 return C_O1_I2(r, r, rC);
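        /*
         * Worked example (illustrative): andc rd, rs, C with C = ~0xfff
         * has ~C = 0xfff, which satisfies TCG_CT_CONST_U12, so it can be
         * emitted as andi rd, rs, 0xfff.
         */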
2128
2129 case INDEX_op_shl_i32:
2130 case INDEX_op_shl_i64:
2131 case INDEX_op_shr_i32:
2132 case INDEX_op_shr_i64:
2133 case INDEX_op_sar_i32:
2134 case INDEX_op_sar_i64:
2135 case INDEX_op_rotl_i32:
2136 case INDEX_op_rotl_i64:
2137 case INDEX_op_rotr_i32:
2138 case INDEX_op_rotr_i64:
2139 return C_O1_I2(r, r, ri);
2140
2141 case INDEX_op_add_i32:
2142 return C_O1_I2(r, r, ri);
2143 case INDEX_op_add_i64:
2144 return C_O1_I2(r, r, rJ);
2145
2146 case INDEX_op_and_i32:
2147 case INDEX_op_and_i64:
2148 case INDEX_op_nor_i32:
2149 case INDEX_op_nor_i64:
2150 case INDEX_op_or_i32:
2151 case INDEX_op_or_i64:
2152 case INDEX_op_xor_i32:
2153 case INDEX_op_xor_i64:
2154 /* LoongArch reg-imm bitops have their imms ZERO-extended */
2155 return C_O1_I2(r, r, rU);
2156
2157 case INDEX_op_clz_i32:
2158 case INDEX_op_clz_i64:
2159 case INDEX_op_ctz_i32:
2160 case INDEX_op_ctz_i64:
2161 return C_O1_I2(r, r, rW);
2162
2163 case INDEX_op_deposit_i32:
2164 case INDEX_op_deposit_i64:
2165 /* Must deposit into the same register as the first input */
2166 return C_O1_I2(r, 0, rZ);
2167
2168 case INDEX_op_sub_i32:
2169 case INDEX_op_setcond_i32:
2170 return C_O1_I2(r, rZ, ri);
2171 case INDEX_op_sub_i64:
2172 case INDEX_op_setcond_i64:
2173 return C_O1_I2(r, rZ, rJ);
2174
2175 case INDEX_op_mul_i32:
2176 case INDEX_op_mul_i64:
2177 case INDEX_op_mulsh_i32:
2178 case INDEX_op_mulsh_i64:
2179 case INDEX_op_muluh_i32:
2180 case INDEX_op_muluh_i64:
2181 case INDEX_op_div_i32:
2182 case INDEX_op_div_i64:
2183 case INDEX_op_divu_i32:
2184 case INDEX_op_divu_i64:
2185 case INDEX_op_rem_i32:
2186 case INDEX_op_rem_i64:
2187 case INDEX_op_remu_i32:
2188 case INDEX_op_remu_i64:
2189 return C_O1_I2(r, rZ, rZ);
2190
2191 case INDEX_op_movcond_i32:
2192 case INDEX_op_movcond_i64:
2193 return C_O1_I4(r, rZ, rJ, rZ, rZ);
2194
2195 case INDEX_op_ld_vec:
2196 case INDEX_op_dupm_vec:
2197 case INDEX_op_dup_vec:
2198 return C_O1_I1(w, r);
2199
2200 case INDEX_op_st_vec:
2201 return C_O0_I2(w, r);
2202
2203 case INDEX_op_cmp_vec:
2204 return C_O1_I2(w, w, wM);
2205
2206 case INDEX_op_add_vec:
2207 case INDEX_op_sub_vec:
2208 return C_O1_I2(w, w, wA);
2209
2210 case INDEX_op_and_vec:
2211 case INDEX_op_andc_vec:
2212 case INDEX_op_or_vec:
2213 case INDEX_op_orc_vec:
2214 case INDEX_op_xor_vec:
2215 case INDEX_op_nor_vec:
2216 case INDEX_op_mul_vec:
2217 case INDEX_op_smin_vec:
2218 case INDEX_op_smax_vec:
2219 case INDEX_op_umin_vec:
2220 case INDEX_op_umax_vec:
2221 case INDEX_op_ssadd_vec:
2222 case INDEX_op_usadd_vec:
2223 case INDEX_op_sssub_vec:
2224 case INDEX_op_ussub_vec:
2225 case INDEX_op_shlv_vec:
2226 case INDEX_op_shrv_vec:
2227 case INDEX_op_sarv_vec:
2228 case INDEX_op_rotrv_vec:
2229 case INDEX_op_rotlv_vec:
2230 return C_O1_I2(w, w, w);
2231
2232 case INDEX_op_not_vec:
2233 case INDEX_op_neg_vec:
2234 case INDEX_op_shli_vec:
2235 case INDEX_op_shri_vec:
2236 case INDEX_op_sari_vec:
2237 case INDEX_op_rotli_vec:
2238 return C_O1_I1(w, w);
2239
2240 case INDEX_op_bitsel_vec:
2241 return C_O1_I3(w, w, w, w);
2242
2243 default:
2244 g_assert_not_reached();
2245 }
2246 }
2247
2248 static const int tcg_target_callee_save_regs[] = {
2249 TCG_REG_S0, /* used for the global env (TCG_AREG0) */
2250 TCG_REG_S1,
2251 TCG_REG_S2,
2252 TCG_REG_S3,
2253 TCG_REG_S4,
2254 TCG_REG_S5,
2255 TCG_REG_S6,
2256 TCG_REG_S7,
2257 TCG_REG_S8,
2258 TCG_REG_S9,
2259 TCG_REG_RA, /* should be last for ABI compliance */
2260 };
2261
2262 /* Stack frame parameters. */
2263 #define REG_SIZE (TCG_TARGET_REG_BITS / 8)
2264 #define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
2265 #define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2266 #define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
2267 + TCG_TARGET_STACK_ALIGN - 1) \
2268 & -TCG_TARGET_STACK_ALIGN)
2269 #define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)
2270
2271 /* We expect FRAME_SIZE to fit in the signed 12-bit immediate of addi.d. */
2272 QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);
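/*
 * Worked example (illustrative, assuming the usual values
 * TCG_STATIC_CALL_ARGS_SIZE = 128 and CPU_TEMP_BUF_NLONGS = 128):
 * SAVE_SIZE = 11 * 8 = 88 and TEMP_SIZE = 1024, so
 * FRAME_SIZE = (128 + 1024 + 88 + 15) & -16 = 1248, well within the
 * 0x7ff limit checked above.
 */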
2273
2274 /* Generate global QEMU prologue and epilogue code */
2275 static void tcg_target_qemu_prologue(TCGContext *s)
2276 {
2277 int i;
2278
2279 tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);
2280
2281 /* TB prologue */
2282 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
2283 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2284 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2285 TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2286 }
2287
2288 if (!tcg_use_softmmu && guest_base) {
2289 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
2290 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2291 }
2292
2293 /* Call generated code */
2294 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2295 tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
2296
2297 /* Return path for goto_ptr. Set return value to 0 */
2298 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2299 tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);
2300
2301 /* TB epilogue */
2302 tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
2303 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
2304 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2305 TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
2306 }
2307
2308 tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
2309 tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0);
2310 }
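/*
 * Shape of the generated code (illustrative pseudo-assembly, following
 * the emission above; the optional guest_base load is omitted):
 *
 *   prologue:              addi.d  sp, sp, -FRAME_SIZE
 *                          st.d    s0..s9/ra, sp, SAVE_OFS + i*8
 *                          move    s0, a0        # TCG_AREG0 = env
 *                          jirl    zero, a1, 0   # enter translated code
 *   tcg_code_gen_epilogue: move    a0, zero      # goto_ptr returns 0
 *   tb_ret_addr:           ld.d    s0..s9/ra, sp, SAVE_OFS + i*8
 *                          addi.d  sp, sp, FRAME_SIZE
 *                          jirl    zero, ra, 0   # return to caller
 */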
2311
2312 static void tcg_out_tb_start(TCGContext *s)
2313 {
2314 /* nothing to do */
2315 }
2316
2317 static void tcg_target_init(TCGContext *s)
2318 {
2319 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
2320
2321 /* Server- and desktop-class CPUs have UAL; embedded CPUs do not. */
2322 if (!(hwcap & HWCAP_LOONGARCH_UAL)) {
2323 error_report("TCG: unaligned access support required; exiting");
2324 exit(EXIT_FAILURE);
2325 }
2326
2327 tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS;
2328 tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS;
2329
2330 tcg_target_call_clobber_regs = ALL_GENERAL_REGS;
2331 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
2332 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
2333 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
2334 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
2335 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
2336 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
2337 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
2338 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
2339 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
2340 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
2341
2342 if (cpuinfo & CPUINFO_LSX) {
2343 tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
2344 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V24);
2345 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V25);
2346 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V26);
2347 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V27);
2348 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V28);
2349 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V29);
2350 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V30);
2351 tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_V31);
2352 }
2353
2354 s->reserved_regs = 0;
2355 tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
2356 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
2357 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
2358 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
2359 tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
2360 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
2361 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED);
2362 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP0);
2363 }
2364
2365 typedef struct {
2366 DebugFrameHeader h;
2367 uint8_t fde_def_cfa[4];
2368 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
2369 } DebugFrame;
2370
2371 #define ELF_HOST_MACHINE EM_LOONGARCH
2372
2373 static const DebugFrame debug_frame = {
2374 .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
2375 .h.cie.id = -1,
2376 .h.cie.version = 1,
2377 .h.cie.code_align = 1,
2378 .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
2379 .h.cie.return_column = TCG_REG_RA,
2380
2381 /* Total FDE size does not include the "len" member. */
2382 .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),
2383
2384 .fde_def_cfa = {
2385 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */
2386 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2387 (FRAME_SIZE >> 7)
2388 },
2389 .fde_reg_ofs = {
2390 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */
2391 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */
2392 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */
2393 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */
2394 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */
2395 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */
2396 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */
2397 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */
2398 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */
2399 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */
2400 0x80 + 1 , 1, /* DW_CFA_offset, ra, -8 */
2401 }
2402 };
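/*
 * Decoding example (illustrative): the first fde_reg_ofs entry
 * "0x80 + 23, 11" is DW_CFA_offset for DWARF register 23 (s0) with a
 * factored offset of 11; 11 * data_align (-8) = -88, so s0 is saved at
 * CFA - 88, matching the comment above.
 */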
2403
2404 void tcg_register_jit(const void *buf, size_t buf_size)
2405 {
2406 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2407 }