/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "elf.h"
#include "../tcg-pool.c.inc"
#include "../tcg-ldst.c.inc"
/*
 * Standardize on the _CALL_FOO symbols used by GCC:
 * Apple XCode does not define _CALL_DARWIN.
 * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV (32-bit).
 */
#if !defined(_CALL_SYSV) && \
    !defined(_CALL_DARWIN) && \
    !defined(_CALL_AIX) && \
    !defined(_CALL_ELF)
# if defined(__APPLE__)
#  define _CALL_DARWIN
# elif defined(__ELF__) && TCG_TARGET_REG_BITS == 32
#  define _CALL_SYSV
# else
#  error "Unknown ABI"
# endif
#endif
#if TCG_TARGET_REG_BITS == 64
# define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_EXTEND
#else
# define TCG_TARGET_CALL_ARG_I32   TCG_CALL_ARG_NORMAL
#endif
#ifdef _CALL_SYSV
# define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_EVEN
#else
# define TCG_TARGET_CALL_ARG_I64   TCG_CALL_ARG_NORMAL
#endif
/* For some memory operations, we need a scratch that isn't R0.  For the AIX
   calling convention, we can re-use the TOC register since we'll be reloading
   it at every call.  Otherwise R12 will do nicely as neither a call-saved
   register nor a parameter register.  */
#ifdef _CALL_AIX
# define TCG_REG_TMP1   TCG_REG_R2
#else
# define TCG_REG_TMP1   TCG_REG_R12
#endif

#define TCG_VEC_TMP1    TCG_REG_V0
#define TCG_VEC_TMP2    TCG_REG_V1

#define TCG_REG_TB     TCG_REG_R31
#define USE_REG_TB     (TCG_TARGET_REG_BITS == 64)
/* Shorthand for size of a pointer.  Avoid promotion to unsigned.  */
#define SZP  ((int)sizeof(void *))

/* Shorthand for size of a register.  */
#define SZR  (TCG_TARGET_REG_BITS / 8)

#define TCG_CT_CONST_S16  0x100
#define TCG_CT_CONST_U16  0x200
#define TCG_CT_CONST_S32  0x400
#define TCG_CT_CONST_U32  0x800
#define TCG_CT_CONST_ZERO 0x1000
#define TCG_CT_CONST_MONE 0x2000
#define TCG_CT_CONST_WSZ  0x4000

#define ALL_GENERAL_REGS  0xffffffffu
#define ALL_VECTOR_REGS   0xffffffff00000000ull
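/*
 * Illustrative note, not part of the original definitions: the register
 * sets here are 64-bit masks with GPR i at bit i and vector register i
 * at bit 32 + i, so e.g. (1 << TCG_REG_R3) names R3 alone, and
 * ALL_VECTOR_REGS is simply the high 32 bits of the mask.
 */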
#ifdef CONFIG_SOFTMMU
#define ALL_QLOAD_REGS \
    (ALL_GENERAL_REGS & \
     ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | (1 << TCG_REG_R5)))
#define ALL_QSTORE_REGS \
    (ALL_GENERAL_REGS & ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | \
                          (1 << TCG_REG_R5) | (1 << TCG_REG_R6)))
#else
#define ALL_QLOAD_REGS  (ALL_GENERAL_REGS & ~(1 << TCG_REG_R3))
#define ALL_QSTORE_REGS ALL_QLOAD_REGS
#endif
TCGPowerISA have_isa;
static bool have_isel;
bool have_altivec;
bool have_vsx;

#ifndef CONFIG_SOFTMMU
#define TCG_GUEST_BASE_REG 30
#endif
#ifdef CONFIG_DEBUG_TCG
static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
    "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
    "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",
    "v8",  "v9",  "v10", "v11", "v12", "v13", "v14", "v15",
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
};
#endif
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R14,  /* call saved registers */
    TCG_REG_R12,  /* call clobbered, non-arguments */
    TCG_REG_R10,  /* call clobbered, arguments */
    /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
    TCG_REG_V2,   /* call clobbered, vectors */
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R3,
    TCG_REG_R4
};

static const int tcg_target_callee_save_regs[] = {
    TCG_REG_R27,  /* currently used for the global env */
};
static inline bool in_range_b(tcg_target_long target)
{
    return target == sextract64(target, 0, 26);
}

static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
                               const tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    tcg_debug_assert(in_range_b(disp));
    return disp & 0x3fffffc;
}
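/*
 * Worked example (illustrative only): B/BL insns reach +/- 32 MiB via a
 * signed 26-bit byte displacement whose low 2 bits are zero.  A branch
 * 0x1234 bytes forward passes in_range_b() and contributes
 * 0x1234 & 0x3fffffc == 0x1234 to the LI field; a displacement of -8
 * encodes as (-8) & 0x3fffffc == 0x3fffff8.
 */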
static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);

    if (in_range_b(disp)) {
        *src_rw = (*src_rw & ~0x3fffffc) | (disp & 0x3fffffc);
        return true;
    }
    return false;
}

static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
                               const tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
    tcg_debug_assert(disp == (int16_t) disp);
    return disp & 0xfffc;
}

static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
{
    const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
    ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);

    if (disp == (int16_t) disp) {
        *src_rw = (*src_rw & ~0xfffc) | (disp & 0xfffc);
        return true;
    }
    return false;
}
/* test if a constant matches the constraint */
static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
{
    if (ct & TCG_CT_CONST) {
        return 1;
    }

    /* The only 32-bit constraint we use aside from
       TCG_CT_CONST is TCG_CT_CONST_S16.  */
    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
        return 1;
    } else if ((ct & TCG_CT_CONST_WSZ)
               && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
        return 1;
    }
    return 0;
}
#define OPCD(opc)  ((opc)<<26)
#define XO19(opc)  (OPCD(19)|((opc)<<1))
#define MD30(opc)  (OPCD(30)|((opc)<<2))
#define MDS30(opc) (OPCD(30)|((opc)<<1))
#define XO31(opc)  (OPCD(31)|((opc)<<1))
#define XO58(opc)  (OPCD(58)|(opc))
#define XO62(opc)  (OPCD(62)|(opc))
#define VX4(opc)   (OPCD(4)|(opc))
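/*
 * Worked example (illustrative only): XO31(266) builds the X-form
 * template for ADD: (31 << 26) | (266 << 1) = 0x7c000214.  OR-ing in
 * the register fields defined further below yields the final insn,
 * e.g. "add r3,r4,r5" = 0x7c000214 | RT(3) | RA(4) | RB(5) = 0x7c642a14.
 */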
#define B      OPCD( 18)
#define BC     OPCD( 16)
#define LBZ    OPCD( 34)
#define LHZ    OPCD( 40)
#define LHA    OPCD( 42)
#define LWZ    OPCD( 32)
#define LWZUX  XO31( 55)
#define STB    OPCD( 38)
#define STH    OPCD( 44)
#define STW    OPCD( 36)

#define STD    XO62(  0)
#define STDU   XO62(  1)
#define STDX   XO31(149)

#define LD     XO58(  0)
#define LDX    XO31( 21)
#define LDU    XO58(  1)
#define LDUX   XO31( 53)
#define LWA    XO58(  2)
#define LWAX   XO31(341)

#define ADDIC  OPCD( 12)
#define ADDI   OPCD( 14)
#define ADDIS  OPCD( 15)
#define ORI    OPCD( 24)
#define ORIS   OPCD( 25)
#define XORI   OPCD( 26)
#define XORIS  OPCD( 27)
#define ANDI   OPCD( 28)
#define ANDIS  OPCD( 29)
#define MULLI  OPCD(  7)
#define CMPLI  OPCD( 10)
#define CMPI   OPCD( 11)
#define SUBFIC OPCD(  8)

#define LWZU   OPCD( 33)
#define STWU   OPCD( 37)

#define RLWIMI OPCD( 20)
#define RLWINM OPCD( 21)
#define RLWNM  OPCD( 23)

#define RLDICL MD30(  0)
#define RLDICR MD30(  1)
#define RLDIMI MD30(  3)
#define RLDCL  MDS30( 8)

#define BCLR   XO19( 16)
#define BCCTR  XO19(528)
#define CRAND  XO19(257)
#define CRANDC XO19(129)
#define CRNAND XO19(225)
#define CROR   XO19(449)
#define CRNOR  XO19( 33)

#define EXTSB  XO31(954)
#define EXTSH  XO31(922)
#define EXTSW  XO31(986)
#define ADD    XO31(266)
#define ADDE   XO31(138)
#define ADDME  XO31(234)
#define ADDZE  XO31(202)
#define ADDC   XO31( 10)
#define AND    XO31( 28)
#define SUBF   XO31( 40)
#define SUBFC  XO31(  8)
#define SUBFE  XO31(136)
#define SUBFME XO31(232)
#define SUBFZE XO31(200)

#define XOR    XO31(316)
#define MULLW  XO31(235)
#define MULHW  XO31( 75)
#define MULHWU XO31( 11)
#define DIVW   XO31(491)
#define DIVWU  XO31(459)
#define MODSW  XO31(779)
#define MODUW  XO31(267)

#define CMP    XO31(  0)
#define CMPL   XO31( 32)
#define LHBRX  XO31(790)
#define LWBRX  XO31(534)
#define LDBRX  XO31(532)
#define STHBRX XO31(918)
#define STWBRX XO31(662)
#define STDBRX XO31(660)
#define MFSPR  XO31(339)
#define MTSPR  XO31(467)
#define SRAWI  XO31(824)
#define NEG    XO31(104)
#define MFCR   XO31( 19)
#define MFOCRF (MFCR | (1u << 20))
#define NOR    XO31(124)
#define CNTLZW XO31( 26)
#define CNTLZD XO31( 58)
#define CNTTZW XO31(538)
#define CNTTZD XO31(570)
#define CNTPOPW XO31(378)
#define CNTPOPD XO31(506)
#define ANDC   XO31( 60)
#define ORC    XO31(412)
#define EQV    XO31(284)
#define NAND   XO31(476)
#define ISEL   XO31( 15)

#define MULLD  XO31(233)
#define MULHD  XO31( 73)
#define MULHDU XO31(  9)
#define DIVD   XO31(489)
#define DIVDU  XO31(457)
#define MODSD  XO31(777)
#define MODUD  XO31(265)

#define LBZX   XO31( 87)
#define LHZX   XO31(279)
#define LHAX   XO31(343)
#define LWZX   XO31( 23)
#define STBX   XO31(215)
#define STHX   XO31(407)
#define STWX   XO31(151)

#define EIEIO  XO31(854)
#define HWSYNC XO31(598)
#define LWSYNC (HWSYNC | (1u << 21))

#define SPR(a, b) ((((a)<<5)|(b))<<11)
#define LR     SPR(8, 0)
#define CTR    SPR(9, 0)

#define SLW    XO31( 24)
#define SRW    XO31(536)
#define SRAW   XO31(792)

#define SLD    XO31( 27)
#define SRD    XO31(539)
#define SRAD   XO31(794)
#define SRADI  XO31(413<<1)

#define BRH    XO31(219)
#define BRW    XO31(155)
#define BRD    XO31(187)

#define TW     XO31(  4)
#define TRAP   (TW | TO(31))

#define NOP    ORI  /* ori 0,0,0 */
#define LVX        XO31(103)
#define LVEBX      XO31(7)
#define LVEHX      XO31(39)
#define LVEWX      XO31(71)
#define LXSDX      (XO31(588) | 1)  /* v2.06, force tx=1 */
#define LXVDSX     (XO31(332) | 1)  /* v2.06, force tx=1 */
#define LXSIWZX    (XO31(12) | 1)   /* v2.07, force tx=1 */
#define LXV        (OPCD(61) | 8 | 1)  /* v3.00, force tx=1 */
#define LXSD       (OPCD(57) | 2)      /* v3.00 */
#define LXVWSX     (XO31(364) | 1)     /* v3.00, force tx=1 */

#define STVX       XO31(231)
#define STVEWX     XO31(199)
#define STXSDX     (XO31(716) | 1)  /* v2.06, force sx=1 */
#define STXSIWX    (XO31(140) | 1)  /* v2.07, force sx=1 */
#define STXV       (OPCD(61) | 8 | 5)  /* v3.00, force sx=1 */
#define STXSD      (OPCD(61) | 2)      /* v3.00 */

#define VADDSBS    VX4(768)
#define VADDUBS    VX4(512)
#define VADDUBM    VX4(0)
#define VADDSHS    VX4(832)
#define VADDUHS    VX4(576)
#define VADDUHM    VX4(64)
#define VADDSWS    VX4(896)
#define VADDUWS    VX4(640)
#define VADDUWM    VX4(128)
#define VADDUDM    VX4(192)       /* v2.07 */

#define VSUBSBS    VX4(1792)
#define VSUBUBS    VX4(1536)
#define VSUBUBM    VX4(1024)
#define VSUBSHS    VX4(1856)
#define VSUBUHS    VX4(1600)
#define VSUBUHM    VX4(1088)
#define VSUBSWS    VX4(1920)
#define VSUBUWS    VX4(1664)
#define VSUBUWM    VX4(1152)
#define VSUBUDM    VX4(1216)      /* v2.07 */

#define VNEGW      (VX4(1538) | (6 << 16))  /* v3.00 */
#define VNEGD      (VX4(1538) | (7 << 16))  /* v3.00 */

#define VMAXSB     VX4(258)
#define VMAXSH     VX4(322)
#define VMAXSW     VX4(386)
#define VMAXSD     VX4(450)       /* v2.07 */
#define VMAXUB     VX4(2)
#define VMAXUH     VX4(66)
#define VMAXUW     VX4(130)
#define VMAXUD     VX4(194)       /* v2.07 */
#define VMINSB     VX4(770)
#define VMINSH     VX4(834)
#define VMINSW     VX4(898)
#define VMINSD     VX4(962)       /* v2.07 */
#define VMINUB     VX4(514)
#define VMINUH     VX4(578)
#define VMINUW     VX4(642)
#define VMINUD     VX4(706)       /* v2.07 */

#define VCMPEQUB   VX4(6)
#define VCMPEQUH   VX4(70)
#define VCMPEQUW   VX4(134)
#define VCMPEQUD   VX4(199)       /* v2.07 */
#define VCMPGTSB   VX4(774)
#define VCMPGTSH   VX4(838)
#define VCMPGTSW   VX4(902)
#define VCMPGTSD   VX4(967)       /* v2.07 */
#define VCMPGTUB   VX4(518)
#define VCMPGTUH   VX4(582)
#define VCMPGTUW   VX4(646)
#define VCMPGTUD   VX4(711)       /* v2.07 */
#define VCMPNEB    VX4(7)         /* v3.00 */
#define VCMPNEH    VX4(71)        /* v3.00 */
#define VCMPNEW    VX4(135)       /* v3.00 */

#define VSLB       VX4(260)
#define VSLH       VX4(324)
#define VSLW       VX4(388)
#define VSLD       VX4(1476)      /* v2.07 */
#define VSRB       VX4(516)
#define VSRH       VX4(580)
#define VSRW       VX4(644)
#define VSRD       VX4(1732)      /* v2.07 */
#define VSRAB      VX4(772)
#define VSRAH      VX4(836)
#define VSRAW      VX4(900)
#define VSRAD      VX4(964)       /* v2.07 */

#define VRLW       VX4(132)
#define VRLD       VX4(196)       /* v2.07 */

#define VMULEUB    VX4(520)
#define VMULEUH    VX4(584)
#define VMULEUW    VX4(648)       /* v2.07 */
#define VMULOUB    VX4(8)
#define VMULOUH    VX4(72)
#define VMULOUW    VX4(136)       /* v2.07 */
#define VMULUWM    VX4(137)       /* v2.07 */
#define VMULLD     VX4(457)       /* v3.10 */
#define VMSUMUHM   VX4(38)

#define VMRGHB     VX4(12)
#define VMRGHH     VX4(76)
#define VMRGHW     VX4(140)
#define VMRGLB     VX4(268)
#define VMRGLH     VX4(332)
#define VMRGLW     VX4(396)

#define VPKUHUM    VX4(14)
#define VPKUWUM    VX4(78)

#define VAND       VX4(1028)
#define VANDC      VX4(1092)
#define VNOR       VX4(1284)
#define VOR        VX4(1156)
#define VXOR       VX4(1220)
#define VEQV       VX4(1668)      /* v2.07 */
#define VNAND      VX4(1412)      /* v2.07 */
#define VORC       VX4(1348)      /* v2.07 */

#define VSPLTB     VX4(524)
#define VSPLTH     VX4(588)
#define VSPLTW     VX4(652)
#define VSPLTISB   VX4(780)
#define VSPLTISH   VX4(844)
#define VSPLTISW   VX4(908)

#define VSLDOI     VX4(44)

#define XXPERMDI   (OPCD(60) | (10 << 3) | 7)  /* v2.06, force ax=bx=tx=1 */
#define XXSEL      (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
#define XXSPLTIB   (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */

#define MFVSRD     (XO31(51) | 1)   /* v2.07, force sx=1 */
#define MFVSRWZ    (XO31(115) | 1)  /* v2.07, force sx=1 */
#define MTVSRD     (XO31(179) | 1)  /* v2.07, force tx=1 */
#define MTVSRWZ    (XO31(243) | 1)  /* v2.07, force tx=1 */
#define MTVSRDD    (XO31(435) | 1)  /* v3.00, force tx=1 */
#define MTVSRWS    (XO31(403) | 1)  /* v3.00, force tx=1 */

#define RT(r)   ((r)<<21)
#define RS(r)   ((r)<<21)
#define RA(r)   ((r)<<16)
#define RB(r)   ((r)<<11)
#define TO(t)   ((t)<<21)
#define SH(s)   ((s)<<11)
#define MB(b)   ((b)<<6)
#define ME(e)   ((e)<<1)
#define BO(o)   ((o)<<21)
#define MB64(b) ((b)<<5)
#define FXM(b)  (1 << (19 - (b)))

#define VRT(r)  (((r) & 31) << 21)
#define VRA(r)  (((r) & 31) << 16)
#define VRB(r)  (((r) & 31) << 11)
#define VRC(r)  (((r) & 31) <<  6)

#define LK    1

#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))

#define BF(n)     ((n)<<23)
#define BI(n, c)  (((c)+((n)*4))<<16)
#define BT(n, c)  (((c)+((n)*4))<<21)
#define BA(n, c)  (((c)+((n)*4))<<16)
#define BB(n, c)  (((c)+((n)*4))<<11)
#define BC_(n, c) (((c)+((n)*4))<<6)

#define BO_COND_TRUE  BO(12)
#define BO_COND_FALSE BO( 4)
#define BO_ALWAYS     BO(20)

enum {
    CR_LT,
    CR_GT,
    CR_EQ,
    CR_SO
};
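/*
 * Worked example (illustrative only): with CR_LT=0, CR_GT=1, CR_EQ=2,
 * CR_SO=3 as in the enum above, BI(7, CR_EQ) = (2 + 7 * 4) << 16
 * selects CR bit 30, the EQ bit of field CR7.  Combined with
 * BO_COND_TRUE (branch if the bit is set), that is the "beq cr7"
 * encoding used in tcg_to_bc below.
 */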
static const uint32_t tcg_to_bc[] = {
    [TCG_COND_EQ]  = BC | BI(7, CR_EQ) | BO_COND_TRUE,
    [TCG_COND_NE]  = BC | BI(7, CR_EQ) | BO_COND_FALSE,
    [TCG_COND_LT]  = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GE]  = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LE]  = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GT]  = BC | BI(7, CR_GT) | BO_COND_TRUE,
    [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
};

/* The low bit here is set if the RA and RB fields must be inverted.  */
static const uint32_t tcg_to_isel[] = {
    [TCG_COND_EQ]  = ISEL | BC_(7, CR_EQ),
    [TCG_COND_NE]  = ISEL | BC_(7, CR_EQ) | 1,
    [TCG_COND_LT]  = ISEL | BC_(7, CR_LT),
    [TCG_COND_GE]  = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LE]  = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GT]  = ISEL | BC_(7, CR_GT),
    [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
    [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
};
static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    const tcg_insn_unit *target;
    int16_t lo;
    int32_t hi;

    value += addend;
    target = (const tcg_insn_unit *)value;

    switch (type) {
    case R_PPC_REL14:
        return reloc_pc14(code_ptr, target);
    case R_PPC_REL24:
        return reloc_pc24(code_ptr, target);
    case R_PPC_ADDR16:
        /*
         * We are (slightly) abusing this relocation type.  In particular,
         * assert that the low 2 bits are zero, and do not modify them.
         * That way we can use this with LD et al that have opcode bits
         * in the low 2 bits of the insn.
         */
        if ((value & 3) || value != (int16_t)value) {
            return false;
        }
        *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
        break;
    case R_PPC_ADDR32:
        /*
         * We are abusing this relocation type.  Again, this points to
         * a pair of insns, lis + load.  This is an absolute address
         * relocation for PPC32 so the lis cannot be removed.
         */
        lo = value;
        hi = value - lo;
        if (hi + lo != value) {
            return false;
        }
        code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
        code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset);

static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return true;
    }
    switch (type) {
    case TCG_TYPE_I64:
        tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
        /* fallthru */
    case TCG_TYPE_I32:
        if (ret < TCG_REG_V0) {
            if (arg < TCG_REG_V0) {
                tcg_out32(s, OR | SAB(arg, ret, arg));
                break;
            } else if (have_isa_2_07) {
                tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
                          | VRT(arg) | RA(ret));
                break;
            } else {
                /* Altivec does not support vector->integer moves.  */
                return false;
            }
        } else if (arg < TCG_REG_V0) {
            if (have_isa_2_07) {
                tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
                          | VRT(ret) | RA(arg));
                break;
            } else {
                /* Altivec does not support integer->vector moves.  */
                return false;
            }
        }
        /* fallthru */
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
        tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb)
{
    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
}
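/*
 * Worked example (illustrative only): MD-form insns split the 6-bit SH
 * and MB fields.  For tcg_out_rld(s, RLDICL, ra, rs, 32, 0), as used
 * for the 32-bit rotates in tcg_out_bswap64 below, sh = 32 becomes
 * SH(0) with the sixth bit relocated to insn bit 1, and mb = 0 becomes
 * MB64(0).
 */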
static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb, int me)
{
    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
}

static inline void tcg_out_ext8s(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out32(s, EXTSB | RA(dst) | RS(src));
}

static inline void tcg_out_ext16s(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out32(s, EXTSH | RA(dst) | RS(src));
}

static inline void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out32(s, ANDI | SAI(src, dst, 0xffff));
}

static inline void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out32(s, EXTSW | RA(dst) | RS(src));
}

static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
}

static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
}

static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
}

static inline void tcg_out_sari32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    /* Limit immediate shift count lest we create an illegal insn.  */
    tcg_out32(s, SRAWI | RA(dst) | RS(src) | SH(c & 31));
}

static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
}

static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
}

static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
}
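/*
 * Worked examples (illustrative only): shifts are emitted as rotates
 * with masks.  tcg_out_shri32(s, d, s, 8) produces "rlwinm d,s,24,8,31",
 * i.e. rotate left 24 (= rotate right 8) keeping only bits 8..31.
 * tcg_out_sari64 with c = 63 encodes SH(31) plus the extra sh[5] bit
 * from ((63 >> 4) & 2).
 */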
static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
{
    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;

    if (have_isa_3_10) {
        tcg_out32(s, BRH | RA(dst) | RS(src));
        if (flags & TCG_BSWAP_OS) {
            tcg_out_ext16s(s, dst, dst);
        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext16u(s, dst, dst);
        }
        return;
    }

    /*
     * In the following,
     *   dep(a, b, m) -> (a & ~m) | (b & m)
     *
     * Begin with:                        src = xxxxabcd
     */
    /* tmp = rol32(src, 24) & 0x000000ff            = 0000000c */
    tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
    /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00)    = 000000dc */
    tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);

    if (flags & TCG_BSWAP_OS) {
        tcg_out_ext16s(s, dst, tmp);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
    }
}
static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
{
    TCGReg tmp = dst == src ? TCG_REG_R0 : dst;

    if (have_isa_3_10) {
        tcg_out32(s, BRW | RA(dst) | RS(src));
        if (flags & TCG_BSWAP_OS) {
            tcg_out_ext32s(s, dst, dst);
        } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
            tcg_out_ext32u(s, dst, dst);
        }
        return;
    }

    /*
     * Stolen from gcc's builtin_bswap32.
     * In the following,
     *   dep(a, b, m) -> (a & ~m) | (b & m)
     *
     * Begin with:                        src = xxxxabcd
     */
    /* tmp = rol32(src, 8) & 0xffffffff              = 0000bcda */
    tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
    /* tmp = dep(tmp, rol32(src, 24), 0xff000000)    = 0000dcda */
    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
    /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00)    = 0000dcba */
    tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);

    if (flags & TCG_BSWAP_OS) {
        tcg_out_ext32s(s, dst, tmp);
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
    }
}
static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
{
    TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
    TCGReg t1 = dst == src ? dst : TCG_REG_R0;

    if (have_isa_3_10) {
        tcg_out32(s, BRD | RA(dst) | RS(src));
        return;
    }

    /*
     * In the following,
     *   dep(a, b, m) -> (a & ~m) | (b & m)
     *
     * Begin with:                        src = abcdefgh
     */
    /* t0 = rol32(src, 8) & 0xffffffff              = 0000fghe */
    tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
    /* t0 = dep(t0, rol32(src, 24), 0xff000000)     = 0000hghe */
    tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
    /* t0 = dep(t0, rol32(src, 24), 0x0000ff00)     = 0000hgfe */
    tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);

    /* t0 = rol64(t0, 32)                           = hgfe0000 */
    tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
    /* t1 = rol64(src, 32)                          = efghabcd */
    tcg_out_rld(s, RLDICL, t1, src, 32, 0);

    /* t0 = dep(t0, rol32(t1, 8), 0xffffffff)       = hgfebcda */
    tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
    /* t0 = dep(t0, rol32(t1, 24), 0xff000000)      = hgfedcda */
    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
    /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00)      = hgfedcba */
    tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);

    tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
}
/* Emit a move into ret of arg, if it can be done in one insn.  */
static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
{
    if (arg == (int16_t)arg) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        return true;
    }
    if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        return true;
    }
    return false;
}
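/*
 * Illustrative examples (not emitted here): arg = -2 fits int16_t and
 * becomes ADDI ret,0,-2 ("li"); arg = 0x7fff0000 has a zero low half
 * and becomes ADDIS ret,0,0x7fff ("lis").  A value like 0x12345678
 * fails both tests and is left to tcg_out_movi_int below.
 */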
static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
                             tcg_target_long arg, bool in_prologue)
{
    intptr_t tb_diff;
    tcg_target_long tmp;
    int shift;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        arg = (int32_t)arg;
    }

    /* Load 16-bit immediates with one insn.  */
    if (tcg_out_movi_one(s, ret, arg)) {
        return;
    }

    /* Load addresses within the TB with one insn.  */
    tb_diff = tcg_tbrel_diff(s, (void *)arg);
    if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
        tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
        return;
    }

    /* Load 32-bit immediates with two insns.  Note that we've already
       eliminated bare ADDIS, so we know both insns are required.  */
    if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        tcg_out32(s, ORI | SAI(ret, ret, arg));
        return;
    }
    if (arg == (uint32_t)arg && !(arg & 0x8000)) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
        return;
    }

    /* Load masked 16-bit value.  */
    if (arg > 0 && (arg & 0x8000)) {
        tmp = arg | 0x7fff;
        if ((tmp & (tmp + 1)) == 0) {
            int mb = clz64(tmp + 1) + 1;
            tcg_out32(s, ADDI | TAI(ret, 0, arg));
            tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
            return;
        }
    }

    /* Load common masks with 2 insns.  */
    shift = ctz64(arg);
    tmp = arg >> shift;
    if (tmp == (int16_t)tmp) {
        tcg_out32(s, ADDI | TAI(ret, 0, tmp));
        tcg_out_shli64(s, ret, ret, shift);
        return;
    }
    shift = clz64(arg);
    if (tcg_out_movi_one(s, ret, arg << shift)) {
        tcg_out_shri64(s, ret, ret, shift);
        return;
    }

    /* Load addresses within 2GB of TB with 2 (or rarely 3) insns.  */
    if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
        tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
        return;
    }

    /* Use the constant pool, if possible.  */
    if (!in_prologue && USE_REG_TB) {
        new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
                       tcg_tbrel_diff(s, NULL));
        tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
        return;
    }

    tmp = arg >> 31 >> 1;
    tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
    if (tmp) {
        tcg_out_shli64(s, ret, ret, 32);
    }
    if (arg & 0xffff0000) {
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
    }
    if (arg & 0xffff) {
        tcg_out32(s, ORI | SAI(ret, ret, arg));
    }
}
static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg ret, int64_t val)
{
    uint32_t load_insn;
    int rel, low;
    intptr_t add;

    switch (vece) {
    case MO_8:
        low = (int8_t)val;
        if (low >= -16 && low < 16) {
            tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
            return;
        }
        if (have_isa_3_00) {
            tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
            return;
        }
        break;

    case MO_16:
        low = (int16_t)val;
        if (low >= -16 && low < 16) {
            tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
            return;
        }
        break;

    case MO_32:
        low = (int32_t)val;
        if (low >= -16 && low < 16) {
            tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
            return;
        }
        break;
    }

    /*
     * Otherwise we must load the value from the constant pool.
     */
    if (USE_REG_TB) {
        rel = R_PPC_ADDR16;
        add = tcg_tbrel_diff(s, NULL);
    } else {
        rel = R_PPC_ADDR32;
        add = 0;
    }

    if (have_vsx) {
        load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
        load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
        if (TCG_TARGET_REG_BITS == 64) {
            new_pool_label(s, val, rel, s->code_ptr, add);
        } else {
            new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val);
        }
    } else {
        load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
        if (TCG_TARGET_REG_BITS == 64) {
            new_pool_l2(s, rel, s->code_ptr, add, val, val);
        } else {
            new_pool_l4(s, rel, s->code_ptr, add,
                        val >> 32, val, val >> 32, val);
        }
    }

    if (USE_REG_TB) {
        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
        load_insn |= RA(TCG_REG_TB);
    } else {
        tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
        tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
    }
    tcg_out32(s, load_insn);
}
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
                         tcg_target_long arg)
{
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        tcg_debug_assert(ret < TCG_REG_V0);
        tcg_out_movi_int(s, type, ret, arg, false);
        break;
    default:
        g_assert_not_reached();
    }
}
static bool mask_operand(uint32_t c, int *mb, int *me)
{
    uint32_t lsb, test;

    /* Accept a bit pattern like:
           0....01....1
           1....10....0
           1..10..01..1
       Keep track of the transitions.  */
    if (c == 0 || c == -1) {
        return false;
    }
    test = c;
    lsb = test & -test;
    test += lsb;
    if (test & (test - 1)) {
        return false;
    }

    *me = clz32(lsb);
    *mb = test ? clz32(test & -test) + 1 : 0;
    return true;
}
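/*
 * Worked example (illustrative only): c = 0x00ffff00 gives
 * lsb = 0x100 and test = 0x01000000 (a power of 2, so accepted),
 * hence *me = clz32(0x100) = 23 and *mb = clz32(0x01000000) + 1 = 8:
 * a big-endian mask of ones from bit 8 through bit 23.
 */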
static bool mask64_operand(uint64_t c, int *mb, int *me)
{
    uint64_t lsb;

    if (c == 0) {
        return false;
    }

    lsb = c & -c;
    /* Accept 1..10..0.  */
    if (c == -lsb) {
        *mb = 0;
        *me = clz64(lsb);
        return true;
    }
    /* Accept 0..01..1.  */
    if (lsb == 1 && (c & (c + 1)) == 0) {
        *mb = clz64(c + 1) + 1;
        *me = 63;
        return true;
    }
    return false;
}
static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    int mb, me;

    if (mask_operand(c, &mb, &me)) {
        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
    } else if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
{
    int mb, me;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
    if (mask64_operand(c, &mb, &me)) {
        if (mb == 0) {
            tcg_out_rld(s, RLDICR, dst, src, 0, me);
        } else {
            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
        }
    } else if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
                           int op_lo, int op_hi)
{
    if (c >> 16) {
        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
        src = dst;
    }
    if (c & 0xffff) {
        tcg_out32(s, op_lo | SAI(src, dst, c));
        src = dst;
    }
}

static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
}

static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
}
static void tcg_out_b(TCGContext *s, int mask, const tcg_insn_unit *target)
{
    ptrdiff_t disp = tcg_pcrel_diff(s, target);
    if (in_range_b(disp)) {
        tcg_out32(s, B | (disp & 0x3fffffc) | mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | mask);
    }
}
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset)
{
    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
    bool is_int_store = false;
    TCGReg rs = TCG_REG_TMP1;

    switch (opi) {
    case LD: case LWA:
        align = 3;
        /* FALLTHRU */
    default:
        if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
            rs = rt;
            break;
        }
        break;
    case LXSD:
    case STXSD:
        align = 3;
        break;
    case LXV:
    case STXV:
        align = 15;
        break;
    case STD:
        align = 3;
        /* FALLTHRU */
    case STB: case STH: case STW:
        is_int_store = true;
        break;
    }

    /* For unaligned, or very large offsets, use the indexed form.  */
    if (offset & align || offset != (int32_t)offset || opi == 0) {
        if (rs == base) {
            rs = TCG_REG_R0;
        }
        tcg_debug_assert(!is_int_store || rs != rt);
        tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
        tcg_out32(s, opx | TAB(rt & 31, base, rs));
        return;
    }

    l0 = (int16_t)offset;
    offset = (offset - l0) >> 16;
    l1 = (int16_t)offset;

    if (l1 < 0 && orig >= 0) {
        extra = 0x4000;
        l1 = (int16_t)(offset - 0x4000);
    }
    if (l1) {
        tcg_out32(s, ADDIS | TAI(rs, base, l1));
        base = rs;
    }
    if (extra) {
        tcg_out32(s, ADDIS | TAI(rs, base, extra));
        base = rs;
    }
    if (opi != ADDI || base != rt || l0 != 0) {
        tcg_out32(s, opi | TAI(rt & 31, base, l0));
    }
}
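/*
 * Worked example (illustrative only): offset = 0x18000 splits into
 * l0 = (int16_t)0x8000 = -0x8000 and l1 = 2, producing
 * "addis tmp,base,2" followed by the D-form op with displacement
 * -0x8000, since 0x20000 - 0x8000 = 0x18000.
 */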
static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
                           TCGReg va, TCGReg vb, int shb)
{
    tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
}
static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                       TCGReg base, intptr_t offset)
{
    int shift;

    switch (type) {
    case TCG_TYPE_I32:
        if (ret < TCG_REG_V0) {
            tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
            break;
        }
        if (have_isa_2_07 && have_vsx) {
            tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
        shift = (offset - 4) & 0xc;
        if (shift) {
            tcg_out_vsldoi(s, ret, ret, ret, shift);
        }
        break;
    case TCG_TYPE_I64:
        if (ret < TCG_REG_V0) {
            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
            tcg_out_mem_long(s, LD, LDX, ret, base, offset);
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
        tcg_debug_assert(ret >= TCG_REG_V0);
        if (have_vsx) {
            tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
                             ret, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
        if (offset & 8) {
            tcg_out_vsldoi(s, ret, ret, ret, 8);
        }
        break;
    case TCG_TYPE_V128:
        tcg_debug_assert(ret >= TCG_REG_V0);
        tcg_debug_assert((offset & 15) == 0);
        tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
                         LVX, ret, base, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg base, intptr_t offset)
{
    int shift;

    switch (type) {
    case TCG_TYPE_I32:
        if (arg < TCG_REG_V0) {
            tcg_out_mem_long(s, STW, STWX, arg, base, offset);
            break;
        }
        if (have_isa_2_07 && have_vsx) {
            tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        shift = (offset - 4) & 0xc;
        if (shift) {
            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
            arg = TCG_VEC_TMP1;
        }
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
        break;
    case TCG_TYPE_I64:
        if (arg < TCG_REG_V0) {
            tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
            tcg_out_mem_long(s, STD, STDX, arg, base, offset);
            break;
        }
        /* fallthru */
    case TCG_TYPE_V64:
        tcg_debug_assert(arg >= TCG_REG_V0);
        if (have_vsx) {
            tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
                             STXSDX, arg, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        if (offset & 8) {
            tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
            arg = TCG_VEC_TMP1;
        }
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
        tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
        break;
    case TCG_TYPE_V128:
        tcg_debug_assert(arg >= TCG_REG_V0);
        tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
                         STVX, arg, base, offset);
        break;
    default:
        g_assert_not_reached();
    }
}
static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                               TCGReg base, intptr_t ofs)
{
    return false;
}
static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
                        int const_arg2, int cr, TCGType type)
{
    int imm;
    uint32_t op;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    /* Simplify the comparisons below wrt CMPI.  */
    if (type == TCG_TYPE_I32) {
        arg2 = (int32_t)arg2;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            } else if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    case TCG_COND_LT:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GT:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            }
        }
        op = CMP;
        imm = 0;
        break;

    case TCG_COND_LTU:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
        if (const_arg2) {
            if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    default:
        g_assert_not_reached();
    }
    op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);

    if (imm) {
        tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
    } else {
        if (const_arg2) {
            tcg_out_movi(s, type, TCG_REG_R0, arg2);
            arg2 = TCG_REG_R0;
        }
        tcg_out32(s, op | RA(arg1) | RB(arg2));
    }
}
static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
                                TCGReg dst, TCGReg src)
{
    if (type == TCG_TYPE_I32) {
        tcg_out32(s, CNTLZW | RS(src) | RA(dst));
        tcg_out_shri32(s, dst, dst, 5);
    } else {
        tcg_out32(s, CNTLZD | RS(src) | RA(dst));
        tcg_out_shri64(s, dst, dst, 6);
    }
}
static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
{
    /* X != 0 implies X + -1 generates a carry.  Extra addition
       trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C.  */
    if (dst != src) {
        tcg_out32(s, ADDIC | TAI(dst, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, dst, src));
    } else {
        tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
        tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
    }
}
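/*
 * Worked example (illustrative only): for X = 5, ADDIC computes 4 and
 * sets CA (5 + 0xffffffff carries); SUBFE then yields ~4 + 5 + CA = 1.
 * For X = 0, ADDIC gives -1 with CA clear and SUBFE yields
 * ~(-1) + 0 + 0 = 0, so the result is exactly (X != 0).
 */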
static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
                                  bool const_arg2)
{
    if (const_arg2) {
        if ((uint32_t)arg2 == arg2) {
            tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
        } else {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
            tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
        }
    } else {
        tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
    }
    return TCG_REG_R0;
}
static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg arg0, TCGArg arg1, TCGArg arg2,
                            int const_arg2)
{
    int crop, sh;

    tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);

    /* Ignore high bits of a potential constant arg2.  */
    if (type == TCG_TYPE_I32) {
        arg2 = (uint32_t)arg2;
    }

    /* Handle common and trivial cases before handling anything else.  */
    if (arg2 == 0) {
        switch (cond) {
        case TCG_COND_EQ:
            tcg_out_setcond_eq0(s, type, arg0, arg1);
            return;
        case TCG_COND_NE:
            if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
                tcg_out_ext32u(s, TCG_REG_R0, arg1);
                arg1 = TCG_REG_R0;
            }
            tcg_out_setcond_ne0(s, arg0, arg1);
            return;
        case TCG_COND_GE:
            tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
            arg1 = arg0;
            /* FALLTHRU */
        case TCG_COND_LT:
            /* Extract the sign bit.  */
            if (type == TCG_TYPE_I32) {
                tcg_out_shri32(s, arg0, arg1, 31);
            } else {
                tcg_out_shri64(s, arg0, arg1, 63);
            }
            return;
        default:
            break;
        }
    }

    /* If we have ISEL, we can implement everything with 3 or 4 insns.
       All other cases below are also at least 3 insns, so speed up the
       code generator by not considering them and always using ISEL.  */
    if (have_isel) {
        int isel, tab;

        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);

        isel = tcg_to_isel[cond];

        tcg_out_movi(s, type, arg0, 1);
        if (isel & 1) {
            /* arg0 = (bc ? 0 : 1) */
            tab = TAB(arg0, 0, arg0);
            isel &= ~1;
        } else {
            /* arg0 = (bc ? 1 : 0) */
            tcg_out_movi(s, type, TCG_REG_R0, 0);
            tab = TAB(arg0, arg0, TCG_REG_R0);
        }
        tcg_out32(s, isel | tab);
        return;
    }

    switch (cond) {
    case TCG_COND_EQ:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        tcg_out_setcond_eq0(s, type, arg0, arg1);
        return;

    case TCG_COND_NE:
        arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
        /* Discard the high bits only once, rather than both inputs.  */
        if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
            tcg_out_ext32u(s, TCG_REG_R0, arg1);
            arg1 = TCG_REG_R0;
        }
        tcg_out_setcond_ne0(s, arg0, arg1);
        return;

    case TCG_COND_GT:
    case TCG_COND_GTU:
        sh = 30;
        crop = 0;
        goto crtest;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        sh = 29;
        crop = 0;
        goto crtest;

    case TCG_COND_GE:
    case TCG_COND_GEU:
        sh = 31;
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
        goto crtest;

    case TCG_COND_LE:
    case TCG_COND_LEU:
        sh = 31;
        crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
    crtest:
        tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
        if (crop) {
            tcg_out32(s, crop);
        }
        tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
        tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
        break;

    default:
        g_assert_not_reached();
    }
}
static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
{
    if (l->has_value) {
        bc |= reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
    } else {
        tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
    }
    tcg_out32(s, bc);
}
static void tcg_out_brcond(TCGContext *s, TCGCond cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           TCGLabel *l, TCGType type)
{
    tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
    tcg_out_bc(s, tcg_to_bc[cond], l);
}
static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
                            TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
                            TCGArg v2, bool const_c2)
{
    /* If for some reason both inputs are zero, don't produce bad code.  */
    if (v1 == 0 && v2 == 0) {
        tcg_out_movi(s, type, dest, 0);
        return;
    }

    tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);

    if (have_isel) {
        int isel = tcg_to_isel[cond];

        /* Swap the V operands if the operation indicates inversion.  */
        if (isel & 1) {
            int t = v1;
            v1 = v2;
            v2 = t;
            isel &= ~1;
        }
        /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand.  */
        if (v2 == 0) {
            tcg_out_movi(s, type, TCG_REG_R0, 0);
            v2 = TCG_REG_R0;
        }
        tcg_out32(s, isel | TAB(dest, v1, v2));
    } else {
        if (dest == v2) {
            cond = tcg_invert_cond(cond);
            v2 = v1;
        } else if (dest != v1) {
            if (v1 == 0) {
                tcg_out_movi(s, type, dest, 0);
            } else {
                tcg_out_mov(s, type, dest, v1);
            }
        }
        /* Branch forward over one insn */
        tcg_out32(s, tcg_to_bc[cond] | 8);
        if (v2 == 0) {
            tcg_out_movi(s, type, dest, 0);
        } else {
            tcg_out_mov(s, type, dest, v2);
        }
    }
}
static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
                          TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
{
    if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
        tcg_out32(s, opc | RA(a0) | RS(a1));
    } else {
        tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
        /* Note that the only other valid constant for a2 is 0.  */
        if (have_isel) {
            tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
            tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
        } else if (!const_a2 && a0 == a2) {
            tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
            tcg_out32(s, opc | RA(a0) | RS(a1));
        } else {
            tcg_out32(s, opc | RA(a0) | RS(a1));
            tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
            if (const_a2) {
                tcg_out_movi(s, type, a0, 0);
            } else {
                tcg_out_mov(s, type, a0, a2);
            }
        }
    }
}
static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
                         const int *const_args)
{
    static const struct { uint8_t bit1, bit2; } bits[] = {
        [TCG_COND_LT ] = { CR_LT, CR_LT },
        [TCG_COND_LE ] = { CR_LT, CR_GT },
        [TCG_COND_GT ] = { CR_GT, CR_GT },
        [TCG_COND_GE ] = { CR_GT, CR_LT },
        [TCG_COND_LTU] = { CR_LT, CR_LT },
        [TCG_COND_LEU] = { CR_LT, CR_GT },
        [TCG_COND_GTU] = { CR_GT, CR_GT },
        [TCG_COND_GEU] = { CR_GT, CR_LT },
    };

    TCGCond cond = args[4], cond2;
    TCGArg al, ah, bl, bh;
    int blconst, bhconst;
    int op, bit1, bit2;

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];
    blconst = const_args[2];
    bhconst = const_args[3];

    switch (cond) {
    case TCG_COND_EQ:
        op = CRAND;
        goto do_equality;
    case TCG_COND_NE:
        op = CRNAND;
    do_equality:
        tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
        tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
        break;

    case TCG_COND_LT:
    case TCG_COND_LE:
    case TCG_COND_GT:
    case TCG_COND_GE:
    case TCG_COND_LTU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
    case TCG_COND_GEU:
        bit1 = bits[cond].bit1;
        bit2 = bits[cond].bit2;
        op = (bit1 != bit2 ? CRANDC : CRAND);
        cond2 = tcg_unsigned_cond(cond);

        tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
        tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
        tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
        tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
        break;

    default:
        g_assert_not_reached();
    }
}
static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
                             const int *const_args)
{
    tcg_out_cmp2(s, args + 1, const_args + 1);
    tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
    tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
}

static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
                            const int *const_args)
{
    tcg_out_cmp2(s, args, const_args);
    tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
}
static void tcg_out_mb(TCGContext *s, TCGArg a0)
{
    uint32_t insn;

    /* Only HWSYNC orders stores against subsequent loads;
       LWSYNC covers all the other combinations.  */
    if (a0 & TCG_MO_ST_LD) {
        insn = HWSYNC;
    } else {
        insn = LWSYNC;
    }
    tcg_out32(s, insn);
}
static inline uint64_t make_pair(tcg_insn_unit i1, tcg_insn_unit i2)
{
    if (HOST_BIG_ENDIAN) {
        return (uint64_t)i1 << 32 | i2;
    }
    return (uint64_t)i2 << 32 | i1;
}

static inline void ppc64_replace2(uintptr_t rx, uintptr_t rw,
                                  tcg_insn_unit i0, tcg_insn_unit i1)
{
#if TCG_TARGET_REG_BITS == 64
    qatomic_set((uint64_t *)rw, make_pair(i0, i1));
    flush_idcache_range(rx, rw, 8);
#else
    qemu_build_not_reached();
#endif
}

static inline void ppc64_replace4(uintptr_t rx, uintptr_t rw,
                                  tcg_insn_unit i0, tcg_insn_unit i1,
                                  tcg_insn_unit i2, tcg_insn_unit i3)
{
    uint64_t p[2];

    p[!HOST_BIG_ENDIAN] = make_pair(i0, i1);
    p[HOST_BIG_ENDIAN] = make_pair(i2, i3);

    /*
     * There's no convenient way to get the compiler to allocate a pair
     * of registers at an even index, so copy into r6/r7 and clobber.
     */
    asm("mr  %%r6, %1\n\t"
        "mr  %%r7, %2\n\t"
        "stq %%r6, %0"
        : "=Q"(*(__int128 *)rw) : "r"(p[0]), "r"(p[1]) : "r6", "r7");
    flush_idcache_range(rx, rw, 16);
}
void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx,
                              uintptr_t jmp_rw, uintptr_t addr)
{
    tcg_insn_unit i0, i1, i2, i3;
    intptr_t tb_diff = addr - tc_ptr;
    intptr_t br_diff = addr - (jmp_rx + 4);
    intptr_t lo, hi;

    if (TCG_TARGET_REG_BITS == 32) {
        intptr_t diff = addr - jmp_rx;
        tcg_debug_assert(in_range_b(diff));
        qatomic_set((uint32_t *)jmp_rw, B | (diff & 0x3fffffc));
        flush_idcache_range(jmp_rx, jmp_rw, 4);
        return;
    }

    /*
     * For 16-bit displacements, we can use a single add + branch.
     * This happens quite often.
     */
    if (tb_diff == (int16_t)tb_diff) {
        i0 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, tb_diff);
        i1 = B | (br_diff & 0x3fffffc);
        ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
        return;
    }

    lo = (int16_t)tb_diff;
    hi = (int32_t)(tb_diff - lo);
    assert(tb_diff == hi + lo);
    i0 = ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, hi >> 16);
    i1 = ADDI | TAI(TCG_REG_TB, TCG_REG_TB, lo);
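    /*
     * Worked example (illustrative only): tb_diff = 0x12348000 splits
     * into lo = -0x8000 and hi = 0x12350000, so the pair becomes
     * "addis rTB,rTB,0x1235; addi rTB,rTB,-0x8000", which sums back
     * to 0x12348000.
     */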
    /*
     * Without stq from 2.07, we can only update two insns,
     * and those must be the ones that load the target address.
     */
    if (!have_isa_2_07) {
        ppc64_replace2(jmp_rx, jmp_rw, i0, i1);
        return;
    }

    /*
     * For 26-bit displacements, we can use a direct branch.
     * Otherwise we still need the indirect branch, which we
     * must restore after a potential direct branch write.
     */
    br_diff -= 4;
    if (in_range_b(br_diff)) {
        i2 = B | (br_diff & 0x3fffffc);
        i3 = NOP;
    } else {
        i2 = MTSPR | RS(TCG_REG_TB) | CTR;
        i3 = BCCTR | BO_ALWAYS;
    }
    ppc64_replace4(jmp_rx, jmp_rw, i0, i1, i2, i3);
}
static void tcg_out_call_int(TCGContext *s, int lk,
                             const tcg_insn_unit *target)
{
#ifdef _CALL_AIX
    /* Look through the function descriptor.  If the branch is in range
       and the toc fits in a 32-bit immediate, we don't have to spend
       much effort on the call.  */
    const void *tgt = ((const void * const *)target)[0];
    uintptr_t toc = ((const uintptr_t *)target)[1];
    intptr_t diff = tcg_pcrel_diff(s, tgt);

    if (in_range_b(diff) && toc == (uint32_t)toc) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
        tcg_out_b(s, lk, tgt);
    } else {
        /* Fold the low bits of the constant into the addresses below.  */
        intptr_t arg = (intptr_t)target;
        int ofs = (int16_t)arg;

        if (ofs + 8 < 0x8000) {
            arg -= ofs;
        } else {
            ofs = 0;
        }
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
        tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
    }
#elif defined(_CALL_ELF) && _CALL_ELF == 2
    intptr_t diff;

    /* In the ELFv2 ABI, we have to set up r12 to contain the destination
       address, which the callee uses to compute its TOC address.  */
    /* FIXME: when the branch is in range, we could avoid r12 load if we
       knew that the destination uses the same TOC, and what its local
       entry point offset is.  */
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);

    diff = tcg_pcrel_diff(s, target);
    if (in_range_b(diff)) {
        tcg_out_b(s, lk, target);
    } else {
        tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS | lk);
    }
#else
    tcg_out_b(s, lk, target);
#endif
}
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
                         const TCGHelperInfo *info)
{
    tcg_out_call_int(s, LK, target);
}
static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
    [MO_UB] = LBZX,
    [MO_UW] = LHZX,
    [MO_UL] = LWZX,
    [MO_UQ] = LDX,
    [MO_SW] = LHAX,
    [MO_SL] = LWAX,
    [MO_BSWAP | MO_UB] = LBZX,
    [MO_BSWAP | MO_UW] = LHBRX,
    [MO_BSWAP | MO_UL] = LWBRX,
    [MO_BSWAP | MO_UQ] = LDBRX,
};

static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
    [MO_UB] = STBX,
    [MO_UW] = STHX,
    [MO_UL] = STWX,
    [MO_UQ] = STDX,
    [MO_BSWAP | MO_UB] = STBX,
    [MO_BSWAP | MO_UW] = STHBRX,
    [MO_BSWAP | MO_UL] = STWBRX,
    [MO_BSWAP | MO_UQ] = STDBRX,
};

static const uint32_t qemu_exts_opc[4] = {
    EXTSB, EXTSH, EXTSW, 0
};
#if defined (CONFIG_SOFTMMU)
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
 *                                 int mmu_idx, uintptr_t ra)
 */
static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = helper_ret_ldub_mmu,
    [MO_LEUW] = helper_le_lduw_mmu,
    [MO_LEUL] = helper_le_ldul_mmu,
    [MO_LEUQ] = helper_le_ldq_mmu,
    [MO_BEUW] = helper_be_lduw_mmu,
    [MO_BEUL] = helper_be_ldul_mmu,
    [MO_BEUQ] = helper_be_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
 *                                 uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEUQ] = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEUQ] = helper_be_stq_mmu,
};

/* We expect to use a 16-bit negative offset from ENV.  */
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
/* Perform the TLB load and compare.  Places the result of the comparison
   in CR7, loads the addend of the TLB into R3, and returns the register
   containing the guest address (zero-extended into R4).  Clobbers R0
   and R2.  */

static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
                               TCGReg addrlo, TCGReg addrhi,
                               int mem_index, bool is_read)
{
    int cmp_off
        = (is_read
           ? offsetof(CPUTLBEntry, addr_read)
           : offsetof(CPUTLBEntry, addr_write));
    int fast_off = TLB_MASK_TABLE_OFS(mem_index);
    int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
    int table_off = fast_off + offsetof(CPUTLBDescFast, table);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);

    /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx].  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off);

    /* Extract the page index, shifted into place for tlb index.  */
    if (TCG_TARGET_REG_BITS == 32) {
        tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    } else {
        tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
                       TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    }
    tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));

    /* Load the TLB comparator.  */
    if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
        uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
                        ? LWZUX : LDUX);
        tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
    } else {
        tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
        if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
            tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
        } else {
            tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
        }
    }

    /* Load the TLB addend for use on the fast path.  Do this asap
       to minimize any load use delay.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3,
               offsetof(CPUTLBEntry, addend));

    /* Clear the non-page, non-alignment bits from the address */
    if (TCG_TARGET_REG_BITS == 32) {
        /* We don't support unaligned accesses on 32-bits.
         * Preserve the bottom bits and thus trigger a comparison
         * failure on unaligned accesses.
         */
        if (a_bits < s_bits) {
            a_bits = s_bits;
        }
        tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
                    (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
    } else {
        TCGReg t = addrlo;

        /* If the access is unaligned, we need to make sure we fail if we
         * cross a page boundary.  The trick is to add the access size-1
         * to the address before masking the low bits.  That will make the
         * address overflow to the next page if we cross a page boundary,
         * which will then force a mismatch of the TLB compare.
         */
        if (a_bits < s_bits) {
            unsigned a_mask = (1 << a_bits) - 1;
            unsigned s_mask = (1 << s_bits) - 1;
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
            t = TCG_REG_R0;
        }
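        /*
         * Worked example (illustrative only, assuming 4 KiB pages): an
         * 8-byte access (s_mask = 7) with no alignment requirement at
         * address 0x0ffd becomes 0x1004 after the add, so the page
         * number no longer matches the TLB tag for page 0 and the
         * comparison below falls into the slow path, exactly as a
         * page-crossing access must.
         */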
        /* Mask the address for the requested alignment.  */
        if (TARGET_LONG_BITS == 32) {
            tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
                        (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
            /* Zero-extend the address for use in the final address.  */
            tcg_out_ext32u(s, TCG_REG_R4, addrlo);
            addrlo = TCG_REG_R4;
        } else if (a_bits == 0) {
            tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
        } else {
            tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
                        64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
            tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
        }
    }

    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
                    0, 7, TCG_TYPE_I32);
        tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
        tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
    } else {
        tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
                    0, 7, TCG_TYPE_TL);
    }

    return addrlo;
}
/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
                                TCGReg datalo_reg, TCGReg datahi_reg,
                                TCGReg addrlo_reg, TCGReg addrhi_reg,
                                tcg_insn_unit *raddr, tcg_insn_unit *lptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->datalo_reg = datalo_reg;
    label->datahi_reg = datahi_reg;
    label->addrlo_reg = addrlo_reg;
    label->addrhi_reg = addrhi_reg;
    label->raddr = tcg_splitwx_to_rx(raddr);
    label->label_ptr[0] = lptr;
}
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    TCGReg hi, lo, arg = TCG_REG_R3;

    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);

    lo = lb->addrlo_reg;
    hi = lb->addrhi_reg;
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
    } else {
        /* If the address needed to be zero-extended, we'll have already
           placed it in R4.  The only remaining case is 64-bit guest.  */
        tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
    }

    tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
    tcg_out32(s, MFSPR | RT(arg) | LR);

    tcg_out_call_int(s, LK, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    lo = lb->datalo_reg;
    hi = lb->datahi_reg;
    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
        tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
        tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
    } else if (opc & MO_SIGN) {
        uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
        tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
    } else {
        tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
    }

    tcg_out_b(s, 0, lb->raddr);
    return true;
}
static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    MemOpIdx oi = lb->oi;
    MemOp opc = get_memop(oi);
    MemOp s_bits = opc & MO_SIZE;
    TCGReg hi, lo, arg = TCG_REG_R3;

    if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);

    lo = lb->addrlo_reg;
    hi = lb->addrhi_reg;
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
        tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
    } else {
        /* If the address needed to be zero-extended, we'll have already
           placed it in R4.  The only remaining case is 64-bit guest.  */
        tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
    }

    lo = lb->datalo_reg;
    hi = lb->datahi_reg;
    if (TCG_TARGET_REG_BITS == 32) {
        switch (s_bits) {
        case MO_64:
            arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
            tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
            /* FALLTHRU */
        case MO_32:
            tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
            break;
        default:
            tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
            break;
        }
    } else {
        if (s_bits == MO_64) {
            tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
        } else {
            tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
        }
    }

    tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
    tcg_out32(s, MFSPR | RT(arg) | LR);

    tcg_out_call_int(s, LK, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);

    tcg_out_b(s, 0, lb->raddr);
    return true;
}
#else

static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
                                   TCGReg addrhi, unsigned a_bits)
{
    unsigned a_mask = (1 << a_bits) - 1;
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;

    /* We are expecting a_bits to max out at 7, much lower than ANDI.  */
    tcg_debug_assert(a_bits < 16);
    tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, a_mask));

    label->label_ptr[0] = s->code_ptr;
    tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);

    label->raddr = tcg_splitwx_to_rx(s->code_ptr);
}

static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
    if (!reloc_pc14(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
        return false;
    }

    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        TCGReg arg = TCG_REG_R4;

        arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
        if (l->addrlo_reg != arg) {
            tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
        } else if (l->addrhi_reg != arg + 1) {
            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
            tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
        } else {
            /* Rotate the argument pair via R0.  */
            tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R0, arg);
            tcg_out_mov(s, TCG_TYPE_I32, arg, arg + 1);
            tcg_out_mov(s, TCG_TYPE_I32, arg + 1, TCG_REG_R0);
        }
    } else {
        tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R4, l->addrlo_reg);
    }
    tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, TCG_AREG0);

    /* "Tail call" to the helper, with the return address back inline.  */
    tcg_out_call_int(s, 0, (const void *)(l->is_ld ? helper_unaligned_ld
                                          : helper_unaligned_st));
    return true;
}

static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}

static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    return tcg_out_fail_alignment(s, l);
}
#endif /* SOFTMMU */
2365 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
2367 TCGReg datalo, datahi, addrlo, rbase;
2368 TCGReg addrhi __attribute__((unused));
2371 #ifdef CONFIG_SOFTMMU
2373 tcg_insn_unit *label_ptr;
2379 datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
2381 addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
2383 opc = get_memop(oi);
2384 s_bits = opc & MO_SIZE;
2386 #ifdef CONFIG_SOFTMMU
2387 mem_index = get_mmuidx(oi);
2388 addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
2390 /* Load a pointer into the current opcode w/conditional branch-link. */
2391 label_ptr = s->code_ptr;
2392 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2395 #else /* !CONFIG_SOFTMMU */
2396 a_bits = get_alignment_bits(opc);
2398 tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
2400 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
2401 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2402 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
2403 addrlo = TCG_REG_TMP1;
    if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
        if (opc & MO_BSWAP) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
            tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
        } else if (rbase != 0) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
            tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
        } else if (addrlo == datahi) {
            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
        } else {
            tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
            tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
        }
    } else {
        uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
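        /*
         * Pre-2.06 cores lack ldbrx; synthesize the byte-reversed 64-bit
         * load from two lwbrx (byte-reversed word loads) and merge the
         * second word into the high half with rldimi.
         */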
        if (!have_isa_2_06 && insn == LDBRX) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
            tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
            tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
        } else if (insn) {
            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
        } else {
            insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
            tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
            insn = qemu_exts_opc[s_bits];
            tcg_out32(s, insn | RA(datalo) | RS(datalo));
        }
    }

#ifdef CONFIG_SOFTMMU
    add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#endif
}
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg datalo, datahi, addrlo, rbase;
    TCGReg addrhi __attribute__((unused));
    MemOpIdx oi;
    MemOp opc, s_bits;
#ifdef CONFIG_SOFTMMU
    int mem_index;
    tcg_insn_unit *label_ptr;
#else
    unsigned a_bits;
#endif

    datalo = *args++;
    datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addrlo = *args++;
    addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);
    s_bits = opc & MO_SIZE;

#ifdef CONFIG_SOFTMMU
    mem_index = get_mmuidx(oi);
    addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);

    /* Load a pointer into the current opcode w/conditional branch-link. */
    label_ptr = s->code_ptr;
    tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);

    rbase = TCG_REG_R3;
#else  /* !CONFIG_SOFTMMU */
    a_bits = get_alignment_bits(opc);
    if (a_bits) {
        tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
    }
    rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
        addrlo = TCG_REG_TMP1;
    }
#endif
    if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
        if (opc & MO_BSWAP) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
            tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
        } else if (rbase != 0) {
            tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
            tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
            tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
        } else {
            tcg_out32(s, STW | TAI(datahi, addrlo, 0));
            tcg_out32(s, STW | TAI(datalo, addrlo, 4));
        }
    } else {
        uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
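        /*
         * Pre-2.06 cores lack stdbrx; store the low word byte-reversed
         * with stwbrx, then shift the high word down and store it
         * byte-reversed at offset +4.
         */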
        if (!have_isa_2_06 && insn == STDBRX) {
            tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
            tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
            tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
            tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
        } else {
            tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
        }
    }

#ifdef CONFIG_SOFTMMU
    add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
                        s->code_ptr, label_ptr);
#endif
}
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;

    for (i = 0; i < count; ++i) {
        p[i] = NOP;
    }
}
/* Parameters for function call generation, used in tcg.c. */
#define TCG_TARGET_STACK_ALIGN       16

#ifdef _CALL_AIX
# define LINK_AREA_SIZE                (6 * SZR)
# define LR_OFFSET                     (1 * SZR)
# define TCG_TARGET_CALL_STACK_OFFSET  (LINK_AREA_SIZE + 8 * SZR)
#elif defined(_CALL_DARWIN)
# define LINK_AREA_SIZE                (6 * SZR)
# define LR_OFFSET                     (2 * SZR)
#elif TCG_TARGET_REG_BITS == 64
# if defined(_CALL_ELF) && _CALL_ELF == 2
#  define LINK_AREA_SIZE               (4 * SZR)
#  define LR_OFFSET                    (1 * SZR)
# endif
#else /* TCG_TARGET_REG_BITS == 32 */
# if defined(_CALL_SYSV)
#  define LINK_AREA_SIZE               (2 * SZR)
#  define LR_OFFSET                    (1 * SZR)
# endif
#endif
#ifndef LR_OFFSET
# error "Unhandled ABI"
#endif
#ifndef TCG_TARGET_CALL_STACK_OFFSET
# define TCG_TARGET_CALL_STACK_OFFSET  LINK_AREA_SIZE
#endif

#define CPU_TEMP_BUF_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define REG_SAVE_SIZE      ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)

#define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET   \
                     + TCG_STATIC_CALL_ARGS_SIZE    \
                     + CPU_TEMP_BUF_SIZE            \
                     + REG_SAVE_SIZE                \
                     + TCG_TARGET_STACK_ALIGN - 1)  \
                    & -TCG_TARGET_STACK_ALIGN)

#define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
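/*
 * A sketch of the frame these macros describe, as offsets from the
 * stack pointer after the prologue's stdu/stwu, low addresses first:
 *
 *   0 ................................ ABI link area (back chain, LR, ...)
 *   TCG_TARGET_CALL_STACK_OFFSET ..... outgoing stack arguments
 *   REG_SAVE_BOT - CPU_TEMP_BUF_SIZE . TCG temporaries (see tcg_set_frame)
 *   REG_SAVE_BOT ..................... callee-saved register save area
 *   FRAME_SIZE ....................... caller's frame
 */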
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

#ifdef _CALL_AIX
    const void **desc = (const void **)s->code_ptr;
    desc[0] = tcg_splitwx_to_rx(desc + 2);  /* entry point */
    desc[1] = 0;                            /* environment pointer */
    s->code_ptr = (void *)(desc + 2);       /* skip over descriptor */
#endif

    tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
                  CPU_TEMP_BUF_SIZE);

    /* Prologue */
    tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
    tcg_out32(s, (SZR == 8 ? STDU : STWU)
              | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));

    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
#ifndef CONFIG_SOFTMMU
    if (guest_base) {
        tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
    if (USE_REG_TB) {
        tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
    }
    tcg_out32(s, BCCTR | BO_ALWAYS);

    /* Epilogue */
    tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_R1, REG_SAVE_BOT + i * SZR);
    }
    tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
    tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
    tcg_out32(s, BCLR | BO_ALWAYS);
}
static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
{
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
    tcg_out_b(s, 0, tcg_code_gen_epilogue);
}
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg args[TCG_MAX_OP_ARGS],
                       const int const_args[TCG_MAX_OP_ARGS])
{
    TCGArg a0, a1, a2;

    switch (opc) {
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
            /* Direct jump. */
            if (TCG_TARGET_REG_BITS == 64) {
                /* Ensure the next insns are 8 or 16-byte aligned. */
                while ((uintptr_t)s->code_ptr & (have_isa_2_07 ? 15 : 7)) {
                    tcg_out32(s, NOP);
                }
                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
                tcg_out32(s, ADDIS | TAI(TCG_REG_TB, TCG_REG_TB, 0));
                tcg_out32(s, ADDI | TAI(TCG_REG_TB, TCG_REG_TB, 0));
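                /*
                 * This addis/addi pair is the patch target for
                 * tb_target_set_jmp_target: the alignment enforced above
                 * keeps both instructions inside one naturally aligned
                 * 8-byte unit (16 bytes when ISA 2.07 quadword stores
                 * are available), so the pair can be rewritten with a
                 * single atomic store.
                 */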
            } else {
                s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
                tcg_out32(s, B);
                s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
                break;
            }
        } else {
            /* Indirect jump. */
            tcg_debug_assert(s->tb_jmp_insn_offset == NULL);
            tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TB, 0,
                       (intptr_t)(s->tb_jmp_target_addr + args[0]));
        }
        tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
        tcg_out32(s, BCCTR | BO_ALWAYS);
        set_jmp_reset_offset(s, args[0]);
        if (USE_REG_TB) {
            /* For the unlinked case, need to reset TCG_REG_TB. */
            tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
                             -tcg_current_code_size(s));
        }
        break;

    case INDEX_op_goto_ptr:
        tcg_out32(s, MTSPR | RS(args[0]) | CTR);
        if (USE_REG_TB) {
            tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
        }
        tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
        tcg_out32(s, BCCTR | BO_ALWAYS);
        break;
    case INDEX_op_br:
        {
            TCGLabel *l = arg_label(args[0]);
            uint32_t insn = B;

            if (l->has_value) {
                insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
                                       l->u.value_ptr);
            } else {
                tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
            }
            tcg_out32(s, insn);
        }
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32u_i64:
        tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld32s_i64:
        tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
        break;
    case INDEX_op_ld_i64:
        tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
        break;
    case INDEX_op_st_i64:
        tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
        break;
    case INDEX_op_add_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_32:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_32;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;
    case INDEX_op_and_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_and_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, a2);
        } else {
            tcg_out32(s, AND | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_or_i64:
    case INDEX_op_or_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_ori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, OR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_xor_i64:
    case INDEX_op_xor_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_xori32(s, a0, a1, a2);
        } else {
            tcg_out32(s, XOR | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi32(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_andc_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out_andi64(s, a0, a1, ~a2);
        } else {
            tcg_out32(s, ANDC | SAB(a1, a0, a2));
        }
        break;
    case INDEX_op_orc_i32:
        if (const_args[2]) {
            tcg_out_ori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* fall through */
    case INDEX_op_orc_i64:
        tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_eqv_i32:
        if (const_args[2]) {
            tcg_out_xori32(s, args[0], args[1], ~args[2]);
            break;
        }
        /* fall through */
    case INDEX_op_eqv_i64:
        tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nand_i32:
    case INDEX_op_nand_i64:
        tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_nor_i32:
    case INDEX_op_nor_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
        break;
    case INDEX_op_clz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i32:
        tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i32:
        tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
        break;

    case INDEX_op_clz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctz_i64:
        tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
                      args[2], const_args[2]);
        break;
    case INDEX_op_ctpop_i64:
        tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
        break;
    case INDEX_op_mul_i32:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLW | TAB(a0, a1, a2));
        }
        break;

    case INDEX_op_div_i32:
        tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_divu_i32:
        tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_rem_i32:
        tcg_out32(s, MODSW | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_remu_i32:
        tcg_out32(s, MODUW | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_shl_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shli32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i32:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shri32(s, args[0], args[1], args[2] & 31);
        } else {
            tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari32(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
        } else {
            tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_rotr_i32:
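        /* Rotate right by n is rotate left by 32 - n. */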
        if (const_args[2]) {
            tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
            tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
                         | MB(0) | ME(31));
        }
        break;
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I32);
        break;
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
                       arg_label(args[3]), TCG_TYPE_I64);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args, const_args);
        break;

    case INDEX_op_neg_i32:
    case INDEX_op_neg_i64:
        tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
        break;
    case INDEX_op_add_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
        do_addi_64:
            tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
        } else {
            tcg_out32(s, ADD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_sub_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
            } else {
                tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
            }
        } else if (const_args[2]) {
            a2 = -a2;
            goto do_addi_64;
        } else {
            tcg_out32(s, SUBF | TAB(a0, a2, a1));
        }
        break;
    case INDEX_op_shl_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shli64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_shr_i64:
        if (const_args[2]) {
            /* Limit immediate shift count lest we create an illegal insn. */
            tcg_out_shri64(s, args[0], args[1], args[2] & 63);
        } else {
            tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_sar_i64:
        if (const_args[2]) {
            tcg_out_sari64(s, args[0], args[1], args[2]);
        } else {
            tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
        }
        break;
    case INDEX_op_rotl_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
        } else {
            tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
        }
        break;
    case INDEX_op_rotr_i64:
        if (const_args[2]) {
            tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
        } else {
            tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
            tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
        }
        break;
    case INDEX_op_mul_i64:
        a0 = args[0], a1 = args[1], a2 = args[2];
        if (const_args[2]) {
            tcg_out32(s, MULLI | TAI(a0, a1, a2));
        } else {
            tcg_out32(s, MULLD | TAB(a0, a1, a2));
        }
        break;
    case INDEX_op_div_i64:
        tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_divu_i64:
        tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_rem_i64:
        tcg_out32(s, MODSD | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_remu_i64:
        tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, args[0], args[1]);
        break;
    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        tcg_out_ext32s(s, args[0], args[1]);
        break;
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, args[0], args[1]);
        break;
    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
                        const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args, const_args);
        break;

    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        tcg_out_bswap16(s, args[0], args[1], args[2]);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i64:
        tcg_out_bswap32(s, args[0], args[1], args[2]);
        break;
    case INDEX_op_bswap64_i64:
        tcg_out_bswap64(s, args[0], args[1]);
        break;
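    /*
     * Deposit is implemented with rlwimi/rldimi: rotate the source left
     * by the field position (args[3]) and insert it under a mask of
     * args[4] bits at that position.  A constant source is constrained
     * to zero (rZ), so that case reduces to clearing the field with an
     * and-immediate.
     */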
    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi32(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
                        32 - args[3] - args[4], 31 - args[3]);
        }
        break;
    case INDEX_op_deposit_i64:
        if (const_args[2]) {
            uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
            tcg_out_andi64(s, args[0], args[0], ~mask);
        } else {
            tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
                        64 - args[3] - args[4]);
        }
        break;
    case INDEX_op_extract_i32:
        tcg_out_rlw(s, RLWINM, args[0], args[1],
                    32 - args[2], 32 - args[3], 31);
        break;
    case INDEX_op_extract_i64:
        tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
    case INDEX_op_movcond_i64:
        tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
                        args[3], args[4], const_args[2]);
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_add2_i64:
#else
    case INDEX_op_add2_i32:
#endif
        /* Note that the CA bit is defined based on the word size of the
           environment.  So in 64-bit mode it's always carry-out of bit 63.
           The fallback code using deposit works just as well for 32-bit. */
        a0 = args[0], a1 = args[1];
        if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[4]) {
            tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
        } else {
            tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
        }
        if (const_args[5]) {
            tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
        } else {
            tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_sub2_i64:
#else
    case INDEX_op_sub2_i32:
#endif
        a0 = args[0], a1 = args[1];
        if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
            a0 = TCG_REG_R0;
        }
        if (const_args[2]) {
            tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
        } else {
            tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
        }
        if (const_args[3]) {
            tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
        } else {
            tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
        }
        if (a0 != args[0]) {
            tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
        }
        break;
    case INDEX_op_muluh_i32:
        tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i32:
        tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_muluh_i64:
        tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
        break;
    case INDEX_op_mulsh_i64:
        tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, args[0]);
        break;

    case INDEX_op_mov_i32:   /* Always emitted via tcg_out_mov. */
    case INDEX_op_mov_i64:
    case INDEX_op_call:      /* Always emitted via tcg_out_call. */
    case INDEX_op_exit_tb:   /* Always emitted via tcg_out_exit_tb. */
    default:
        g_assert_not_reached();
    }
}
int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
{
    switch (opc) {
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_not_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_eqv_vec:
    case INDEX_op_nand_vec:
        return 1;
    case INDEX_op_orc_vec:
        return have_isa_2_07;
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
        return vece <= MO_32 || have_isa_2_07;
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
        return vece <= MO_32;
    case INDEX_op_cmp_vec:
    case INDEX_op_shli_vec:
    case INDEX_op_shri_vec:
    case INDEX_op_sari_vec:
    case INDEX_op_rotli_vec:
        /* -1 requests expansion via tcg_expand_vec_op. */
        return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
    case INDEX_op_neg_vec:
        return vece >= MO_32 && have_isa_3_00;
    case INDEX_op_mul_vec:
        switch (vece) {
        case MO_8:
        case MO_16:
            return -1;
        case MO_32:
            return have_isa_2_07 ? 1 : -1;
        case MO_64:
            return have_isa_3_10;
        }
        return 0;
    case INDEX_op_bitsel_vec:
        return have_vsx;
    case INDEX_op_rotrv_vec:
        return -1;
    default:
        return 0;
    }
}
static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
                            TCGReg dst, TCGReg src)
{
    tcg_debug_assert(dst >= TCG_REG_V0);

    /* Splat from integer reg allowed via constraints for v3.00. */
    if (src < TCG_REG_V0) {
        tcg_debug_assert(have_isa_3_00);
        switch (vece) {
        case MO_64:
            tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
            return true;
        case MO_32:
            tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
            return true;
        default:
            /* Fail, so that we fall back on either dupm or mov+dup. */
            return false;
        }
    }

    /*
     * Recall we use (or emulate) VSX integer loads, so the integer is
     * right justified within the left (zero-index) double-word.
     */
    switch (vece) {
    case MO_8:
        tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
        break;
    case MO_16:
        tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
        break;
    case MO_32:
        tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
            break;
        }
        tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
        tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
                             TCGReg out, TCGReg base, intptr_t offset)
{
    int elt;

    tcg_debug_assert(out >= TCG_REG_V0);
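    /*
     * VSPLT element numbering is big-endian, so on a little-endian host
     * the element that was loaded sits at the mirrored index; hence the
     * elt adjustments under !HOST_BIG_ENDIAN below.
     */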
    switch (vece) {
    case MO_8:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
        }
        elt = extract32(offset, 0, 4);
#if !HOST_BIG_ENDIAN
        elt ^= 15;
#endif
        tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_16:
        tcg_debug_assert((offset & 1) == 0);
        if (have_isa_3_00) {
            tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
        } else {
            tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
        }
        elt = extract32(offset, 1, 3);
#if !HOST_BIG_ENDIAN
        elt ^= 7;
#endif
        tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_32:
        if (have_isa_3_00) {
            tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 3) == 0);
        tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
        elt = extract32(offset, 2, 2);
#if !HOST_BIG_ENDIAN
        elt ^= 3;
#endif
        tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
        break;
    case MO_64:
        if (have_vsx) {
            tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
            break;
        }
        tcg_debug_assert((offset & 7) == 0);
        tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
        tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
        elt = extract32(offset, 3, 1);
#if !HOST_BIG_ENDIAN
        elt = !elt;
#endif
        if (elt) {
            tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
        } else {
            tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return true;
}
static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
                           unsigned vecl, unsigned vece,
                           const TCGArg args[TCG_MAX_OP_ARGS],
                           const int const_args[TCG_MAX_OP_ARGS])
{
    static const uint32_t
        add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
        sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
        mul_op[4] = { 0, 0, VMULUWM, VMULLD },
        neg_op[4] = { 0, 0, VNEGW, VNEGD },
        eq_op[4]  = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
        ne_op[4]  = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
        gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
        gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
        ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
        usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
        sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
        ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
        umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
        smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
        umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
        smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
        shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
        shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
        sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
        mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
        mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
        muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
        mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
        pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
        rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };

    TCGType type = vecl + TCG_TYPE_V64;
    TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
    uint32_t insn;

    switch (opc) {
    case INDEX_op_ld_vec:
        tcg_out_ld(s, type, a0, a1, a2);
        return;
    case INDEX_op_st_vec:
        tcg_out_st(s, type, a0, a1, a2);
        return;
    case INDEX_op_dupm_vec:
        tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
        return;

    case INDEX_op_add_vec:
        insn = add_op[vece];
        break;
    case INDEX_op_sub_vec:
        insn = sub_op[vece];
        break;
    case INDEX_op_neg_vec:
        insn = neg_op[vece];
        a2 = a1;
        a1 = 0;
        break;
    case INDEX_op_mul_vec:
        insn = mul_op[vece];
        break;
    case INDEX_op_ssadd_vec:
        insn = ssadd_op[vece];
        break;
    case INDEX_op_sssub_vec:
        insn = sssub_op[vece];
        break;
    case INDEX_op_usadd_vec:
        insn = usadd_op[vece];
        break;
    case INDEX_op_ussub_vec:
        insn = ussub_op[vece];
        break;
    case INDEX_op_smin_vec:
        insn = smin_op[vece];
        break;
    case INDEX_op_umin_vec:
        insn = umin_op[vece];
        break;
    case INDEX_op_smax_vec:
        insn = smax_op[vece];
        break;
    case INDEX_op_umax_vec:
        insn = umax_op[vece];
        break;
    case INDEX_op_shlv_vec:
        insn = shlv_op[vece];
        break;
    case INDEX_op_shrv_vec:
        insn = shrv_op[vece];
        break;
    case INDEX_op_sarv_vec:
        insn = sarv_op[vece];
        break;
    case INDEX_op_and_vec:
        insn = VAND;
        break;
    case INDEX_op_or_vec:
        insn = VOR;
        break;
    case INDEX_op_xor_vec:
        insn = VXOR;
        break;
    case INDEX_op_andc_vec:
        insn = VANDC;
        break;
    case INDEX_op_not_vec:
        insn = VNOR;
        a2 = a1;
        break;
    case INDEX_op_orc_vec:
        insn = VORC;
        break;
    case INDEX_op_nand_vec:
        insn = VNAND;
        break;
    case INDEX_op_nor_vec:
        insn = VNOR;
        break;
    case INDEX_op_eqv_vec:
        insn = VEQV;
        break;

    case INDEX_op_cmp_vec:
        switch (args[3]) {
        case TCG_COND_EQ:
            insn = eq_op[vece];
            break;
        case TCG_COND_NE:
            insn = ne_op[vece];
            break;
        case TCG_COND_GT:
            insn = gts_op[vece];
            break;
        case TCG_COND_GTU:
            insn = gtu_op[vece];
            break;
        default:
            g_assert_not_reached();
        }
        break;

    case INDEX_op_bitsel_vec:
        tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
        return;

    case INDEX_op_dup2_vec:
        assert(TCG_TARGET_REG_BITS == 32);
        /* With inputs a1 = xLxx, a2 = xHxx  */
        tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1));  /* a0 = xxHL */
        tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8);          /* tmp = HLxx */
        tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8);          /* a0 = HLHL */
        return;

    case INDEX_op_ppc_mrgh_vec:
        insn = mrgh_op[vece];
        break;
    case INDEX_op_ppc_mrgl_vec:
        insn = mrgl_op[vece];
        break;
    case INDEX_op_ppc_muleu_vec:
        insn = muleu_op[vece];
        break;
    case INDEX_op_ppc_mulou_vec:
        insn = mulou_op[vece];
        break;
    case INDEX_op_ppc_pkum_vec:
        insn = pkum_op[vece];
        break;
    case INDEX_op_rotlv_vec:
        insn = rotl_op[vece];
        break;
    case INDEX_op_ppc_msum_vec:
        tcg_debug_assert(vece == MO_16);
        tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
        return;

    case INDEX_op_mov_vec:  /* Always emitted via tcg_out_mov. */
    case INDEX_op_dup_vec:  /* Always emitted via tcg_out_dup_vec. */
    default:
        g_assert_not_reached();
    }

    tcg_debug_assert(insn != 0);
    tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
}
static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGArg imm, TCGOpcode opci)
{
    TCGv_vec t1;

    if (vece == MO_32) {
        /*
         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
         * So using negative numbers gets us the 4th bit easily.
         */
        imm = sextract32(imm, 0, 5);
    } else {
        imm &= (8 << vece) - 1;
    }

    /* Splat w/bytes for xxspltib when 2.07 allows MO_64. */
    t1 = tcg_constant_vec(type, MO_8, imm);
    vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(t1));
}
static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2, TCGCond cond)
{
    bool need_swap = false, need_inv = false;

    tcg_debug_assert(vece <= MO_32 || have_isa_2_07);
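    /*
     * Altivec provides only eq/gt(s)/gt(u) compares (plus ne on v3.00
     * for small elements); everything else is derived by inverting the
     * condition, swapping the operands, or both.
     */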
    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_GT:
    case TCG_COND_GTU:
        break;
    case TCG_COND_NE:
        if (have_isa_3_00 && vece <= MO_32) {
            break;
        }
        /* fall through */
    case TCG_COND_LE:
    case TCG_COND_LEU:
        need_inv = true;
        break;
    case TCG_COND_LT:
    case TCG_COND_LTU:
        need_swap = true;
        break;
    case TCG_COND_GE:
    case TCG_COND_GEU:
        need_swap = need_inv = true;
        break;
    default:
        g_assert_not_reached();
    }

    if (need_inv) {
        cond = tcg_invert_cond(cond);
    }
    if (need_swap) {
        TCGv_vec t1;
        t1 = v1, v1 = v2, v2 = t1;
        cond = tcg_swap_cond(cond);
    }

    vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
              tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);

    if (need_inv) {
        tcg_gen_not_vec(vece, v0, v0);
    }
}
static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
                           TCGv_vec v1, TCGv_vec v2)
{
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    TCGv_vec c0, c16;
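    /*
     * A sketch of the fallback strategies below: for MO_8/MO_16, the
     * vmuleu/vmulou pair produces double-width products of the even/odd
     * elements; merging those at the next larger element size and
     * re-packing with vpku*um keeps the low half of every product in
     * element order.  For MO_32 (pre-2.07), the product is assembled
     * from 16-bit partials: vmsumuhm of v1 with the half-swapped v2
     * forms the cross terms, shifted up by 16 and added to the
     * low-by-low products from vmulouh.
     */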
    switch (vece) {
    case MO_8:
    case MO_16:
        vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
                  tcgv_vec_arg(t1), tcgv_vec_arg(t2));
        vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
                  tcgv_vec_arg(v0), tcgv_vec_arg(t1));
        break;

    case MO_32:
        tcg_debug_assert(!have_isa_2_07);
        /*
         * Only 5 bits are significant, and VSPLTISB can represent -16..15.
         * So using -16 is a quick way to represent 16.
         */
        c16 = tcg_constant_vec(type, MO_8, -16);
        c0 = tcg_constant_vec(type, MO_8, 0);

        vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v2), tcgv_vec_arg(c16));
        vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
                  tcgv_vec_arg(v1), tcgv_vec_arg(v2));
        vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t1),
                  tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(c0));
        vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t1),
                  tcgv_vec_arg(t1), tcgv_vec_arg(c16));
        tcg_gen_add_vec(MO_32, v0, t1, t2);
        break;

    default:
        g_assert_not_reached();
    }
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);
}
void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
                       TCGArg a0, ...)
{
    va_list va;
    TCGv_vec v0, v1, v2, t0;
    TCGArg a2;

    va_start(va, a0);
    v0 = temp_tcgv_vec(arg_temp(a0));
    v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
    a2 = va_arg(va, TCGArg);

    switch (opc) {
    case INDEX_op_shli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
        break;
    case INDEX_op_shri_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
        break;
    case INDEX_op_sari_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
        break;
    case INDEX_op_rotli_vec:
        expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
        break;
    case INDEX_op_cmp_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
        break;
    case INDEX_op_mul_vec:
        v2 = temp_tcgv_vec(arg_temp(a2));
        expand_vec_mul(type, vece, v0, v1, v2);
        break;
    case INDEX_op_rotrv_vec:
        /* Rotate right is rotate left by the negated count. */
        v2 = temp_tcgv_vec(arg_temp(a2));
        t0 = tcg_temp_new_vec(type);
        tcg_gen_neg_vec(vece, t0, v2);
        tcg_gen_rotlv_vec(vece, v0, v1, t0);
        tcg_temp_free_vec(t0);
        break;
    default:
        g_assert_not_reached();
    }
    va_end(va);
}
static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
{
    switch (op) {
    case INDEX_op_goto_ptr:
        return C_O0_I1(r);
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_ctpop_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_not_i32:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext16s_i32:
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap32_i32:
    case INDEX_op_extract_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_ctpop_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_not_i64:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_ext_i32_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_bswap16_i64:
    case INDEX_op_bswap32_i64:
    case INDEX_op_bswap64_i64:
    case INDEX_op_extract_i64:
        return C_O1_I1(r, r);

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return C_O0_I2(r, r);

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_andc_i32:
    case INDEX_op_orc_i32:
    case INDEX_op_eqv_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
    case INDEX_op_setcond_i64:
        return C_O1_I2(r, r, ri);

    case INDEX_op_mul_i32:
    case INDEX_op_mul_i64:
        return C_O1_I2(r, r, rI);

    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_nand_i32:
    case INDEX_op_nor_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_orc_i64:
    case INDEX_op_eqv_i64:
    case INDEX_op_nand_i64:
    case INDEX_op_nor_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
        return C_O1_I2(r, r, r);

    case INDEX_op_sub_i32:
        return C_O1_I2(r, rI, ri);
    case INDEX_op_add_i64:
        return C_O1_I2(r, r, rT);
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return C_O1_I2(r, r, rU);
    case INDEX_op_sub_i64:
        return C_O1_I2(r, rI, rT);
    case INDEX_op_clz_i32:
    case INDEX_op_ctz_i32:
    case INDEX_op_clz_i64:
    case INDEX_op_ctz_i64:
        return C_O1_I2(r, r, rZW);

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return C_O0_I2(r, ri);

    case INDEX_op_movcond_i32:
    case INDEX_op_movcond_i64:
        return C_O1_I4(r, r, ri, rZ, rZ);
    case INDEX_op_deposit_i32:
    case INDEX_op_deposit_i64:
        return C_O1_I2(r, 0, rZ);
    case INDEX_op_brcond2_i32:
        return C_O0_I4(r, r, ri, ri);
    case INDEX_op_setcond2_i32:
        return C_O1_I4(r, r, r, ri, ri);
    case INDEX_op_add2_i64:
    case INDEX_op_add2_i32:
        return C_O2_I4(r, r, r, r, rI, rZM);
    case INDEX_op_sub2_i64:
    case INDEX_op_sub2_i32:
        return C_O2_I4(r, r, rI, rZM, r, r);
    case INDEX_op_qemu_ld_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? C_O1_I1(r, L)
                : C_O1_I2(r, L, L));

    case INDEX_op_qemu_st_i32:
        return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
                ? C_O0_I2(S, S)
                : C_O0_I3(S, S, S));

    case INDEX_op_qemu_ld_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
                : TARGET_LONG_BITS == 32 ? C_O2_I1(L, L, L)
                : C_O2_I2(L, L, L, L));

    case INDEX_op_qemu_st_i64:
        return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(S, S)
                : TARGET_LONG_BITS == 32 ? C_O0_I3(S, S, S)
                : C_O0_I4(S, S, S, S));
    case INDEX_op_add_vec:
    case INDEX_op_sub_vec:
    case INDEX_op_mul_vec:
    case INDEX_op_and_vec:
    case INDEX_op_or_vec:
    case INDEX_op_xor_vec:
    case INDEX_op_andc_vec:
    case INDEX_op_orc_vec:
    case INDEX_op_nor_vec:
    case INDEX_op_eqv_vec:
    case INDEX_op_nand_vec:
    case INDEX_op_cmp_vec:
    case INDEX_op_ssadd_vec:
    case INDEX_op_sssub_vec:
    case INDEX_op_usadd_vec:
    case INDEX_op_ussub_vec:
    case INDEX_op_smax_vec:
    case INDEX_op_smin_vec:
    case INDEX_op_umax_vec:
    case INDEX_op_umin_vec:
    case INDEX_op_shlv_vec:
    case INDEX_op_shrv_vec:
    case INDEX_op_sarv_vec:
    case INDEX_op_rotlv_vec:
    case INDEX_op_rotrv_vec:
    case INDEX_op_ppc_mrgh_vec:
    case INDEX_op_ppc_mrgl_vec:
    case INDEX_op_ppc_muleu_vec:
    case INDEX_op_ppc_mulou_vec:
    case INDEX_op_ppc_pkum_vec:
    case INDEX_op_dup2_vec:
        return C_O1_I2(v, v, v);

    case INDEX_op_not_vec:
    case INDEX_op_neg_vec:
        return C_O1_I1(v, v);

    case INDEX_op_dup_vec:
        return have_isa_3_00 ? C_O1_I1(v, vr) : C_O1_I1(v, v);

    case INDEX_op_ld_vec:
    case INDEX_op_dupm_vec:
        return C_O1_I1(v, r);

    case INDEX_op_st_vec:
        return C_O0_I2(v, r);

    case INDEX_op_bitsel_vec:
    case INDEX_op_ppc_msum_vec:
        return C_O1_I3(v, v, v, v);

    default:
        g_assert_not_reached();
    }
}
static void tcg_target_init(TCGContext *s)
{
    unsigned long hwcap = qemu_getauxval(AT_HWCAP);
    unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);

    have_isa = tcg_isa_base;
    if (hwcap & PPC_FEATURE_ARCH_2_06) {
        have_isa = tcg_isa_2_06;
    }
#ifdef PPC_FEATURE2_ARCH_2_07
    if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
        have_isa = tcg_isa_2_07;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_00
    if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
        have_isa = tcg_isa_3_00;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_10
    if (hwcap2 & PPC_FEATURE2_ARCH_3_10) {
        have_isa = tcg_isa_3_10;
    }
#endif

#ifdef PPC_FEATURE2_HAS_ISEL
    /* Prefer explicit instruction from the kernel. */
    have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
#else
    /* Fall back to knowing Power7 (2.06) has ISEL. */
    have_isel = have_isa_2_06;
#endif

    if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
        have_altivec = true;
        /* We only care about the portion of VSX that overlaps Altivec. */
        if (hwcap & PPC_FEATURE_HAS_VSX) {
            have_vsx = true;
        }
    }
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    if (have_altivec) {
        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
    }
    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);

    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);   /* tcg temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);   /* stack pointer */
#if defined(_CALL_SYSV)
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2);   /* toc pointer */
#endif
#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13);  /* thread pointer */
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
    if (USE_REG_TB) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
    }
}
#ifdef __ELF__
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
} DebugFrame;

/* We're expecting a 2 byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));

#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE EM_PPC64
#else
# define ELF_HOST_MACHINE EM_PPC
#endif
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = (-SZR & 0x7f),         /* sleb128 -SZR */
    .cie.return_column = 65,

    /* Total FDE size does not include the "len" member. */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_R1,                 /* DW_CFA_def_cfa r1, ... */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        FRAME_SIZE >> 7
    },
    .fde_reg_ofs = {
        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
    }
};
void tcg_register_jit(const void *buf, size_t buf_size)
{
    uint8_t *p = &debug_frame.fde_reg_ofs[3];
    int i;
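    /*
     * Emit one DW_CFA_offset entry per callee-saved register: the
     * opcode byte is 0x80 | column, followed by the offset from the
     * CFA as a uleb128, scaled by the CIE data alignment factor (-SZR).
     */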
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
        p[0] = 0x80 + tcg_target_callee_save_regs[i];
        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
    }

    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif /* __ELF__ */