git.proxmox.com Git - mirror_qemu.git/blob - tcg/ppc/tcg-target.c.inc
tcg: Split out tcg_out_ext32s
1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "elf.h"
26 #include "../tcg-pool.c.inc"
27 #include "../tcg-ldst.c.inc"
28
29 /*
30 * Standardize on the _CALL_FOO symbols used by GCC:
31 * Apple XCode does not define _CALL_DARWIN.
32 * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV (32-bit).
33 */
34 #if !defined(_CALL_SYSV) && \
35 !defined(_CALL_DARWIN) && \
36 !defined(_CALL_AIX) && \
37 !defined(_CALL_ELF)
38 # if defined(__APPLE__)
39 # define _CALL_DARWIN
40 # elif defined(__ELF__) && TCG_TARGET_REG_BITS == 32
41 # define _CALL_SYSV
42 # else
43 # error "Unknown ABI"
44 # endif
45 #endif
46
47 #if TCG_TARGET_REG_BITS == 64
48 # define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND
49 # define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
50 #else
51 # define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
52 # define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
53 #endif
54 #ifdef _CALL_SYSV
55 # define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
56 # define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF
57 #else
58 # define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
59 # define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
60 #endif
61
62 /* For some memory operations, we need a scratch that isn't R0. For the AIX
63 calling convention, we can re-use the TOC register since we'll be reloading
64 it at every call. Otherwise R12 will do nicely as neither a call-saved
65 register nor a parameter register. */
66 #ifdef _CALL_AIX
67 # define TCG_REG_TMP1 TCG_REG_R2
68 #else
69 # define TCG_REG_TMP1 TCG_REG_R12
70 #endif
71
72 #define TCG_VEC_TMP1 TCG_REG_V0
73 #define TCG_VEC_TMP2 TCG_REG_V1
74
75 #define TCG_REG_TB TCG_REG_R31
76 #define USE_REG_TB (TCG_TARGET_REG_BITS == 64)
77
78 /* Shorthand for size of a pointer. Avoid promotion to unsigned. */
79 #define SZP ((int)sizeof(void *))
80
81 /* Shorthand for size of a register. */
82 #define SZR (TCG_TARGET_REG_BITS / 8)
83
84 #define TCG_CT_CONST_S16 0x100
85 #define TCG_CT_CONST_U16 0x200
86 #define TCG_CT_CONST_S32 0x400
87 #define TCG_CT_CONST_U32 0x800
88 #define TCG_CT_CONST_ZERO 0x1000
89 #define TCG_CT_CONST_MONE 0x2000
90 #define TCG_CT_CONST_WSZ 0x4000
91
92 #define ALL_GENERAL_REGS 0xffffffffu
93 #define ALL_VECTOR_REGS 0xffffffff00000000ull
94
95 #ifdef CONFIG_SOFTMMU
96 #define ALL_QLOAD_REGS \
97 (ALL_GENERAL_REGS & \
98 ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | (1 << TCG_REG_R5)))
99 #define ALL_QSTORE_REGS \
100 (ALL_GENERAL_REGS & ~((1 << TCG_REG_R3) | (1 << TCG_REG_R4) | \
101 (1 << TCG_REG_R5) | (1 << TCG_REG_R6)))
102 #else
103 #define ALL_QLOAD_REGS (ALL_GENERAL_REGS & ~(1 << TCG_REG_R3))
104 #define ALL_QSTORE_REGS ALL_QLOAD_REGS
105 #endif
106
107 TCGPowerISA have_isa;
108 static bool have_isel;
109 bool have_altivec;
110 bool have_vsx;
111
112 #ifndef CONFIG_SOFTMMU
113 #define TCG_GUEST_BASE_REG 30
114 #endif
115
116 #ifdef CONFIG_DEBUG_TCG
117 static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
118 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
119 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
120 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
121 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
122 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
123 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
124 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
125 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
126 };
127 #endif
128
129 static const int tcg_target_reg_alloc_order[] = {
130 TCG_REG_R14, /* call saved registers */
131 TCG_REG_R15,
132 TCG_REG_R16,
133 TCG_REG_R17,
134 TCG_REG_R18,
135 TCG_REG_R19,
136 TCG_REG_R20,
137 TCG_REG_R21,
138 TCG_REG_R22,
139 TCG_REG_R23,
140 TCG_REG_R24,
141 TCG_REG_R25,
142 TCG_REG_R26,
143 TCG_REG_R27,
144 TCG_REG_R28,
145 TCG_REG_R29,
146 TCG_REG_R30,
147 TCG_REG_R31,
148 TCG_REG_R12, /* call clobbered, non-arguments */
149 TCG_REG_R11,
150 TCG_REG_R2,
151 TCG_REG_R13,
152 TCG_REG_R10, /* call clobbered, arguments */
153 TCG_REG_R9,
154 TCG_REG_R8,
155 TCG_REG_R7,
156 TCG_REG_R6,
157 TCG_REG_R5,
158 TCG_REG_R4,
159 TCG_REG_R3,
160
161 /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
162 TCG_REG_V2, /* call clobbered, vectors */
163 TCG_REG_V3,
164 TCG_REG_V4,
165 TCG_REG_V5,
166 TCG_REG_V6,
167 TCG_REG_V7,
168 TCG_REG_V8,
169 TCG_REG_V9,
170 TCG_REG_V10,
171 TCG_REG_V11,
172 TCG_REG_V12,
173 TCG_REG_V13,
174 TCG_REG_V14,
175 TCG_REG_V15,
176 TCG_REG_V16,
177 TCG_REG_V17,
178 TCG_REG_V18,
179 TCG_REG_V19,
180 };
181
182 static const int tcg_target_call_iarg_regs[] = {
183 TCG_REG_R3,
184 TCG_REG_R4,
185 TCG_REG_R5,
186 TCG_REG_R6,
187 TCG_REG_R7,
188 TCG_REG_R8,
189 TCG_REG_R9,
190 TCG_REG_R10
191 };
192
193 static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
194 {
195 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
196 tcg_debug_assert(slot >= 0 && slot <= 1);
197 return TCG_REG_R3 + slot;
198 }
199
200 static const int tcg_target_callee_save_regs[] = {
201 #ifdef _CALL_DARWIN
202 TCG_REG_R11,
203 #endif
204 TCG_REG_R14,
205 TCG_REG_R15,
206 TCG_REG_R16,
207 TCG_REG_R17,
208 TCG_REG_R18,
209 TCG_REG_R19,
210 TCG_REG_R20,
211 TCG_REG_R21,
212 TCG_REG_R22,
213 TCG_REG_R23,
214 TCG_REG_R24,
215 TCG_REG_R25,
216 TCG_REG_R26,
217 TCG_REG_R27, /* currently used for the global env */
218 TCG_REG_R28,
219 TCG_REG_R29,
220 TCG_REG_R30,
221 TCG_REG_R31
222 };
223
224 static inline bool in_range_b(tcg_target_long target)
225 {
226 return target == sextract64(target, 0, 26);
227 }
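/*
 * Illustrative note: I-form branches (B) carry a 24-bit LI field that is
 * shifted left by 2, i.e. a 26-bit signed byte displacement, so the target
 * must lie within roughly +/- 32 MiB of the branch.  For example,
 * in_range_b(0x1fffffc) is true, while in_range_b(0x2000000) is not.
 */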
228
229 static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
230 const tcg_insn_unit *target)
231 {
232 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
233 tcg_debug_assert(in_range_b(disp));
234 return disp & 0x3fffffc;
235 }
236
237 static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
238 {
239 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
240 ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
241
242 if (in_range_b(disp)) {
243 *src_rw = (*src_rw & ~0x3fffffc) | (disp & 0x3fffffc);
244 return true;
245 }
246 return false;
247 }
248
249 static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
250 const tcg_insn_unit *target)
251 {
252 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
253 tcg_debug_assert(disp == (int16_t) disp);
254 return disp & 0xfffc;
255 }
256
257 static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
258 {
259 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
260 ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
261
262 if (disp == (int16_t) disp) {
263 *src_rw = (*src_rw & ~0xfffc) | (disp & 0xfffc);
264 return true;
265 }
266 return false;
267 }
268
269 /* test if a constant matches the constraint */
270 static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
271 {
272 if (ct & TCG_CT_CONST) {
273 return 1;
274 }
275
276 /* The only 32-bit constraint we use aside from
277 TCG_CT_CONST is TCG_CT_CONST_S16. */
278 if (type == TCG_TYPE_I32) {
279 val = (int32_t)val;
280 }
281
282 if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
283 return 1;
284 } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
285 return 1;
286 } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
287 return 1;
288 } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
289 return 1;
290 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
291 return 1;
292 } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
293 return 1;
294 } else if ((ct & TCG_CT_CONST_WSZ)
295 && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
296 return 1;
297 }
298 return 0;
299 }
300
301 #define OPCD(opc) ((opc)<<26)
302 #define XO19(opc) (OPCD(19)|((opc)<<1))
303 #define MD30(opc) (OPCD(30)|((opc)<<2))
304 #define MDS30(opc) (OPCD(30)|((opc)<<1))
305 #define XO31(opc) (OPCD(31)|((opc)<<1))
306 #define XO58(opc) (OPCD(58)|(opc))
307 #define XO62(opc) (OPCD(62)|(opc))
308 #define VX4(opc) (OPCD(4)|(opc))
309
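/*
 * Illustrative example of how these opcode helpers compose: the X-form
 * "add" defined below is XO31(266) = OPCD(31) | (266 << 1) = 0x7c000214,
 * so e.g. tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R4, TCG_REG_R5))
 * emits 0x7c642a14, i.e. "add r3,r4,r5".
 */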
310 #define B OPCD( 18)
311 #define BC OPCD( 16)
312 #define LBZ OPCD( 34)
313 #define LHZ OPCD( 40)
314 #define LHA OPCD( 42)
315 #define LWZ OPCD( 32)
316 #define LWZUX XO31( 55)
317 #define STB OPCD( 38)
318 #define STH OPCD( 44)
319 #define STW OPCD( 36)
320
321 #define STD XO62( 0)
322 #define STDU XO62( 1)
323 #define STDX XO31(149)
324
325 #define LD XO58( 0)
326 #define LDX XO31( 21)
327 #define LDU XO58( 1)
328 #define LDUX XO31( 53)
329 #define LWA XO58( 2)
330 #define LWAX XO31(341)
331
332 #define ADDIC OPCD( 12)
333 #define ADDI OPCD( 14)
334 #define ADDIS OPCD( 15)
335 #define ORI OPCD( 24)
336 #define ORIS OPCD( 25)
337 #define XORI OPCD( 26)
338 #define XORIS OPCD( 27)
339 #define ANDI OPCD( 28)
340 #define ANDIS OPCD( 29)
341 #define MULLI OPCD( 7)
342 #define CMPLI OPCD( 10)
343 #define CMPI OPCD( 11)
344 #define SUBFIC OPCD( 8)
345
346 #define LWZU OPCD( 33)
347 #define STWU OPCD( 37)
348
349 #define RLWIMI OPCD( 20)
350 #define RLWINM OPCD( 21)
351 #define RLWNM OPCD( 23)
352
353 #define RLDICL MD30( 0)
354 #define RLDICR MD30( 1)
355 #define RLDIMI MD30( 3)
356 #define RLDCL MDS30( 8)
357
358 #define BCLR XO19( 16)
359 #define BCCTR XO19(528)
360 #define CRAND XO19(257)
361 #define CRANDC XO19(129)
362 #define CRNAND XO19(225)
363 #define CROR XO19(449)
364 #define CRNOR XO19( 33)
365
366 #define EXTSB XO31(954)
367 #define EXTSH XO31(922)
368 #define EXTSW XO31(986)
369 #define ADD XO31(266)
370 #define ADDE XO31(138)
371 #define ADDME XO31(234)
372 #define ADDZE XO31(202)
373 #define ADDC XO31( 10)
374 #define AND XO31( 28)
375 #define SUBF XO31( 40)
376 #define SUBFC XO31( 8)
377 #define SUBFE XO31(136)
378 #define SUBFME XO31(232)
379 #define SUBFZE XO31(200)
380 #define OR XO31(444)
381 #define XOR XO31(316)
382 #define MULLW XO31(235)
383 #define MULHW XO31( 75)
384 #define MULHWU XO31( 11)
385 #define DIVW XO31(491)
386 #define DIVWU XO31(459)
387 #define MODSW XO31(779)
388 #define MODUW XO31(267)
389 #define CMP XO31( 0)
390 #define CMPL XO31( 32)
391 #define LHBRX XO31(790)
392 #define LWBRX XO31(534)
393 #define LDBRX XO31(532)
394 #define STHBRX XO31(918)
395 #define STWBRX XO31(662)
396 #define STDBRX XO31(660)
397 #define MFSPR XO31(339)
398 #define MTSPR XO31(467)
399 #define SRAWI XO31(824)
400 #define NEG XO31(104)
401 #define MFCR XO31( 19)
402 #define MFOCRF (MFCR | (1u << 20))
403 #define NOR XO31(124)
404 #define CNTLZW XO31( 26)
405 #define CNTLZD XO31( 58)
406 #define CNTTZW XO31(538)
407 #define CNTTZD XO31(570)
408 #define CNTPOPW XO31(378)
409 #define CNTPOPD XO31(506)
410 #define ANDC XO31( 60)
411 #define ORC XO31(412)
412 #define EQV XO31(284)
413 #define NAND XO31(476)
414 #define ISEL XO31( 15)
415
416 #define MULLD XO31(233)
417 #define MULHD XO31( 73)
418 #define MULHDU XO31( 9)
419 #define DIVD XO31(489)
420 #define DIVDU XO31(457)
421 #define MODSD XO31(777)
422 #define MODUD XO31(265)
423
424 #define LBZX XO31( 87)
425 #define LHZX XO31(279)
426 #define LHAX XO31(343)
427 #define LWZX XO31( 23)
428 #define STBX XO31(215)
429 #define STHX XO31(407)
430 #define STWX XO31(151)
431
432 #define EIEIO XO31(854)
433 #define HWSYNC XO31(598)
434 #define LWSYNC (HWSYNC | (1u << 21))
435
436 #define SPR(a, b) ((((a)<<5)|(b))<<11)
437 #define LR SPR(8, 0)
438 #define CTR SPR(9, 0)
439
440 #define SLW XO31( 24)
441 #define SRW XO31(536)
442 #define SRAW XO31(792)
443
444 #define SLD XO31( 27)
445 #define SRD XO31(539)
446 #define SRAD XO31(794)
447 #define SRADI XO31(413<<1)
448
449 #define BRH XO31(219)
450 #define BRW XO31(155)
451 #define BRD XO31(187)
452
453 #define TW XO31( 4)
454 #define TRAP (TW | TO(31))
455
456 #define NOP ORI /* ori 0,0,0 */
457
458 #define LVX XO31(103)
459 #define LVEBX XO31(7)
460 #define LVEHX XO31(39)
461 #define LVEWX XO31(71)
462 #define LXSDX (XO31(588) | 1) /* v2.06, force tx=1 */
463 #define LXVDSX (XO31(332) | 1) /* v2.06, force tx=1 */
464 #define LXSIWZX (XO31(12) | 1) /* v2.07, force tx=1 */
465 #define LXV (OPCD(61) | 8 | 1) /* v3.00, force tx=1 */
466 #define LXSD (OPCD(57) | 2) /* v3.00 */
467 #define LXVWSX (XO31(364) | 1) /* v3.00, force tx=1 */
468
469 #define STVX XO31(231)
470 #define STVEWX XO31(199)
471 #define STXSDX (XO31(716) | 1) /* v2.06, force sx=1 */
472 #define STXSIWX (XO31(140) | 1) /* v2.07, force sx=1 */
473 #define STXV (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */
474 #define STXSD (OPCD(61) | 2) /* v3.00 */
475
476 #define VADDSBS VX4(768)
477 #define VADDUBS VX4(512)
478 #define VADDUBM VX4(0)
479 #define VADDSHS VX4(832)
480 #define VADDUHS VX4(576)
481 #define VADDUHM VX4(64)
482 #define VADDSWS VX4(896)
483 #define VADDUWS VX4(640)
484 #define VADDUWM VX4(128)
485 #define VADDUDM VX4(192) /* v2.07 */
486
487 #define VSUBSBS VX4(1792)
488 #define VSUBUBS VX4(1536)
489 #define VSUBUBM VX4(1024)
490 #define VSUBSHS VX4(1856)
491 #define VSUBUHS VX4(1600)
492 #define VSUBUHM VX4(1088)
493 #define VSUBSWS VX4(1920)
494 #define VSUBUWS VX4(1664)
495 #define VSUBUWM VX4(1152)
496 #define VSUBUDM VX4(1216) /* v2.07 */
497
498 #define VNEGW (VX4(1538) | (6 << 16)) /* v3.00 */
499 #define VNEGD (VX4(1538) | (7 << 16)) /* v3.00 */
500
501 #define VMAXSB VX4(258)
502 #define VMAXSH VX4(322)
503 #define VMAXSW VX4(386)
504 #define VMAXSD VX4(450) /* v2.07 */
505 #define VMAXUB VX4(2)
506 #define VMAXUH VX4(66)
507 #define VMAXUW VX4(130)
508 #define VMAXUD VX4(194) /* v2.07 */
509 #define VMINSB VX4(770)
510 #define VMINSH VX4(834)
511 #define VMINSW VX4(898)
512 #define VMINSD VX4(962) /* v2.07 */
513 #define VMINUB VX4(514)
514 #define VMINUH VX4(578)
515 #define VMINUW VX4(642)
516 #define VMINUD VX4(706) /* v2.07 */
517
518 #define VCMPEQUB VX4(6)
519 #define VCMPEQUH VX4(70)
520 #define VCMPEQUW VX4(134)
521 #define VCMPEQUD VX4(199) /* v2.07 */
522 #define VCMPGTSB VX4(774)
523 #define VCMPGTSH VX4(838)
524 #define VCMPGTSW VX4(902)
525 #define VCMPGTSD VX4(967) /* v2.07 */
526 #define VCMPGTUB VX4(518)
527 #define VCMPGTUH VX4(582)
528 #define VCMPGTUW VX4(646)
529 #define VCMPGTUD VX4(711) /* v2.07 */
530 #define VCMPNEB VX4(7) /* v3.00 */
531 #define VCMPNEH VX4(71) /* v3.00 */
532 #define VCMPNEW VX4(135) /* v3.00 */
533
534 #define VSLB VX4(260)
535 #define VSLH VX4(324)
536 #define VSLW VX4(388)
537 #define VSLD VX4(1476) /* v2.07 */
538 #define VSRB VX4(516)
539 #define VSRH VX4(580)
540 #define VSRW VX4(644)
541 #define VSRD VX4(1732) /* v2.07 */
542 #define VSRAB VX4(772)
543 #define VSRAH VX4(836)
544 #define VSRAW VX4(900)
545 #define VSRAD VX4(964) /* v2.07 */
546 #define VRLB VX4(4)
547 #define VRLH VX4(68)
548 #define VRLW VX4(132)
549 #define VRLD VX4(196) /* v2.07 */
550
551 #define VMULEUB VX4(520)
552 #define VMULEUH VX4(584)
553 #define VMULEUW VX4(648) /* v2.07 */
554 #define VMULOUB VX4(8)
555 #define VMULOUH VX4(72)
556 #define VMULOUW VX4(136) /* v2.07 */
557 #define VMULUWM VX4(137) /* v2.07 */
558 #define VMULLD VX4(457) /* v3.10 */
559 #define VMSUMUHM VX4(38)
560
561 #define VMRGHB VX4(12)
562 #define VMRGHH VX4(76)
563 #define VMRGHW VX4(140)
564 #define VMRGLB VX4(268)
565 #define VMRGLH VX4(332)
566 #define VMRGLW VX4(396)
567
568 #define VPKUHUM VX4(14)
569 #define VPKUWUM VX4(78)
570
571 #define VAND VX4(1028)
572 #define VANDC VX4(1092)
573 #define VNOR VX4(1284)
574 #define VOR VX4(1156)
575 #define VXOR VX4(1220)
576 #define VEQV VX4(1668) /* v2.07 */
577 #define VNAND VX4(1412) /* v2.07 */
578 #define VORC VX4(1348) /* v2.07 */
579
580 #define VSPLTB VX4(524)
581 #define VSPLTH VX4(588)
582 #define VSPLTW VX4(652)
583 #define VSPLTISB VX4(780)
584 #define VSPLTISH VX4(844)
585 #define VSPLTISW VX4(908)
586
587 #define VSLDOI VX4(44)
588
589 #define XXPERMDI (OPCD(60) | (10 << 3) | 7) /* v2.06, force ax=bx=tx=1 */
590 #define XXSEL (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
591 #define XXSPLTIB (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */
592
593 #define MFVSRD (XO31(51) | 1) /* v2.07, force sx=1 */
594 #define MFVSRWZ (XO31(115) | 1) /* v2.07, force sx=1 */
595 #define MTVSRD (XO31(179) | 1) /* v2.07, force tx=1 */
596 #define MTVSRWZ (XO31(243) | 1) /* v2.07, force tx=1 */
597 #define MTVSRDD (XO31(435) | 1) /* v3.00, force tx=1 */
598 #define MTVSRWS (XO31(403) | 1) /* v3.00, force tx=1 */
599
600 #define RT(r) ((r)<<21)
601 #define RS(r) ((r)<<21)
602 #define RA(r) ((r)<<16)
603 #define RB(r) ((r)<<11)
604 #define TO(t) ((t)<<21)
605 #define SH(s) ((s)<<11)
606 #define MB(b) ((b)<<6)
607 #define ME(e) ((e)<<1)
608 #define BO(o) ((o)<<21)
609 #define MB64(b) ((b)<<5)
610 #define FXM(b) (1 << (19 - (b)))
611
612 #define VRT(r) (((r) & 31) << 21)
613 #define VRA(r) (((r) & 31) << 16)
614 #define VRB(r) (((r) & 31) << 11)
615 #define VRC(r) (((r) & 31) << 6)
616
617 #define LK 1
618
619 #define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
620 #define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
621 #define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
622 #define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
623
624 #define BF(n) ((n)<<23)
625 #define BI(n, c) (((c)+((n)*4))<<16)
626 #define BT(n, c) (((c)+((n)*4))<<21)
627 #define BA(n, c) (((c)+((n)*4))<<16)
628 #define BB(n, c) (((c)+((n)*4))<<11)
629 #define BC_(n, c) (((c)+((n)*4))<<6)
630
631 #define BO_COND_TRUE BO(12)
632 #define BO_COND_FALSE BO( 4)
633 #define BO_ALWAYS BO(20)
634
635 enum {
636 CR_LT,
637 CR_GT,
638 CR_EQ,
639 CR_SO
640 };
641
642 static const uint32_t tcg_to_bc[] = {
643 [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE,
644 [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE,
645 [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE,
646 [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE,
647 [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE,
648 [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE,
649 [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
650 [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
651 [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
652 [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
653 };
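/*
 * Illustrative example: the TCG_COND_EQ entry above assembles to
 * 0x419e0000, i.e. "beq cr7, <target>" with a zero displacement; the
 * 14-bit branch displacement is filled in later via reloc_pc14().
 */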
654
655 /* The low bit here is set if the RA and RB fields must be inverted. */
656 static const uint32_t tcg_to_isel[] = {
657 [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ),
658 [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1,
659 [TCG_COND_LT] = ISEL | BC_(7, CR_LT),
660 [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1,
661 [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1,
662 [TCG_COND_GT] = ISEL | BC_(7, CR_GT),
663 [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
664 [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
665 [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
666 [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
667 };
668
669 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
670 intptr_t value, intptr_t addend)
671 {
672 const tcg_insn_unit *target;
673 int16_t lo;
674 int32_t hi;
675
676 value += addend;
677 target = (const tcg_insn_unit *)value;
678
679 switch (type) {
680 case R_PPC_REL14:
681 return reloc_pc14(code_ptr, target);
682 case R_PPC_REL24:
683 return reloc_pc24(code_ptr, target);
684 case R_PPC_ADDR16:
685 /*
686 * We are (slightly) abusing this relocation type. In particular,
687 * assert that the low 2 bits are zero, and do not modify them.
688 * That way we can use this with LD et al that have opcode bits
689 * in the low 2 bits of the insn.
690 */
691 if ((value & 3) || value != (int16_t)value) {
692 return false;
693 }
694 *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
695 break;
696 case R_PPC_ADDR32:
697 /*
698 * We are abusing this relocation type. Again, this points to
699 * a pair of insns, lis + load. This is an absolute address
700 * relocation for PPC32 so the lis cannot be removed.
701 */
702 lo = value;
703 hi = value - lo;
704 if (hi + lo != value) {
705 return false;
706 }
707 code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
708 code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
709 break;
710 default:
711 g_assert_not_reached();
712 }
713 return true;
714 }
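/*
 * Worked example for the R_PPC_ADDR32 case above: for value 0x12348004,
 * lo = (int16_t)0x8004 = -0x7ffc and hi = 0x12350000, so the "lis" gets
 * immediate 0x1235 and the following load gets displacement -0x7ffc; at
 * run time 0x12350000 + (-0x7ffc) reproduces 0x12348004.
 */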
715
716 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
717 TCGReg base, tcg_target_long offset);
718
719 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
720 {
721 if (ret == arg) {
722 return true;
723 }
724 switch (type) {
725 case TCG_TYPE_I64:
726 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
727 /* fallthru */
728 case TCG_TYPE_I32:
729 if (ret < TCG_REG_V0) {
730 if (arg < TCG_REG_V0) {
731 tcg_out32(s, OR | SAB(arg, ret, arg));
732 break;
733 } else if (have_isa_2_07) {
734 tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
735 | VRT(arg) | RA(ret));
736 break;
737 } else {
738 /* Altivec does not support vector->integer moves. */
739 return false;
740 }
741 } else if (arg < TCG_REG_V0) {
742 if (have_isa_2_07) {
743 tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
744 | VRT(ret) | RA(arg));
745 break;
746 } else {
747 /* Altivec does not support integer->vector moves. */
748 return false;
749 }
750 }
751 /* fallthru */
752 case TCG_TYPE_V64:
753 case TCG_TYPE_V128:
754 tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
755 tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
756 break;
757 default:
758 g_assert_not_reached();
759 }
760 return true;
761 }
762
763 static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
764 int sh, int mb)
765 {
766 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
767 sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
768 mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
769 tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
770 }
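/*
 * Example: tcg_out_rld(s, RLDICL, dst, src, 0, 32) emits
 * "rldicl dst,src,0,32" (i.e. clrldi dst,src,32), which clears the upper
 * 32 bits; tcg_out_ext32u() below relies on exactly this form.  The
 * shuffling above packs the 6-bit SH and MB values into the split fields
 * of the MD instruction format.
 */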
771
772 static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
773 int sh, int mb, int me)
774 {
775 tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
776 }
777
778 static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
779 {
780 tcg_out32(s, EXTSB | RA(dst) | RS(src));
781 }
782
783 static void tcg_out_ext8u(TCGContext *s, TCGReg dst, TCGReg src)
784 {
785 tcg_out32(s, ANDI | SAI(src, dst, 0xff));
786 }
787
788 static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
789 {
790 tcg_out32(s, EXTSH | RA(dst) | RS(src));
791 }
792
793 static void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
794 {
795 tcg_out32(s, ANDI | SAI(src, dst, 0xffff));
796 }
797
798 static void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
799 {
800 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
801 tcg_out32(s, EXTSW | RA(dst) | RS(src));
802 }
803
804 static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
805 {
806 tcg_out_rld(s, RLDICL, dst, src, 0, 32);
807 }
808
809 static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
810 {
811 tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
812 }
813
814 static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
815 {
816 tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
817 }
818
819 static inline void tcg_out_sari32(TCGContext *s, TCGReg dst, TCGReg src, int c)
820 {
821 /* Limit immediate shift count lest we create an illegal insn. */
822 tcg_out32(s, SRAWI | RA(dst) | RS(src) | SH(c & 31));
823 }
824
825 static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
826 {
827 tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
828 }
829
830 static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
831 {
832 tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
833 }
834
835 static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
836 {
837 tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
838 }
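/*
 * Note: sradi takes a 6-bit shift amount; bits 0..4 go in the SH field and
 * bit 5 lands in instruction bit 1, hence SH(c & 0x1f) | ((c >> 4) & 2).
 * E.g. c = 40 encodes as SH(8) with that extra bit set.
 */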
839
840 static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
841 {
842 TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
843
844 if (have_isa_3_10) {
845 tcg_out32(s, BRH | RA(dst) | RS(src));
846 if (flags & TCG_BSWAP_OS) {
847 tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst);
848 } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
849 tcg_out_ext16u(s, dst, dst);
850 }
851 return;
852 }
853
854 /*
855 * In the following,
856 * dep(a, b, m) -> (a & ~m) | (b & m)
857 *
858 * Begin with: src = xxxxabcd
859 */
860 /* tmp = rol32(src, 24) & 0x000000ff = 0000000c */
861 tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
862 /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00) = 000000dc */
863 tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);
864
865 if (flags & TCG_BSWAP_OS) {
866 tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp);
867 } else {
868 tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
869 }
870 }
871
872 static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
873 {
874 TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
875
876 if (have_isa_3_10) {
877 tcg_out32(s, BRW | RA(dst) | RS(src));
878 if (flags & TCG_BSWAP_OS) {
879 tcg_out_ext32s(s, dst, dst);
880 } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
881 tcg_out_ext32u(s, dst, dst);
882 }
883 return;
884 }
885
886 /*
887 * Stolen from gcc's builtin_bswap32.
888 * In the following,
889 * dep(a, b, m) -> (a & ~m) | (b & m)
890 *
891 * Begin with: src = xxxxabcd
892 */
893 /* tmp = rol32(src, 8) & 0xffffffff = 0000bcda */
894 tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
895 /* tmp = dep(tmp, rol32(src, 24), 0xff000000) = 0000dcda */
896 tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
897 /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00) = 0000dcba */
898 tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);
899
900 if (flags & TCG_BSWAP_OS) {
901 tcg_out_ext32s(s, dst, tmp);
902 } else {
903 tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
904 }
905 }
906
907 static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
908 {
909 TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
910 TCGReg t1 = dst == src ? dst : TCG_REG_R0;
911
912 if (have_isa_3_10) {
913 tcg_out32(s, BRD | RA(dst) | RS(src));
914 return;
915 }
916
917 /*
918 * In the following,
919 * dep(a, b, m) -> (a & ~m) | (b & m)
920 *
921 * Begin with: src = abcdefgh
922 */
923 /* t0 = rol32(src, 8) & 0xffffffff = 0000fghe */
924 tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
925 /* t0 = dep(t0, rol32(src, 24), 0xff000000) = 0000hghe */
926 tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
927 /* t0 = dep(t0, rol32(src, 24), 0x0000ff00) = 0000hgfe */
928 tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);
929
930 /* t0 = rol64(t0, 32) = hgfe0000 */
931 tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
932 /* t1 = rol64(src, 32) = efghabcd */
933 tcg_out_rld(s, RLDICL, t1, src, 32, 0);
934
 935     /* t0 = dep(t0, rol32(t1, 8), 0xffffffff) = hgfebcda */
936 tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
937 /* t0 = dep(t0, rol32(t1, 24), 0xff000000) = hgfedcda */
938 tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
939 /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00) = hgfedcba */
940 tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);
941
942 tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
943 }
944
 945 /* Emit a move of arg into ret, if it can be done in one insn. */
946 static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
947 {
948 if (arg == (int16_t)arg) {
949 tcg_out32(s, ADDI | TAI(ret, 0, arg));
950 return true;
951 }
952 if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
953 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
954 return true;
955 }
956 return false;
957 }
958
959 static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
960 tcg_target_long arg, bool in_prologue)
961 {
962 intptr_t tb_diff;
963 tcg_target_long tmp;
964 int shift;
965
966 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
967
968 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
969 arg = (int32_t)arg;
970 }
971
972 /* Load 16-bit immediates with one insn. */
973 if (tcg_out_movi_one(s, ret, arg)) {
974 return;
975 }
976
977 /* Load addresses within the TB with one insn. */
978 tb_diff = tcg_tbrel_diff(s, (void *)arg);
979 if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
980 tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
981 return;
982 }
983
984 /* Load 32-bit immediates with two insns. Note that we've already
985 eliminated bare ADDIS, so we know both insns are required. */
986 if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
987 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
988 tcg_out32(s, ORI | SAI(ret, ret, arg));
989 return;
990 }
991 if (arg == (uint32_t)arg && !(arg & 0x8000)) {
992 tcg_out32(s, ADDI | TAI(ret, 0, arg));
993 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
994 return;
995 }
996
997 /* Load masked 16-bit value. */
998 if (arg > 0 && (arg & 0x8000)) {
999 tmp = arg | 0x7fff;
1000 if ((tmp & (tmp + 1)) == 0) {
1001 int mb = clz64(tmp + 1) + 1;
1002 tcg_out32(s, ADDI | TAI(ret, 0, arg));
1003 tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
1004 return;
1005 }
1006 }
1007
1008 /* Load common masks with 2 insns. */
1009 shift = ctz64(arg);
1010 tmp = arg >> shift;
1011 if (tmp == (int16_t)tmp) {
1012 tcg_out32(s, ADDI | TAI(ret, 0, tmp));
1013 tcg_out_shli64(s, ret, ret, shift);
1014 return;
1015 }
1016 shift = clz64(arg);
1017 if (tcg_out_movi_one(s, ret, arg << shift)) {
1018 tcg_out_shri64(s, ret, ret, shift);
1019 return;
1020 }
1021
1022 /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */
1023 if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
1024 tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
1025 return;
1026 }
1027
1028 /* Use the constant pool, if possible. */
1029 if (!in_prologue && USE_REG_TB) {
1030 new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
1031 tcg_tbrel_diff(s, NULL));
1032 tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
1033 return;
1034 }
1035
1036 tmp = arg >> 31 >> 1;
1037 tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
1038 if (tmp) {
1039 tcg_out_shli64(s, ret, ret, 32);
1040 }
1041 if (arg & 0xffff0000) {
1042 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
1043 }
1044 if (arg & 0xffff) {
1045 tcg_out32(s, ORI | SAI(ret, ret, arg));
1046 }
1047 }
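/*
 * Illustrative example of the generic 64-bit path above (assuming none of
 * the earlier shortcuts apply): arg = 0x123456789abcdef0 is built as
 *   addis ret,0,0x1234 ; ori ret,ret,0x5678    (high 32 bits)
 *   sldi  ret,ret,32
 *   oris  ret,ret,0x9abc ; ori ret,ret,0xdef0  (low 32 bits)
 * i.e. five instructions in the worst case.
 */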
1048
1049 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
1050 TCGReg ret, int64_t val)
1051 {
1052 uint32_t load_insn;
1053 int rel, low;
1054 intptr_t add;
1055
1056 switch (vece) {
1057 case MO_8:
1058 low = (int8_t)val;
1059 if (low >= -16 && low < 16) {
1060 tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
1061 return;
1062 }
1063 if (have_isa_3_00) {
1064 tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
1065 return;
1066 }
1067 break;
1068
1069 case MO_16:
1070 low = (int16_t)val;
1071 if (low >= -16 && low < 16) {
1072 tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
1073 return;
1074 }
1075 break;
1076
1077 case MO_32:
1078 low = (int32_t)val;
1079 if (low >= -16 && low < 16) {
1080 tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
1081 return;
1082 }
1083 break;
1084 }
1085
1086 /*
1087 * Otherwise we must load the value from the constant pool.
1088 */
1089 if (USE_REG_TB) {
1090 rel = R_PPC_ADDR16;
1091 add = tcg_tbrel_diff(s, NULL);
1092 } else {
1093 rel = R_PPC_ADDR32;
1094 add = 0;
1095 }
1096
1097 if (have_vsx) {
1098 load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
1099 load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
1100 if (TCG_TARGET_REG_BITS == 64) {
1101 new_pool_label(s, val, rel, s->code_ptr, add);
1102 } else {
1103 new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val);
1104 }
1105 } else {
1106 load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
1107 if (TCG_TARGET_REG_BITS == 64) {
1108 new_pool_l2(s, rel, s->code_ptr, add, val, val);
1109 } else {
1110 new_pool_l4(s, rel, s->code_ptr, add,
1111 val >> 32, val, val >> 32, val);
1112 }
1113 }
1114
1115 if (USE_REG_TB) {
1116 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
1117 load_insn |= RA(TCG_REG_TB);
1118 } else {
1119 tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
1120 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
1121 }
1122 tcg_out32(s, load_insn);
1123 }
1124
1125 static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
1126 tcg_target_long arg)
1127 {
1128 switch (type) {
1129 case TCG_TYPE_I32:
1130 case TCG_TYPE_I64:
1131 tcg_debug_assert(ret < TCG_REG_V0);
1132 tcg_out_movi_int(s, type, ret, arg, false);
1133 break;
1134
1135 default:
1136 g_assert_not_reached();
1137 }
1138 }
1139
1140 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
1141 tcg_target_long imm)
1142 {
1143 /* This function is only used for passing structs by reference. */
1144 g_assert_not_reached();
1145 }
1146
1147 static bool mask_operand(uint32_t c, int *mb, int *me)
1148 {
1149 uint32_t lsb, test;
1150
1151 /* Accept a bit pattern like:
1152 0....01....1
1153 1....10....0
1154 0..01..10..0
1155 Keep track of the transitions. */
1156 if (c == 0 || c == -1) {
1157 return false;
1158 }
1159 test = c;
1160 lsb = test & -test;
1161 test += lsb;
1162 if (test & (test - 1)) {
1163 return false;
1164 }
1165
1166 *me = clz32(lsb);
1167 *mb = test ? clz32(test & -test) + 1 : 0;
1168 return true;
1169 }
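/*
 * Worked example: mask_operand(0x00ffff00, &mb, &me) yields mb = 8 and
 * me = 23, so tcg_out_andi32() below can use a single
 * "rlwinm dst,src,0,8,23" instead of loading the constant.
 */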
1170
1171 static bool mask64_operand(uint64_t c, int *mb, int *me)
1172 {
1173 uint64_t lsb;
1174
1175 if (c == 0) {
1176 return false;
1177 }
1178
1179 lsb = c & -c;
1180 /* Accept 1..10..0. */
1181 if (c == -lsb) {
1182 *mb = 0;
1183 *me = clz64(lsb);
1184 return true;
1185 }
1186 /* Accept 0..01..1. */
1187 if (lsb == 1 && (c & (c + 1)) == 0) {
1188 *mb = clz64(c + 1) + 1;
1189 *me = 63;
1190 return true;
1191 }
1192 return false;
1193 }
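/*
 * Worked examples: 0xffffffff00000000 is the 1..10..0 form (mb = 0,
 * me = 31, handled with RLDICR), while 0x00000000ffffffff is the 0..01..1
 * form (mb = 32, me = 63, handled with RLDICL) -- see tcg_out_andi64().
 */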
1194
1195 static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1196 {
1197 int mb, me;
1198
1199 if (mask_operand(c, &mb, &me)) {
1200 tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
1201 } else if ((c & 0xffff) == c) {
1202 tcg_out32(s, ANDI | SAI(src, dst, c));
1203 return;
1204 } else if ((c & 0xffff0000) == c) {
1205 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1206 return;
1207 } else {
1208 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
1209 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1210 }
1211 }
1212
1213 static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
1214 {
1215 int mb, me;
1216
1217 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1218 if (mask64_operand(c, &mb, &me)) {
1219 if (mb == 0) {
1220 tcg_out_rld(s, RLDICR, dst, src, 0, me);
1221 } else {
1222 tcg_out_rld(s, RLDICL, dst, src, 0, mb);
1223 }
1224 } else if ((c & 0xffff) == c) {
1225 tcg_out32(s, ANDI | SAI(src, dst, c));
1226 return;
1227 } else if ((c & 0xffff0000) == c) {
1228 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1229 return;
1230 } else {
1231 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
1232 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1233 }
1234 }
1235
1236 static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
1237 int op_lo, int op_hi)
1238 {
1239 if (c >> 16) {
1240 tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
1241 src = dst;
1242 }
1243 if (c & 0xffff) {
1244 tcg_out32(s, op_lo | SAI(src, dst, c));
1245 src = dst;
1246 }
1247 }
1248
1249 static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1250 {
1251 tcg_out_zori32(s, dst, src, c, ORI, ORIS);
1252 }
1253
1254 static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1255 {
1256 tcg_out_zori32(s, dst, src, c, XORI, XORIS);
1257 }
1258
1259 static void tcg_out_b(TCGContext *s, int mask, const tcg_insn_unit *target)
1260 {
1261 ptrdiff_t disp = tcg_pcrel_diff(s, target);
1262 if (in_range_b(disp)) {
1263 tcg_out32(s, B | (disp & 0x3fffffc) | mask);
1264 } else {
1265 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
1266 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
1267 tcg_out32(s, BCCTR | BO_ALWAYS | mask);
1268 }
1269 }
1270
1271 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
1272 TCGReg base, tcg_target_long offset)
1273 {
1274 tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
1275 bool is_int_store = false;
1276 TCGReg rs = TCG_REG_TMP1;
1277
1278 switch (opi) {
1279 case LD: case LWA:
1280 align = 3;
1281 /* FALLTHRU */
1282 default:
1283 if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
1284 rs = rt;
1285 break;
1286 }
1287 break;
1288 case LXSD:
1289 case STXSD:
1290 align = 3;
1291 break;
1292 case LXV:
1293 case STXV:
1294 align = 15;
1295 break;
1296 case STD:
1297 align = 3;
1298 /* FALLTHRU */
1299 case STB: case STH: case STW:
1300 is_int_store = true;
1301 break;
1302 }
1303
1304 /* For unaligned, or very large offsets, use the indexed form. */
1305 if (offset & align || offset != (int32_t)offset || opi == 0) {
1306 if (rs == base) {
1307 rs = TCG_REG_R0;
1308 }
1309 tcg_debug_assert(!is_int_store || rs != rt);
1310 tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
1311 tcg_out32(s, opx | TAB(rt & 31, base, rs));
1312 return;
1313 }
1314
1315 l0 = (int16_t)offset;
1316 offset = (offset - l0) >> 16;
1317 l1 = (int16_t)offset;
1318
1319 if (l1 < 0 && orig >= 0) {
1320 extra = 0x4000;
1321 l1 = (int16_t)(offset - 0x4000);
1322 }
1323 if (l1) {
1324 tcg_out32(s, ADDIS | TAI(rs, base, l1));
1325 base = rs;
1326 }
1327 if (extra) {
1328 tcg_out32(s, ADDIS | TAI(rs, base, extra));
1329 base = rs;
1330 }
1331 if (opi != ADDI || base != rt || l0 != 0) {
1332 tcg_out32(s, opi | TAI(rt & 31, base, l0));
1333 }
1334 }
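/*
 * Illustrative example: a 32-bit offset such as 0x12345678 with the LWZ
 * opcode splits into l1 = 0x1234 and l0 = 0x5678, producing roughly
 *   addis tmp,base,0x1234 ; lwz rt,0x5678(tmp)
 * The "extra" 0x4000 step above handles offsets near the top of the
 * positive 32-bit range, where the high part would otherwise come out
 * negative even though the original offset is non-negative.
 */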
1335
1336 static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
1337 TCGReg va, TCGReg vb, int shb)
1338 {
1339 tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
1340 }
1341
1342 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
1343 TCGReg base, intptr_t offset)
1344 {
1345 int shift;
1346
1347 switch (type) {
1348 case TCG_TYPE_I32:
1349 if (ret < TCG_REG_V0) {
1350 tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
1351 break;
1352 }
1353 if (have_isa_2_07 && have_vsx) {
1354 tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
1355 break;
1356 }
1357 tcg_debug_assert((offset & 3) == 0);
1358 tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
1359 shift = (offset - 4) & 0xc;
1360 if (shift) {
1361 tcg_out_vsldoi(s, ret, ret, ret, shift);
1362 }
1363 break;
1364 case TCG_TYPE_I64:
1365 if (ret < TCG_REG_V0) {
1366 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1367 tcg_out_mem_long(s, LD, LDX, ret, base, offset);
1368 break;
1369 }
1370 /* fallthru */
1371 case TCG_TYPE_V64:
1372 tcg_debug_assert(ret >= TCG_REG_V0);
1373 if (have_vsx) {
1374 tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
1375 ret, base, offset);
1376 break;
1377 }
1378 tcg_debug_assert((offset & 7) == 0);
1379 tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
1380 if (offset & 8) {
1381 tcg_out_vsldoi(s, ret, ret, ret, 8);
1382 }
1383 break;
1384 case TCG_TYPE_V128:
1385 tcg_debug_assert(ret >= TCG_REG_V0);
1386 tcg_debug_assert((offset & 15) == 0);
1387 tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
1388 LVX, ret, base, offset);
1389 break;
1390 default:
1391 g_assert_not_reached();
1392 }
1393 }
1394
1395 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1396 TCGReg base, intptr_t offset)
1397 {
1398 int shift;
1399
1400 switch (type) {
1401 case TCG_TYPE_I32:
1402 if (arg < TCG_REG_V0) {
1403 tcg_out_mem_long(s, STW, STWX, arg, base, offset);
1404 break;
1405 }
1406 if (have_isa_2_07 && have_vsx) {
1407 tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
1408 break;
1409 }
1410 tcg_debug_assert((offset & 3) == 0);
1412 shift = (offset - 4) & 0xc;
1413 if (shift) {
1414 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
1415 arg = TCG_VEC_TMP1;
1416 }
1417 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1418 break;
1419 case TCG_TYPE_I64:
1420 if (arg < TCG_REG_V0) {
1421 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1422 tcg_out_mem_long(s, STD, STDX, arg, base, offset);
1423 break;
1424 }
1425 /* fallthru */
1426 case TCG_TYPE_V64:
1427 tcg_debug_assert(arg >= TCG_REG_V0);
1428 if (have_vsx) {
1429 tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
1430 STXSDX, arg, base, offset);
1431 break;
1432 }
1433 tcg_debug_assert((offset & 7) == 0);
1434 if (offset & 8) {
1435 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
1436 arg = TCG_VEC_TMP1;
1437 }
1438 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1439 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
1440 break;
1441 case TCG_TYPE_V128:
1442 tcg_debug_assert(arg >= TCG_REG_V0);
1443 tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
1444 STVX, arg, base, offset);
1445 break;
1446 default:
1447 g_assert_not_reached();
1448 }
1449 }
1450
1451 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1452 TCGReg base, intptr_t ofs)
1453 {
1454 return false;
1455 }
1456
1457 static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
1458 int const_arg2, int cr, TCGType type)
1459 {
1460 int imm;
1461 uint32_t op;
1462
1463 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1464
1465 /* Simplify the comparisons below wrt CMPI. */
1466 if (type == TCG_TYPE_I32) {
1467 arg2 = (int32_t)arg2;
1468 }
1469
1470 switch (cond) {
1471 case TCG_COND_EQ:
1472 case TCG_COND_NE:
1473 if (const_arg2) {
1474 if ((int16_t) arg2 == arg2) {
1475 op = CMPI;
1476 imm = 1;
1477 break;
1478 } else if ((uint16_t) arg2 == arg2) {
1479 op = CMPLI;
1480 imm = 1;
1481 break;
1482 }
1483 }
1484 op = CMPL;
1485 imm = 0;
1486 break;
1487
1488 case TCG_COND_LT:
1489 case TCG_COND_GE:
1490 case TCG_COND_LE:
1491 case TCG_COND_GT:
1492 if (const_arg2) {
1493 if ((int16_t) arg2 == arg2) {
1494 op = CMPI;
1495 imm = 1;
1496 break;
1497 }
1498 }
1499 op = CMP;
1500 imm = 0;
1501 break;
1502
1503 case TCG_COND_LTU:
1504 case TCG_COND_GEU:
1505 case TCG_COND_LEU:
1506 case TCG_COND_GTU:
1507 if (const_arg2) {
1508 if ((uint16_t) arg2 == arg2) {
1509 op = CMPLI;
1510 imm = 1;
1511 break;
1512 }
1513 }
1514 op = CMPL;
1515 imm = 0;
1516 break;
1517
1518 default:
1519 g_assert_not_reached();
1520 }
1521 op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
1522
1523 if (imm) {
1524 tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
1525 } else {
1526 if (const_arg2) {
1527 tcg_out_movi(s, type, TCG_REG_R0, arg2);
1528 arg2 = TCG_REG_R0;
1529 }
1530 tcg_out32(s, op | RA(arg1) | RB(arg2));
1531 }
1532 }
1533
1534 static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
1535 TCGReg dst, TCGReg src)
1536 {
1537 if (type == TCG_TYPE_I32) {
1538 tcg_out32(s, CNTLZW | RS(src) | RA(dst));
1539 tcg_out_shri32(s, dst, dst, 5);
1540 } else {
1541 tcg_out32(s, CNTLZD | RS(src) | RA(dst));
1542 tcg_out_shri64(s, dst, dst, 6);
1543 }
1544 }
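/*
 * Illustrative note: cntlzw returns 32 only when the source is zero, so
 * shifting the count right by 5 yields 1 for src == 0 and 0 otherwise
 * (counts 0..31 shift down to 0); the 64-bit variant uses cntlzd and a
 * shift by 6 in the same way.
 */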
1545
1546 static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
1547 {
1548     /* X != 0 implies X + -1 generates a carry.  Extra addition
1549        trickery means: R = ~(X-1) + X + C = -X + X + C = C. */
1550 if (dst != src) {
1551 tcg_out32(s, ADDIC | TAI(dst, src, -1));
1552 tcg_out32(s, SUBFE | TAB(dst, dst, src));
1553 } else {
1554 tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1555 tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
1556 }
1557 }
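/*
 * Worked example: for src = 5, "addic dst,src,-1" gives dst = 4 with
 * CA = 1, and "subfe dst,dst,src" computes ~4 + 5 + 1 = 1.  For src = 0,
 * dst = -1 with CA = 0, and ~(-1) + 0 + 0 = 0.
 */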
1558
1559 static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
1560 bool const_arg2)
1561 {
1562 if (const_arg2) {
1563 if ((uint32_t)arg2 == arg2) {
1564 tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
1565 } else {
1566 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
1567 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
1568 }
1569 } else {
1570 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
1571 }
1572 return TCG_REG_R0;
1573 }
1574
1575 static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
1576 TCGArg arg0, TCGArg arg1, TCGArg arg2,
1577 int const_arg2)
1578 {
1579 int crop, sh;
1580
1581 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1582
1583 /* Ignore high bits of a potential constant arg2. */
1584 if (type == TCG_TYPE_I32) {
1585 arg2 = (uint32_t)arg2;
1586 }
1587
1588 /* Handle common and trivial cases before handling anything else. */
1589 if (arg2 == 0) {
1590 switch (cond) {
1591 case TCG_COND_EQ:
1592 tcg_out_setcond_eq0(s, type, arg0, arg1);
1593 return;
1594 case TCG_COND_NE:
1595 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1596 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1597 arg1 = TCG_REG_R0;
1598 }
1599 tcg_out_setcond_ne0(s, arg0, arg1);
1600 return;
1601 case TCG_COND_GE:
1602 tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
1603 arg1 = arg0;
1604 /* FALLTHRU */
1605 case TCG_COND_LT:
1606 /* Extract the sign bit. */
1607 if (type == TCG_TYPE_I32) {
1608 tcg_out_shri32(s, arg0, arg1, 31);
1609 } else {
1610 tcg_out_shri64(s, arg0, arg1, 63);
1611 }
1612 return;
1613 default:
1614 break;
1615 }
1616 }
1617
1618 /* If we have ISEL, we can implement everything with 3 or 4 insns.
1619 All other cases below are also at least 3 insns, so speed up the
1620 code generator by not considering them and always using ISEL. */
1621 if (have_isel) {
1622 int isel, tab;
1623
1624 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1625
1626 isel = tcg_to_isel[cond];
1627
1628 tcg_out_movi(s, type, arg0, 1);
1629 if (isel & 1) {
1630 /* arg0 = (bc ? 0 : 1) */
1631 tab = TAB(arg0, 0, arg0);
1632 isel &= ~1;
1633 } else {
1634 /* arg0 = (bc ? 1 : 0) */
1635 tcg_out_movi(s, type, TCG_REG_R0, 0);
1636 tab = TAB(arg0, arg0, TCG_REG_R0);
1637 }
1638 tcg_out32(s, isel | tab);
1639 return;
1640 }
1641
1642 switch (cond) {
1643 case TCG_COND_EQ:
1644 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1645 tcg_out_setcond_eq0(s, type, arg0, arg1);
1646 return;
1647
1648 case TCG_COND_NE:
1649 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1650 /* Discard the high bits only once, rather than both inputs. */
1651 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1652 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1653 arg1 = TCG_REG_R0;
1654 }
1655 tcg_out_setcond_ne0(s, arg0, arg1);
1656 return;
1657
1658 case TCG_COND_GT:
1659 case TCG_COND_GTU:
1660 sh = 30;
1661 crop = 0;
1662 goto crtest;
1663
1664 case TCG_COND_LT:
1665 case TCG_COND_LTU:
1666 sh = 29;
1667 crop = 0;
1668 goto crtest;
1669
1670 case TCG_COND_GE:
1671 case TCG_COND_GEU:
1672 sh = 31;
1673 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
1674 goto crtest;
1675
1676 case TCG_COND_LE:
1677 case TCG_COND_LEU:
1678 sh = 31;
1679 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
1680 crtest:
1681 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1682 if (crop) {
1683 tcg_out32(s, crop);
1684 }
1685 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1686 tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
1687 break;
1688
1689 default:
1690 g_assert_not_reached();
1691 }
1692 }
1693
1694 static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
1695 {
1696 if (l->has_value) {
1697 bc |= reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
1698 } else {
1699 tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
1700 }
1701 tcg_out32(s, bc);
1702 }
1703
1704 static void tcg_out_brcond(TCGContext *s, TCGCond cond,
1705 TCGArg arg1, TCGArg arg2, int const_arg2,
1706 TCGLabel *l, TCGType type)
1707 {
1708 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1709 tcg_out_bc(s, tcg_to_bc[cond], l);
1710 }
1711
1712 static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
1713 TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
1714 TCGArg v2, bool const_c2)
1715 {
1716 /* If for some reason both inputs are zero, don't produce bad code. */
1717 if (v1 == 0 && v2 == 0) {
1718 tcg_out_movi(s, type, dest, 0);
1719 return;
1720 }
1721
1722 tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
1723
1724 if (have_isel) {
1725 int isel = tcg_to_isel[cond];
1726
1727 /* Swap the V operands if the operation indicates inversion. */
1728 if (isel & 1) {
1729 int t = v1;
1730 v1 = v2;
1731 v2 = t;
1732 isel &= ~1;
1733 }
1734 /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
1735 if (v2 == 0) {
1736 tcg_out_movi(s, type, TCG_REG_R0, 0);
1737 }
1738 tcg_out32(s, isel | TAB(dest, v1, v2));
1739 } else {
1740 if (dest == v2) {
1741 cond = tcg_invert_cond(cond);
1742 v2 = v1;
1743 } else if (dest != v1) {
1744 if (v1 == 0) {
1745 tcg_out_movi(s, type, dest, 0);
1746 } else {
1747 tcg_out_mov(s, type, dest, v1);
1748 }
1749 }
1750 /* Branch forward over one insn */
1751 tcg_out32(s, tcg_to_bc[cond] | 8);
1752 if (v2 == 0) {
1753 tcg_out_movi(s, type, dest, 0);
1754 } else {
1755 tcg_out_mov(s, type, dest, v2);
1756 }
1757 }
1758 }
1759
1760 static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
1761 TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
1762 {
1763 if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
1764 tcg_out32(s, opc | RA(a0) | RS(a1));
1765 } else {
1766 tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
1767 /* Note that the only other valid constant for a2 is 0. */
1768 if (have_isel) {
1769 tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
1770 tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
1771 } else if (!const_a2 && a0 == a2) {
1772 tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
1773 tcg_out32(s, opc | RA(a0) | RS(a1));
1774 } else {
1775 tcg_out32(s, opc | RA(a0) | RS(a1));
1776 tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
1777 if (const_a2) {
1778 tcg_out_movi(s, type, a0, 0);
1779 } else {
1780 tcg_out_mov(s, type, a0, a2);
1781 }
1782 }
1783 }
1784 }
1785
1786 static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
1787 const int *const_args)
1788 {
1789 static const struct { uint8_t bit1, bit2; } bits[] = {
1790 [TCG_COND_LT ] = { CR_LT, CR_LT },
1791 [TCG_COND_LE ] = { CR_LT, CR_GT },
1792 [TCG_COND_GT ] = { CR_GT, CR_GT },
1793 [TCG_COND_GE ] = { CR_GT, CR_LT },
1794 [TCG_COND_LTU] = { CR_LT, CR_LT },
1795 [TCG_COND_LEU] = { CR_LT, CR_GT },
1796 [TCG_COND_GTU] = { CR_GT, CR_GT },
1797 [TCG_COND_GEU] = { CR_GT, CR_LT },
1798 };
1799
1800 TCGCond cond = args[4], cond2;
1801 TCGArg al, ah, bl, bh;
1802 int blconst, bhconst;
1803 int op, bit1, bit2;
1804
1805 al = args[0];
1806 ah = args[1];
1807 bl = args[2];
1808 bh = args[3];
1809 blconst = const_args[2];
1810 bhconst = const_args[3];
1811
1812 switch (cond) {
1813 case TCG_COND_EQ:
1814 op = CRAND;
1815 goto do_equality;
1816 case TCG_COND_NE:
1817 op = CRNAND;
1818 do_equality:
1819 tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
1820 tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
1821 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
1822 break;
1823
1824 case TCG_COND_LT:
1825 case TCG_COND_LE:
1826 case TCG_COND_GT:
1827 case TCG_COND_GE:
1828 case TCG_COND_LTU:
1829 case TCG_COND_LEU:
1830 case TCG_COND_GTU:
1831 case TCG_COND_GEU:
1832 bit1 = bits[cond].bit1;
1833 bit2 = bits[cond].bit2;
1834 op = (bit1 != bit2 ? CRANDC : CRAND);
1835 cond2 = tcg_unsigned_cond(cond);
1836
1837 tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
1838 tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
1839 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
1840 tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
1841 break;
1842
1843 default:
1844 g_assert_not_reached();
1845 }
1846 }
1847
1848 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1849 const int *const_args)
1850 {
1851 tcg_out_cmp2(s, args + 1, const_args + 1);
1852 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1853 tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
1854 }
1855
1856 static void tcg_out_brcond2 (TCGContext *s, const TCGArg *args,
1857 const int *const_args)
1858 {
1859 tcg_out_cmp2(s, args, const_args);
1860 tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
1861 }
1862
1863 static void tcg_out_mb(TCGContext *s, TCGArg a0)
1864 {
1865 uint32_t insn;
1866
1867 if (a0 & TCG_MO_ST_LD) {
1868 insn = HWSYNC;
1869 } else {
1870 insn = LWSYNC;
1871 }
1872
1873 tcg_out32(s, insn);
1874 }
1875
1876 static void tcg_out_call_int(TCGContext *s, int lk,
1877 const tcg_insn_unit *target)
1878 {
1879 #ifdef _CALL_AIX
1880 /* Look through the function descriptor.  If the branch target is in range
1881 and the TOC value fits in 32 bits, we can branch directly and build the TOC cheaply. */
1882 const void *tgt = ((const void * const *)target)[0];
1883 uintptr_t toc = ((const uintptr_t *)target)[1];
1884 intptr_t diff = tcg_pcrel_diff(s, tgt);
1885
1886 if (in_range_b(diff) && toc == (uint32_t)toc) {
1887 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
1888 tcg_out_b(s, lk, tgt);
1889 } else {
1890 /* Fold the low bits of the constant into the addresses below. */
1891 intptr_t arg = (intptr_t)target;
1892 int ofs = (int16_t)arg;
1893
1894 if (ofs + 8 < 0x8000) {
1895 arg -= ofs;
1896 } else {
1897 ofs = 0;
1898 }
1899 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
1900 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
1901 tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR);
1902 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
1903 tcg_out32(s, BCCTR | BO_ALWAYS | lk);
1904 }
1905 #elif defined(_CALL_ELF) && _CALL_ELF == 2
1906 intptr_t diff;
1907
1908 /* In the ELFv2 ABI, we have to set up r12 to contain the destination
1909 address, which the callee uses to compute its TOC address. */
1910 /* FIXME: when the branch is in range, we could avoid r12 load if we
1911 knew that the destination uses the same TOC, and what its local
1912 entry point offset is. */
1913 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);
1914
1915 diff = tcg_pcrel_diff(s, target);
1916 if (in_range_b(diff)) {
1917 tcg_out_b(s, lk, target);
1918 } else {
1919 tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
1920 tcg_out32(s, BCCTR | BO_ALWAYS | lk);
1921 }
1922 #else
1923 tcg_out_b(s, lk, target);
1924 #endif
1925 }
1926
1927 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
1928 const TCGHelperInfo *info)
1929 {
1930 tcg_out_call_int(s, LK, target);
1931 }
1932
1933 static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
1934 [MO_UB] = LBZX,
1935 [MO_UW] = LHZX,
1936 [MO_UL] = LWZX,
1937 [MO_UQ] = LDX,
1938 [MO_SW] = LHAX,
1939 [MO_SL] = LWAX,
1940 [MO_BSWAP | MO_UB] = LBZX,
1941 [MO_BSWAP | MO_UW] = LHBRX,
1942 [MO_BSWAP | MO_UL] = LWBRX,
1943 [MO_BSWAP | MO_UQ] = LDBRX,
1944 };
1945
1946 static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
1947 [MO_UB] = STBX,
1948 [MO_UW] = STHX,
1949 [MO_UL] = STWX,
1950 [MO_UQ] = STDX,
1951 [MO_BSWAP | MO_UB] = STBX,
1952 [MO_BSWAP | MO_UW] = STHBRX,
1953 [MO_BSWAP | MO_UL] = STWBRX,
1954 [MO_BSWAP | MO_UQ] = STDBRX,
1955 };
1956
1957 static const uint32_t qemu_exts_opc[4] = {
1958 EXTSB, EXTSH, EXTSW, 0
1959 };
1960
1961 #if defined (CONFIG_SOFTMMU)
1962 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
1963 * int mmu_idx, uintptr_t ra)
1964 */
1965 static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
1966 [MO_UB] = helper_ret_ldub_mmu,
1967 [MO_LEUW] = helper_le_lduw_mmu,
1968 [MO_LEUL] = helper_le_ldul_mmu,
1969 [MO_LEUQ] = helper_le_ldq_mmu,
1970 [MO_BEUW] = helper_be_lduw_mmu,
1971 [MO_BEUL] = helper_be_ldul_mmu,
1972 [MO_BEUQ] = helper_be_ldq_mmu,
1973 };
1974
1975 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
1976 * uintxx_t val, int mmu_idx, uintptr_t ra)
1977 */
1978 static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
1979 [MO_UB] = helper_ret_stb_mmu,
1980 [MO_LEUW] = helper_le_stw_mmu,
1981 [MO_LEUL] = helper_le_stl_mmu,
1982 [MO_LEUQ] = helper_le_stq_mmu,
1983 [MO_BEUW] = helper_be_stw_mmu,
1984 [MO_BEUL] = helper_be_stl_mmu,
1985 [MO_BEUQ] = helper_be_stq_mmu,
1986 };
1987
1988 /* We expect to use a 16-bit negative offset from ENV. */
1989 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
1990 QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -32768);
1991
1992 /* Perform the TLB load and compare. Places the result of the comparison
1993 in CR7, loads the addend of the TLB into R3, and returns the register
1994 containing the guest address (zero-extended into R4). Clobbers R0 and R2. */
1995
1996 static TCGReg tcg_out_tlb_read(TCGContext *s, MemOp opc,
1997 TCGReg addrlo, TCGReg addrhi,
1998 int mem_index, bool is_read)
1999 {
2000 int cmp_off
2001 = (is_read
2002 ? offsetof(CPUTLBEntry, addr_read)
2003 : offsetof(CPUTLBEntry, addr_write));
2004 int fast_off = TLB_MASK_TABLE_OFS(mem_index);
2005 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
2006 int table_off = fast_off + offsetof(CPUTLBDescFast, table);
2007 unsigned s_bits = opc & MO_SIZE;
2008 unsigned a_bits = get_alignment_bits(opc);
2009
2010 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
2011 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0, mask_off);
2012 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R4, TCG_AREG0, table_off);
2013
2014 /* Extract the page index, shifted into place for tlb index. */
2015 if (TCG_TARGET_REG_BITS == 32) {
2016 tcg_out_shri32(s, TCG_REG_TMP1, addrlo,
2017 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
2018 } else {
2019 tcg_out_shri64(s, TCG_REG_TMP1, addrlo,
2020 TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
2021 }
2022 tcg_out32(s, AND | SAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_TMP1));
2023
2024 /* Load the TLB comparator. */
2025 if (cmp_off == 0 && TCG_TARGET_REG_BITS >= TARGET_LONG_BITS) {
2026 uint32_t lxu = (TCG_TARGET_REG_BITS == 32 || TARGET_LONG_BITS == 32
2027 ? LWZUX : LDUX);
2028 tcg_out32(s, lxu | TAB(TCG_REG_TMP1, TCG_REG_R3, TCG_REG_R4));
2029 } else {
2030 tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, TCG_REG_R4));
2031 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2032 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP1, TCG_REG_R3, cmp_off + 4);
2033 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_R4, TCG_REG_R3, cmp_off);
2034 } else {
2035 tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP1, TCG_REG_R3, cmp_off);
2036 }
2037 }
2038
2039 /* Load the TLB addend for use on the fast path.  Do this as early as
2040 possible to minimize the load-use delay. */
2041 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_REG_R3,
2042 offsetof(CPUTLBEntry, addend));
2043
2044 /* Clear the non-page, non-alignment bits from the address */
2045 if (TCG_TARGET_REG_BITS == 32) {
2046 /* We don't support unaligned accesses on 32-bit hosts.
2047 * Preserve the bottom bits and thus trigger a comparison
2048 * failure on unaligned accesses.
2049 */
2050 if (a_bits < s_bits) {
2051 a_bits = s_bits;
2052 }
2053 tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
2054 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
2055 } else {
2056 TCGReg t = addrlo;
2057
2058 /* If the access is unaligned, we need to make sure we fail if we
2059 * cross a page boundary. The trick is to add the access size-1
2060 * to the address before masking the low bits. That will make the
2061 * address overflow to the next page if we cross a page boundary,
2062 * which will then force a mismatch of the TLB compare.
2063 */
2064 if (a_bits < s_bits) {
2065 unsigned a_mask = (1 << a_bits) - 1;
2066 unsigned s_mask = (1 << s_bits) - 1;
2067 tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
2068 t = TCG_REG_R0;
2069 }
2070
2071 /* Mask the address for the requested alignment. */
2072 if (TARGET_LONG_BITS == 32) {
2073 tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
2074 (32 - a_bits) & 31, 31 - TARGET_PAGE_BITS);
2075 /* Zero-extend the address for use in the final address. */
2076 tcg_out_ext32u(s, TCG_REG_R4, addrlo);
2077 addrlo = TCG_REG_R4;
2078 } else if (a_bits == 0) {
2079 tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - TARGET_PAGE_BITS);
2080 } else {
2081 tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
2082 64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - a_bits);
2083 tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
2084 }
2085 }
2086
2087 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2088 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
2089 0, 7, TCG_TYPE_I32);
2090 tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_R4, 0, 6, TCG_TYPE_I32);
2091 tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
2092 } else {
2093 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP1,
2094 0, 7, TCG_TYPE_TL);
2095 }
2096
2097 return addrlo;
2098 }
2099
2100 /* Record the context of a call to the out-of-line helper code for the
2101 slow path of a load or store, so that we can later generate the correct
2102 helper code. */
2103 static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
2104 TCGReg datalo_reg, TCGReg datahi_reg,
2105 TCGReg addrlo_reg, TCGReg addrhi_reg,
2106 tcg_insn_unit *raddr, tcg_insn_unit *lptr)
2107 {
2108 TCGLabelQemuLdst *label = new_ldst_label(s);
2109
2110 label->is_ld = is_ld;
2111 label->oi = oi;
2112 label->datalo_reg = datalo_reg;
2113 label->datahi_reg = datahi_reg;
2114 label->addrlo_reg = addrlo_reg;
2115 label->addrhi_reg = addrhi_reg;
2116 label->raddr = tcg_splitwx_to_rx(raddr);
2117 label->label_ptr[0] = lptr;
2118 }
2119
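/*
 * Slow path for a guest load: relocate the branch from the fast path,
 * marshal env, address, oi and the return address into argument
 * registers, call the load helper, move (and if needed sign-extend) the
 * result into the destination register(s), and branch back inline.
 */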
2120 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2121 {
2122 MemOpIdx oi = lb->oi;
2123 MemOp opc = get_memop(oi);
2124 TCGReg hi, lo, arg = TCG_REG_R3;
2125
2126 if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2127 return false;
2128 }
2129
2130 tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
2131
2132 lo = lb->addrlo_reg;
2133 hi = lb->addrhi_reg;
2134 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2135 arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
2136 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2137 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2138 } else {
2139 /* If the address needed to be zero-extended, we'll have already
2140 placed it in R4.  The only remaining case is a 64-bit guest. */
2141 tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
2142 }
2143
2144 tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
2145 tcg_out32(s, MFSPR | RT(arg) | LR);
2146
2147 tcg_out_call_int(s, LK, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
2148
2149 lo = lb->datalo_reg;
2150 hi = lb->datahi_reg;
2151 if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2152 tcg_out_mov(s, TCG_TYPE_I32, lo, TCG_REG_R4);
2153 tcg_out_mov(s, TCG_TYPE_I32, hi, TCG_REG_R3);
2154 } else if (opc & MO_SIGN) {
2155 uint32_t insn = qemu_exts_opc[opc & MO_SIZE];
2156 tcg_out32(s, insn | RA(lo) | RS(TCG_REG_R3));
2157 } else {
2158 tcg_out_mov(s, TCG_TYPE_REG, lo, TCG_REG_R3);
2159 }
2160
2161 tcg_out_b(s, 0, lb->raddr);
2162 return true;
2163 }
2164
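/*
 * Slow path for a guest store: marshal env, address, data (zero-extended
 * for sub-word sizes), oi and the return address, then call the store
 * helper and branch back to the fast path.
 */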
2165 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2166 {
2167 MemOpIdx oi = lb->oi;
2168 MemOp opc = get_memop(oi);
2169 MemOp s_bits = opc & MO_SIZE;
2170 TCGReg hi, lo, arg = TCG_REG_R3;
2171
2172 if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2173 return false;
2174 }
2175
2176 tcg_out_mov(s, TCG_TYPE_PTR, arg++, TCG_AREG0);
2177
2178 lo = lb->addrlo_reg;
2179 hi = lb->addrhi_reg;
2180 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2181 arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
2182 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2183 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2184 } else {
2185 /* If the address needed to be zero-extended, we'll have already
2186 placed it in R4.  The only remaining case is a 64-bit guest. */
2187 tcg_out_mov(s, TCG_TYPE_TL, arg++, lo);
2188 }
2189
2190 lo = lb->datalo_reg;
2191 hi = lb->datahi_reg;
2192 if (TCG_TARGET_REG_BITS == 32) {
2193 switch (s_bits) {
2194 case MO_64:
2195 arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
2196 tcg_out_mov(s, TCG_TYPE_I32, arg++, hi);
2197 /* FALLTHRU */
2198 case MO_32:
2199 tcg_out_mov(s, TCG_TYPE_I32, arg++, lo);
2200 break;
2201 default:
2202 tcg_out_rlw(s, RLWINM, arg++, lo, 0, 32 - (8 << s_bits), 31);
2203 break;
2204 }
2205 } else {
2206 if (s_bits == MO_64) {
2207 tcg_out_mov(s, TCG_TYPE_I64, arg++, lo);
2208 } else {
2209 tcg_out_rld(s, RLDICL, arg++, lo, 0, 64 - (8 << s_bits));
2210 }
2211 }
2212
2213 tcg_out_movi(s, TCG_TYPE_I32, arg++, oi);
2214 tcg_out32(s, MFSPR | RT(arg) | LR);
2215
2216 tcg_out_call_int(s, LK, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
2217
2218 tcg_out_b(s, 0, lb->raddr);
2219 return true;
2220 }
2221 #else
2222
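/*
 * User-only: AND the low address bits against the alignment mask (ANDI.
 * sets CR0) and branch-and-link to the slow path if any bit is set,
 * i.e. if the access is misaligned.
 */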
2223 static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
2224 TCGReg addrhi, unsigned a_bits)
2225 {
2226 unsigned a_mask = (1 << a_bits) - 1;
2227 TCGLabelQemuLdst *label = new_ldst_label(s);
2228
2229 label->is_ld = is_ld;
2230 label->addrlo_reg = addrlo;
2231 label->addrhi_reg = addrhi;
2232
2233 /* We expect a_bits to max out at 7, well within ANDI's 16-bit immediate. */
2234 tcg_debug_assert(a_bits < 16);
2235 tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, a_mask));
2236
2237 label->label_ptr[0] = s->code_ptr;
2238 tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
2239
2240 label->raddr = tcg_splitwx_to_rx(s->code_ptr);
2241 }
2242
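/*
 * Misalignment slow path: move env into R3 and the guest address into
 * R4, or into the register pair required by the 32-bit calling
 * convention, then tail-call the unaligned-access helper.
 */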
2243 static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
2244 {
2245 if (!reloc_pc14(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2246 return false;
2247 }
2248
2249 if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
2250 TCGReg arg = TCG_REG_R4;
2251
2252 arg |= (TCG_TARGET_CALL_ARG_I64 == TCG_CALL_ARG_EVEN);
2253 if (l->addrlo_reg != arg) {
2254 tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
2255 tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
2256 } else if (l->addrhi_reg != arg + 1) {
2257 tcg_out_mov(s, TCG_TYPE_I32, arg + 1, l->addrlo_reg);
2258 tcg_out_mov(s, TCG_TYPE_I32, arg, l->addrhi_reg);
2259 } else {
2260 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R0, arg);
2261 tcg_out_mov(s, TCG_TYPE_I32, arg, arg + 1);
2262 tcg_out_mov(s, TCG_TYPE_I32, arg + 1, TCG_REG_R0);
2263 }
2264 } else {
2265 tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R4, l->addrlo_reg);
2266 }
2267 tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_R3, TCG_AREG0);
2268
2269 /* "Tail call" to the helper, with the return address back inline. */
2270 tcg_out_call_int(s, 0, (const void *)(l->is_ld ? helper_unaligned_ld
2271 : helper_unaligned_st));
2272 return true;
2273 }
2274
2275 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
2276 {
2277 return tcg_out_fail_alignment(s, l);
2278 }
2279
2280 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
2281 {
2282 return tcg_out_fail_alignment(s, l);
2283 }
2284
2285 #endif /* SOFTMMU */
2286
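/*
 * Fast path for a guest load: perform the TLB lookup (softmmu) or the
 * alignment check (user-only), then emit the indexed-form load, with
 * special cases for 64-bit data on a 32-bit host and for byte-reversed
 * 64-bit loads when LDBRX (ISA 2.06) is unavailable.
 */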
2287 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
2288 {
2289 TCGReg datalo, datahi, addrlo, rbase;
2290 TCGReg addrhi __attribute__((unused));
2291 MemOpIdx oi;
2292 MemOp opc, s_bits;
2293 #ifdef CONFIG_SOFTMMU
2294 int mem_index;
2295 tcg_insn_unit *label_ptr;
2296 #else
2297 unsigned a_bits;
2298 #endif
2299
2300 datalo = *args++;
2301 datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
2302 addrlo = *args++;
2303 addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
2304 oi = *args++;
2305 opc = get_memop(oi);
2306 s_bits = opc & MO_SIZE;
2307
2308 #ifdef CONFIG_SOFTMMU
2309 mem_index = get_mmuidx(oi);
2310 addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, true);
2311
2312 /* Emit a conditional branch-and-link to the slow path, patched later. */
2313 label_ptr = s->code_ptr;
2314 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2315
2316 rbase = TCG_REG_R3;
2317 #else /* !CONFIG_SOFTMMU */
2318 a_bits = get_alignment_bits(opc);
2319 if (a_bits) {
2320 tcg_out_test_alignment(s, true, addrlo, addrhi, a_bits);
2321 }
2322 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
2323 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2324 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
2325 addrlo = TCG_REG_TMP1;
2326 }
2327 #endif
2328
2329 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
2330 if (opc & MO_BSWAP) {
2331 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2332 tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
2333 tcg_out32(s, LWBRX | TAB(datahi, rbase, TCG_REG_R0));
2334 } else if (rbase != 0) {
2335 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2336 tcg_out32(s, LWZX | TAB(datahi, rbase, addrlo));
2337 tcg_out32(s, LWZX | TAB(datalo, rbase, TCG_REG_R0));
2338 } else if (addrlo == datahi) {
2339 tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
2340 tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
2341 } else {
2342 tcg_out32(s, LWZ | TAI(datahi, addrlo, 0));
2343 tcg_out32(s, LWZ | TAI(datalo, addrlo, 4));
2344 }
2345 } else {
2346 uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
2347 if (!have_isa_2_06 && insn == LDBRX) {
2348 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2349 tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo));
2350 tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
2351 tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
2352 } else if (insn) {
2353 tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
2354 } else {
2355 insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
2356 tcg_out32(s, insn | TAB(datalo, rbase, addrlo));
2357 insn = qemu_exts_opc[s_bits];
2358 tcg_out32(s, insn | RA(datalo) | RS(datalo));
2359 }
2360 }
2361
2362 #ifdef CONFIG_SOFTMMU
2363 add_qemu_ldst_label(s, true, oi, datalo, datahi, addrlo, addrhi,
2364 s->code_ptr, label_ptr);
2365 #endif
2366 }
2367
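/*
 * Fast path for a guest store; mirrors tcg_out_qemu_ld above, using the
 * indexed-form store opcodes and a pair of STWBRX when STDBRX is
 * unavailable.
 */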
2368 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
2369 {
2370 TCGReg datalo, datahi, addrlo, rbase;
2371 TCGReg addrhi __attribute__((unused));
2372 MemOpIdx oi;
2373 MemOp opc, s_bits;
2374 #ifdef CONFIG_SOFTMMU
2375 int mem_index;
2376 tcg_insn_unit *label_ptr;
2377 #else
2378 unsigned a_bits;
2379 #endif
2380
2381 datalo = *args++;
2382 datahi = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
2383 addrlo = *args++;
2384 addrhi = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
2385 oi = *args++;
2386 opc = get_memop(oi);
2387 s_bits = opc & MO_SIZE;
2388
2389 #ifdef CONFIG_SOFTMMU
2390 mem_index = get_mmuidx(oi);
2391 addrlo = tcg_out_tlb_read(s, opc, addrlo, addrhi, mem_index, false);
2392
2393 /* Emit a conditional branch-and-link to the slow path, patched later. */
2394 label_ptr = s->code_ptr;
2395 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2396
2397 rbase = TCG_REG_R3;
2398 #else /* !CONFIG_SOFTMMU */
2399 a_bits = get_alignment_bits(opc);
2400 if (a_bits) {
2401 tcg_out_test_alignment(s, false, addrlo, addrhi, a_bits);
2402 }
2403 rbase = guest_base ? TCG_GUEST_BASE_REG : 0;
2404 if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
2405 tcg_out_ext32u(s, TCG_REG_TMP1, addrlo);
2406 addrlo = TCG_REG_TMP1;
2407 }
2408 #endif
2409
2410 if (TCG_TARGET_REG_BITS == 32 && s_bits == MO_64) {
2411 if (opc & MO_BSWAP) {
2412 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2413 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2414 tcg_out32(s, STWBRX | SAB(datahi, rbase, TCG_REG_R0));
2415 } else if (rbase != 0) {
2416 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4));
2417 tcg_out32(s, STWX | SAB(datahi, rbase, addrlo));
2418 tcg_out32(s, STWX | SAB(datalo, rbase, TCG_REG_R0));
2419 } else {
2420 tcg_out32(s, STW | TAI(datahi, addrlo, 0));
2421 tcg_out32(s, STW | TAI(datalo, addrlo, 4));
2422 }
2423 } else {
2424 uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
2425 if (!have_isa_2_06 && insn == STDBRX) {
2426 tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo));
2427 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4));
2428 tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
2429 tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_TMP1));
2430 } else {
2431 tcg_out32(s, insn | SAB(datalo, rbase, addrlo));
2432 }
2433 }
2434
2435 #ifdef CONFIG_SOFTMMU
2436 add_qemu_ldst_label(s, false, oi, datalo, datahi, addrlo, addrhi,
2437 s->code_ptr, label_ptr);
2438 #endif
2439 }
2440
2441 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2442 {
2443 int i;
2444 for (i = 0; i < count; ++i) {
2445 p[i] = NOP;
2446 }
2447 }
2448
2449 /* Parameters for function call generation, used in tcg.c. */
2450 #define TCG_TARGET_STACK_ALIGN 16
2451
2452 #ifdef _CALL_AIX
2453 # define LINK_AREA_SIZE (6 * SZR)
2454 # define LR_OFFSET (1 * SZR)
2455 # define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR)
2456 #elif defined(_CALL_DARWIN)
2457 # define LINK_AREA_SIZE (6 * SZR)
2458 # define LR_OFFSET (2 * SZR)
2459 #elif TCG_TARGET_REG_BITS == 64
2460 # if defined(_CALL_ELF) && _CALL_ELF == 2
2461 # define LINK_AREA_SIZE (4 * SZR)
2462 # define LR_OFFSET (1 * SZR)
2463 # endif
2464 #else /* TCG_TARGET_REG_BITS == 32 */
2465 # if defined(_CALL_SYSV)
2466 # define LINK_AREA_SIZE (2 * SZR)
2467 # define LR_OFFSET (1 * SZR)
2468 # endif
2469 #endif
2470 #ifndef LR_OFFSET
2471 # error "Unhandled ABI"
2472 #endif
2473 #ifndef TCG_TARGET_CALL_STACK_OFFSET
2474 # define TCG_TARGET_CALL_STACK_OFFSET LINK_AREA_SIZE
2475 #endif
2476
2477 #define CPU_TEMP_BUF_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2478 #define REG_SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)
2479
2480 #define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET \
2481 + TCG_STATIC_CALL_ARGS_SIZE \
2482 + CPU_TEMP_BUF_SIZE \
2483 + REG_SAVE_SIZE \
2484 + TCG_TARGET_STACK_ALIGN - 1) \
2485 & -TCG_TARGET_STACK_ALIGN)
2486
2487 #define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
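/*
 * Frame layout, roughly from the stack pointer upward:
 *   [ ABI link area (TCG_TARGET_CALL_STACK_OFFSET) ]
 *   [ TCG_STATIC_CALL_ARGS_SIZE                    ]
 *   [ CPU_TEMP_BUF_SIZE                            ]
 *   [ callee-saved register save area              ]
 * with the total rounded up to TCG_TARGET_STACK_ALIGN.
 */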
2488
2489 static void tcg_target_qemu_prologue(TCGContext *s)
2490 {
2491 int i;
2492
2493 #ifdef _CALL_AIX
2494 const void **desc = (const void **)s->code_ptr;
2495 desc[0] = tcg_splitwx_to_rx(desc + 2); /* entry point */
2496 desc[1] = 0; /* environment pointer */
2497 s->code_ptr = (void *)(desc + 2); /* skip over descriptor */
2498 #endif
2499
2500 tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
2501 CPU_TEMP_BUF_SIZE);
2502
2503 /* Prologue */
2504 tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
2505 tcg_out32(s, (SZR == 8 ? STDU : STWU)
2506 | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
2507
2508 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2509 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2510 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2511 }
2512 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2513
2514 #ifndef CONFIG_SOFTMMU
2515 if (guest_base) {
2516 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
2517 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2518 }
2519 #endif
2520
2521 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2522 tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
2523 if (USE_REG_TB) {
2524 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
2525 }
2526 tcg_out32(s, BCCTR | BO_ALWAYS);
2527
2528 /* Epilogue */
2529 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2530
2531 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE+LR_OFFSET);
2532 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2533 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2534 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2535 }
2536 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
2537 tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
2538 tcg_out32(s, BCLR | BO_ALWAYS);
2539 }
2540
2541 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
2542 {
2543 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
2544 tcg_out_b(s, 0, tcg_code_gen_epilogue);
2545 }
2546
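/*
 * goto_tb: reserve a patchable insn slot for the direct branch and emit
 * an indirect branch through CTR as the out-of-range fallback, loading
 * the destination from the TB's jump-target slot.
 */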
2547 static void tcg_out_goto_tb(TCGContext *s, int which)
2548 {
2549 uintptr_t ptr = get_jmp_target_addr(s, which);
2550
2551 if (USE_REG_TB) {
2552 ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr);
2553 tcg_out_mem_long(s, LD, LDX, TCG_REG_TB, TCG_REG_TB, offset);
2554
2555 /* Direct branch will be patched by tb_target_set_jmp_target. */
2556 set_jmp_insn_offset(s, which);
2557 tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
2558
2559 /* When the branch is out of range, fall through to the indirect path. */
2560 tcg_out32(s, BCCTR | BO_ALWAYS);
2561
2562 /* For the unlinked case, we need to reset TCG_REG_TB. */
2563 set_jmp_reset_offset(s, which);
2564 tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
2565 -tcg_current_code_size(s));
2566 } else {
2567 /* Direct branch will be patched by tb_target_set_jmp_target. */
2568 set_jmp_insn_offset(s, which);
2569 tcg_out32(s, NOP);
2570
2571 /* When the branch is out of range, fall through to the indirect path. */
2572 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr);
2573 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, (int16_t)ptr);
2574 tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
2575 tcg_out32(s, BCCTR | BO_ALWAYS);
2576 set_jmp_reset_offset(s, which);
2577 }
2578 }
2579
2580 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
2581 uintptr_t jmp_rx, uintptr_t jmp_rw)
2582 {
2583 uintptr_t addr = tb->jmp_target_addr[n];
2584 intptr_t diff = addr - jmp_rx;
2585 tcg_insn_unit insn;
2586
2587 if (in_range_b(diff)) {
2588 insn = B | (diff & 0x3fffffc);
2589 } else if (USE_REG_TB) {
2590 insn = MTSPR | RS(TCG_REG_TB) | CTR;
2591 } else {
2592 insn = NOP;
2593 }
2594
2595 qatomic_set((uint32_t *)jmp_rw, insn);
2596 flush_idcache_range(jmp_rx, jmp_rw, 4);
2597 }
2598
2599 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
2600 const TCGArg args[TCG_MAX_OP_ARGS],
2601 const int const_args[TCG_MAX_OP_ARGS])
2602 {
2603 TCGArg a0, a1, a2;
2604
2605 switch (opc) {
2606 case INDEX_op_goto_ptr:
2607 tcg_out32(s, MTSPR | RS(args[0]) | CTR);
2608 if (USE_REG_TB) {
2609 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
2610 }
2611 tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
2612 tcg_out32(s, BCCTR | BO_ALWAYS);
2613 break;
2614 case INDEX_op_br:
2615 {
2616 TCGLabel *l = arg_label(args[0]);
2617 uint32_t insn = B;
2618
2619 if (l->has_value) {
2620 insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
2621 l->u.value_ptr);
2622 } else {
2623 tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
2624 }
2625 tcg_out32(s, insn);
2626 }
2627 break;
2628 case INDEX_op_ld8u_i32:
2629 case INDEX_op_ld8u_i64:
2630 tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2631 break;
2632 case INDEX_op_ld8s_i32:
2633 case INDEX_op_ld8s_i64:
2634 tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2635 tcg_out_ext8s(s, TCG_TYPE_REG, args[0], args[0]);
2636 break;
2637 case INDEX_op_ld16u_i32:
2638 case INDEX_op_ld16u_i64:
2639 tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
2640 break;
2641 case INDEX_op_ld16s_i32:
2642 case INDEX_op_ld16s_i64:
2643 tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
2644 break;
2645 case INDEX_op_ld_i32:
2646 case INDEX_op_ld32u_i64:
2647 tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
2648 break;
2649 case INDEX_op_ld32s_i64:
2650 tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
2651 break;
2652 case INDEX_op_ld_i64:
2653 tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
2654 break;
2655 case INDEX_op_st8_i32:
2656 case INDEX_op_st8_i64:
2657 tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
2658 break;
2659 case INDEX_op_st16_i32:
2660 case INDEX_op_st16_i64:
2661 tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
2662 break;
2663 case INDEX_op_st_i32:
2664 case INDEX_op_st32_i64:
2665 tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
2666 break;
2667 case INDEX_op_st_i64:
2668 tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
2669 break;
2670
2671 case INDEX_op_add_i32:
2672 a0 = args[0], a1 = args[1], a2 = args[2];
2673 if (const_args[2]) {
2674 do_addi_32:
2675 tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
2676 } else {
2677 tcg_out32(s, ADD | TAB(a0, a1, a2));
2678 }
2679 break;
2680 case INDEX_op_sub_i32:
2681 a0 = args[0], a1 = args[1], a2 = args[2];
2682 if (const_args[1]) {
2683 if (const_args[2]) {
2684 tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
2685 } else {
2686 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
2687 }
2688 } else if (const_args[2]) {
2689 a2 = -a2;
2690 goto do_addi_32;
2691 } else {
2692 tcg_out32(s, SUBF | TAB(a0, a2, a1));
2693 }
2694 break;
2695
2696 case INDEX_op_and_i32:
2697 a0 = args[0], a1 = args[1], a2 = args[2];
2698 if (const_args[2]) {
2699 tcg_out_andi32(s, a0, a1, a2);
2700 } else {
2701 tcg_out32(s, AND | SAB(a1, a0, a2));
2702 }
2703 break;
2704 case INDEX_op_and_i64:
2705 a0 = args[0], a1 = args[1], a2 = args[2];
2706 if (const_args[2]) {
2707 tcg_out_andi64(s, a0, a1, a2);
2708 } else {
2709 tcg_out32(s, AND | SAB(a1, a0, a2));
2710 }
2711 break;
2712 case INDEX_op_or_i64:
2713 case INDEX_op_or_i32:
2714 a0 = args[0], a1 = args[1], a2 = args[2];
2715 if (const_args[2]) {
2716 tcg_out_ori32(s, a0, a1, a2);
2717 } else {
2718 tcg_out32(s, OR | SAB(a1, a0, a2));
2719 }
2720 break;
2721 case INDEX_op_xor_i64:
2722 case INDEX_op_xor_i32:
2723 a0 = args[0], a1 = args[1], a2 = args[2];
2724 if (const_args[2]) {
2725 tcg_out_xori32(s, a0, a1, a2);
2726 } else {
2727 tcg_out32(s, XOR | SAB(a1, a0, a2));
2728 }
2729 break;
2730 case INDEX_op_andc_i32:
2731 a0 = args[0], a1 = args[1], a2 = args[2];
2732 if (const_args[2]) {
2733 tcg_out_andi32(s, a0, a1, ~a2);
2734 } else {
2735 tcg_out32(s, ANDC | SAB(a1, a0, a2));
2736 }
2737 break;
2738 case INDEX_op_andc_i64:
2739 a0 = args[0], a1 = args[1], a2 = args[2];
2740 if (const_args[2]) {
2741 tcg_out_andi64(s, a0, a1, ~a2);
2742 } else {
2743 tcg_out32(s, ANDC | SAB(a1, a0, a2));
2744 }
2745 break;
2746 case INDEX_op_orc_i32:
2747 if (const_args[2]) {
2748 tcg_out_ori32(s, args[0], args[1], ~args[2]);
2749 break;
2750 }
2751 /* FALLTHRU */
2752 case INDEX_op_orc_i64:
2753 tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
2754 break;
2755 case INDEX_op_eqv_i32:
2756 if (const_args[2]) {
2757 tcg_out_xori32(s, args[0], args[1], ~args[2]);
2758 break;
2759 }
2760 /* FALLTHRU */
2761 case INDEX_op_eqv_i64:
2762 tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
2763 break;
2764 case INDEX_op_nand_i32:
2765 case INDEX_op_nand_i64:
2766 tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
2767 break;
2768 case INDEX_op_nor_i32:
2769 case INDEX_op_nor_i64:
2770 tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
2771 break;
2772
2773 case INDEX_op_clz_i32:
2774 tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
2775 args[2], const_args[2]);
2776 break;
2777 case INDEX_op_ctz_i32:
2778 tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
2779 args[2], const_args[2]);
2780 break;
2781 case INDEX_op_ctpop_i32:
2782 tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
2783 break;
2784
2785 case INDEX_op_clz_i64:
2786 tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
2787 args[2], const_args[2]);
2788 break;
2789 case INDEX_op_ctz_i64:
2790 tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
2791 args[2], const_args[2]);
2792 break;
2793 case INDEX_op_ctpop_i64:
2794 tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
2795 break;
2796
2797 case INDEX_op_mul_i32:
2798 a0 = args[0], a1 = args[1], a2 = args[2];
2799 if (const_args[2]) {
2800 tcg_out32(s, MULLI | TAI(a0, a1, a2));
2801 } else {
2802 tcg_out32(s, MULLW | TAB(a0, a1, a2));
2803 }
2804 break;
2805
2806 case INDEX_op_div_i32:
2807 tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
2808 break;
2809
2810 case INDEX_op_divu_i32:
2811 tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
2812 break;
2813
2814 case INDEX_op_rem_i32:
2815 tcg_out32(s, MODSW | TAB(args[0], args[1], args[2]));
2816 break;
2817
2818 case INDEX_op_remu_i32:
2819 tcg_out32(s, MODUW | TAB(args[0], args[1], args[2]));
2820 break;
2821
2822 case INDEX_op_shl_i32:
2823 if (const_args[2]) {
2824 /* Limit immediate shift count lest we create an illegal insn. */
2825 tcg_out_shli32(s, args[0], args[1], args[2] & 31);
2826 } else {
2827 tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
2828 }
2829 break;
2830 case INDEX_op_shr_i32:
2831 if (const_args[2]) {
2832 /* Limit immediate shift count lest we create an illegal insn. */
2833 tcg_out_shri32(s, args[0], args[1], args[2] & 31);
2834 } else {
2835 tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
2836 }
2837 break;
2838 case INDEX_op_sar_i32:
2839 if (const_args[2]) {
2840 tcg_out_sari32(s, args[0], args[1], args[2]);
2841 } else {
2842 tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
2843 }
2844 break;
2845 case INDEX_op_rotl_i32:
2846 if (const_args[2]) {
2847 tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
2848 } else {
2849 tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
2850 | MB(0) | ME(31));
2851 }
2852 break;
2853 case INDEX_op_rotr_i32:
2854 if (const_args[2]) {
2855 tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
2856 } else {
2857 tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
2858 tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
2859 | MB(0) | ME(31));
2860 }
2861 break;
2862
2863 case INDEX_op_brcond_i32:
2864 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
2865 arg_label(args[3]), TCG_TYPE_I32);
2866 break;
2867 case INDEX_op_brcond_i64:
2868 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
2869 arg_label(args[3]), TCG_TYPE_I64);
2870 break;
2871 case INDEX_op_brcond2_i32:
2872 tcg_out_brcond2(s, args, const_args);
2873 break;
2874
2875 case INDEX_op_neg_i32:
2876 case INDEX_op_neg_i64:
2877 tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
2878 break;
2879
2880 case INDEX_op_not_i32:
2881 case INDEX_op_not_i64:
2882 tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
2883 break;
2884
2885 case INDEX_op_add_i64:
2886 a0 = args[0], a1 = args[1], a2 = args[2];
2887 if (const_args[2]) {
2888 do_addi_64:
2889 tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
2890 } else {
2891 tcg_out32(s, ADD | TAB(a0, a1, a2));
2892 }
2893 break;
2894 case INDEX_op_sub_i64:
2895 a0 = args[0], a1 = args[1], a2 = args[2];
2896 if (const_args[1]) {
2897 if (const_args[2]) {
2898 tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
2899 } else {
2900 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
2901 }
2902 } else if (const_args[2]) {
2903 a2 = -a2;
2904 goto do_addi_64;
2905 } else {
2906 tcg_out32(s, SUBF | TAB(a0, a2, a1));
2907 }
2908 break;
2909
2910 case INDEX_op_shl_i64:
2911 if (const_args[2]) {
2912 /* Limit immediate shift count lest we create an illegal insn. */
2913 tcg_out_shli64(s, args[0], args[1], args[2] & 63);
2914 } else {
2915 tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
2916 }
2917 break;
2918 case INDEX_op_shr_i64:
2919 if (const_args[2]) {
2920 /* Limit immediate shift count lest we create an illegal insn. */
2921 tcg_out_shri64(s, args[0], args[1], args[2] & 63);
2922 } else {
2923 tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
2924 }
2925 break;
2926 case INDEX_op_sar_i64:
2927 if (const_args[2]) {
2928 tcg_out_sari64(s, args[0], args[1], args[2]);
2929 } else {
2930 tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
2931 }
2932 break;
2933 case INDEX_op_rotl_i64:
2934 if (const_args[2]) {
2935 tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
2936 } else {
2937 tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
2938 }
2939 break;
2940 case INDEX_op_rotr_i64:
2941 if (const_args[2]) {
2942 tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
2943 } else {
2944 tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
2945 tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
2946 }
2947 break;
2948
2949 case INDEX_op_mul_i64:
2950 a0 = args[0], a1 = args[1], a2 = args[2];
2951 if (const_args[2]) {
2952 tcg_out32(s, MULLI | TAI(a0, a1, a2));
2953 } else {
2954 tcg_out32(s, MULLD | TAB(a0, a1, a2));
2955 }
2956 break;
2957 case INDEX_op_div_i64:
2958 tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
2959 break;
2960 case INDEX_op_divu_i64:
2961 tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
2962 break;
2963 case INDEX_op_rem_i64:
2964 tcg_out32(s, MODSD | TAB(args[0], args[1], args[2]));
2965 break;
2966 case INDEX_op_remu_i64:
2967 tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
2968 break;
2969
2970 case INDEX_op_qemu_ld_i32:
2971 tcg_out_qemu_ld(s, args, false);
2972 break;
2973 case INDEX_op_qemu_ld_i64:
2974 tcg_out_qemu_ld(s, args, true);
2975 break;
2976 case INDEX_op_qemu_st_i32:
2977 tcg_out_qemu_st(s, args, false);
2978 break;
2979 case INDEX_op_qemu_st_i64:
2980 tcg_out_qemu_st(s, args, true);
2981 break;
2982
2983 case INDEX_op_ext_i32_i64:
2984 tcg_out_ext32s(s, args[0], args[1]);
2985 break;
2986 case INDEX_op_extu_i32_i64:
2987 tcg_out_ext32u(s, args[0], args[1]);
2988 break;
2989
2990 case INDEX_op_setcond_i32:
2991 tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
2992 const_args[2]);
2993 break;
2994 case INDEX_op_setcond_i64:
2995 tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
2996 const_args[2]);
2997 break;
2998 case INDEX_op_setcond2_i32:
2999 tcg_out_setcond2(s, args, const_args);
3000 break;
3001
3002 case INDEX_op_bswap16_i32:
3003 case INDEX_op_bswap16_i64:
3004 tcg_out_bswap16(s, args[0], args[1], args[2]);
3005 break;
3006 case INDEX_op_bswap32_i32:
3007 tcg_out_bswap32(s, args[0], args[1], 0);
3008 break;
3009 case INDEX_op_bswap32_i64:
3010 tcg_out_bswap32(s, args[0], args[1], args[2]);
3011 break;
3012 case INDEX_op_bswap64_i64:
3013 tcg_out_bswap64(s, args[0], args[1]);
3014 break;
3015
3016 case INDEX_op_deposit_i32:
3017 if (const_args[2]) {
3018 uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
3019 tcg_out_andi32(s, args[0], args[0], ~mask);
3020 } else {
3021 tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
3022 32 - args[3] - args[4], 31 - args[3]);
3023 }
3024 break;
3025 case INDEX_op_deposit_i64:
3026 if (const_args[2]) {
3027 uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
3028 tcg_out_andi64(s, args[0], args[0], ~mask);
3029 } else {
3030 tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
3031 64 - args[3] - args[4]);
3032 }
3033 break;
3034
3035 case INDEX_op_extract_i32:
3036 tcg_out_rlw(s, RLWINM, args[0], args[1],
3037 32 - args[2], 32 - args[3], 31);
3038 break;
3039 case INDEX_op_extract_i64:
3040 tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
3041 break;
3042
3043 case INDEX_op_movcond_i32:
3044 tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
3045 args[3], args[4], const_args[2]);
3046 break;
3047 case INDEX_op_movcond_i64:
3048 tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
3049 args[3], args[4], const_args[2]);
3050 break;
3051
3052 #if TCG_TARGET_REG_BITS == 64
3053 case INDEX_op_add2_i64:
3054 #else
3055 case INDEX_op_add2_i32:
3056 #endif
3057 /* Note that the CA bit is defined based on the word size of the
3058 environment. So in 64-bit mode it's always carry-out of bit 63.
3059 The fallback code using deposit works just as well for 32-bit. */
3060 a0 = args[0], a1 = args[1];
3061 if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
3062 a0 = TCG_REG_R0;
3063 }
3064 if (const_args[4]) {
3065 tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
3066 } else {
3067 tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
3068 }
3069 if (const_args[5]) {
3070 tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
3071 } else {
3072 tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
3073 }
3074 if (a0 != args[0]) {
3075 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
3076 }
3077 break;
3078
3079 #if TCG_TARGET_REG_BITS == 64
3080 case INDEX_op_sub2_i64:
3081 #else
3082 case INDEX_op_sub2_i32:
3083 #endif
3084 a0 = args[0], a1 = args[1];
3085 if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
3086 a0 = TCG_REG_R0;
3087 }
3088 if (const_args[2]) {
3089 tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
3090 } else {
3091 tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
3092 }
3093 if (const_args[3]) {
3094 tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
3095 } else {
3096 tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
3097 }
3098 if (a0 != args[0]) {
3099 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
3100 }
3101 break;
3102
3103 case INDEX_op_muluh_i32:
3104 tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
3105 break;
3106 case INDEX_op_mulsh_i32:
3107 tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
3108 break;
3109 case INDEX_op_muluh_i64:
3110 tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
3111 break;
3112 case INDEX_op_mulsh_i64:
3113 tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
3114 break;
3115
3116 case INDEX_op_mb:
3117 tcg_out_mb(s, args[0]);
3118 break;
3119
3120 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
3121 case INDEX_op_mov_i64:
3122 case INDEX_op_call: /* Always emitted via tcg_out_call. */
3123 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
3124 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
3125 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
3126 case INDEX_op_ext8s_i64:
3127 case INDEX_op_ext8u_i32:
3128 case INDEX_op_ext8u_i64:
3129 case INDEX_op_ext16s_i32:
3130 case INDEX_op_ext16s_i64:
3131 case INDEX_op_ext16u_i32:
3132 case INDEX_op_ext16u_i64:
3133 case INDEX_op_ext32s_i64:
3134 default:
3135 g_assert_not_reached();
3136 }
3137 }
3138
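/*
 * Report how each vector opcode is supported for the given element size:
 * 1 for a native instruction, -1 for expansion via tcg_expand_vec_op,
 * 0 for unsupported.
 */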
3139 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
3140 {
3141 switch (opc) {
3142 case INDEX_op_and_vec:
3143 case INDEX_op_or_vec:
3144 case INDEX_op_xor_vec:
3145 case INDEX_op_andc_vec:
3146 case INDEX_op_not_vec:
3147 case INDEX_op_nor_vec:
3148 case INDEX_op_eqv_vec:
3149 case INDEX_op_nand_vec:
3150 return 1;
3151 case INDEX_op_orc_vec:
3152 return have_isa_2_07;
3153 case INDEX_op_add_vec:
3154 case INDEX_op_sub_vec:
3155 case INDEX_op_smax_vec:
3156 case INDEX_op_smin_vec:
3157 case INDEX_op_umax_vec:
3158 case INDEX_op_umin_vec:
3159 case INDEX_op_shlv_vec:
3160 case INDEX_op_shrv_vec:
3161 case INDEX_op_sarv_vec:
3162 case INDEX_op_rotlv_vec:
3163 return vece <= MO_32 || have_isa_2_07;
3164 case INDEX_op_ssadd_vec:
3165 case INDEX_op_sssub_vec:
3166 case INDEX_op_usadd_vec:
3167 case INDEX_op_ussub_vec:
3168 return vece <= MO_32;
3169 case INDEX_op_cmp_vec:
3170 case INDEX_op_shli_vec:
3171 case INDEX_op_shri_vec:
3172 case INDEX_op_sari_vec:
3173 case INDEX_op_rotli_vec:
3174 return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
3175 case INDEX_op_neg_vec:
3176 return vece >= MO_32 && have_isa_3_00;
3177 case INDEX_op_mul_vec:
3178 switch (vece) {
3179 case MO_8:
3180 case MO_16:
3181 return -1;
3182 case MO_32:
3183 return have_isa_2_07 ? 1 : -1;
3184 case MO_64:
3185 return have_isa_3_10;
3186 }
3187 return 0;
3188 case INDEX_op_bitsel_vec:
3189 return have_vsx;
3190 case INDEX_op_rotrv_vec:
3191 return -1;
3192 default:
3193 return 0;
3194 }
3195 }
3196
3197 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
3198 TCGReg dst, TCGReg src)
3199 {
3200 tcg_debug_assert(dst >= TCG_REG_V0);
3201
3202 /* Splat from integer reg allowed via constraints for v3.00. */
3203 if (src < TCG_REG_V0) {
3204 tcg_debug_assert(have_isa_3_00);
3205 switch (vece) {
3206 case MO_64:
3207 tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
3208 return true;
3209 case MO_32:
3210 tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
3211 return true;
3212 default:
3213 /* Fail, so that we fall back on either dupm or mov+dup. */
3214 return false;
3215 }
3216 }
3217
3218 /*
3219 * Recall we use (or emulate) VSX integer loads, so the integer is
3220 * right justified within the left (zero-index) double-word.
3221 */
3222 switch (vece) {
3223 case MO_8:
3224 tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
3225 break;
3226 case MO_16:
3227 tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
3228 break;
3229 case MO_32:
3230 tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
3231 break;
3232 case MO_64:
3233 if (have_vsx) {
3234 tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
3235 break;
3236 }
3237 tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
3238 tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
3239 break;
3240 default:
3241 g_assert_not_reached();
3242 }
3243 return true;
3244 }
3245
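/*
 * Duplicate a value from memory into all vector lanes: load the element
 * (or the quadword containing it) and splat the addressed lane,
 * adjusting the lane index for host endianness.
 */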
3246 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
3247 TCGReg out, TCGReg base, intptr_t offset)
3248 {
3249 int elt;
3250
3251 tcg_debug_assert(out >= TCG_REG_V0);
3252 switch (vece) {
3253 case MO_8:
3254 if (have_isa_3_00) {
3255 tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
3256 } else {
3257 tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
3258 }
3259 elt = extract32(offset, 0, 4);
3260 #if !HOST_BIG_ENDIAN
3261 elt ^= 15;
3262 #endif
3263 tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
3264 break;
3265 case MO_16:
3266 tcg_debug_assert((offset & 1) == 0);
3267 if (have_isa_3_00) {
3268 tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
3269 } else {
3270 tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
3271 }
3272 elt = extract32(offset, 1, 3);
3273 #if !HOST_BIG_ENDIAN
3274 elt ^= 7;
3275 #endif
3276 tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
3277 break;
3278 case MO_32:
3279 if (have_isa_3_00) {
3280 tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
3281 break;
3282 }
3283 tcg_debug_assert((offset & 3) == 0);
3284 tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
3285 elt = extract32(offset, 2, 2);
3286 #if !HOST_BIG_ENDIAN
3287 elt ^= 3;
3288 #endif
3289 tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
3290 break;
3291 case MO_64:
3292 if (have_vsx) {
3293 tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
3294 break;
3295 }
3296 tcg_debug_assert((offset & 7) == 0);
3297 tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
3298 tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
3299 elt = extract32(offset, 3, 1);
3300 #if !HOST_BIG_ENDIAN
3301 elt = !elt;
3302 #endif
3303 if (elt) {
3304 tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
3305 } else {
3306 tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
3307 }
3308 break;
3309 default:
3310 g_assert_not_reached();
3311 }
3312 return true;
3313 }
3314
3315 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
3316 unsigned vecl, unsigned vece,
3317 const TCGArg args[TCG_MAX_OP_ARGS],
3318 const int const_args[TCG_MAX_OP_ARGS])
3319 {
3320 static const uint32_t
3321 add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
3322 sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
3323 mul_op[4] = { 0, 0, VMULUWM, VMULLD },
3324 neg_op[4] = { 0, 0, VNEGW, VNEGD },
3325 eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
3326 ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
3327 gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
3328 gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
3329 ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
3330 usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
3331 sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
3332 ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
3333 umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
3334 smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
3335 umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
3336 smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
3337 shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
3338 shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
3339 sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
3340 mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
3341 mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
3342 muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
3343 mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
3344 pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
3345 rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };
3346
3347 TCGType type = vecl + TCG_TYPE_V64;
3348 TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
3349 uint32_t insn;
3350
3351 switch (opc) {
3352 case INDEX_op_ld_vec:
3353 tcg_out_ld(s, type, a0, a1, a2);
3354 return;
3355 case INDEX_op_st_vec:
3356 tcg_out_st(s, type, a0, a1, a2);
3357 return;
3358 case INDEX_op_dupm_vec:
3359 tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
3360 return;
3361
3362 case INDEX_op_add_vec:
3363 insn = add_op[vece];
3364 break;
3365 case INDEX_op_sub_vec:
3366 insn = sub_op[vece];
3367 break;
3368 case INDEX_op_neg_vec:
3369 insn = neg_op[vece];
3370 a2 = a1;
3371 a1 = 0;
3372 break;
3373 case INDEX_op_mul_vec:
3374 insn = mul_op[vece];
3375 break;
3376 case INDEX_op_ssadd_vec:
3377 insn = ssadd_op[vece];
3378 break;
3379 case INDEX_op_sssub_vec:
3380 insn = sssub_op[vece];
3381 break;
3382 case INDEX_op_usadd_vec:
3383 insn = usadd_op[vece];
3384 break;
3385 case INDEX_op_ussub_vec:
3386 insn = ussub_op[vece];
3387 break;
3388 case INDEX_op_smin_vec:
3389 insn = smin_op[vece];
3390 break;
3391 case INDEX_op_umin_vec:
3392 insn = umin_op[vece];
3393 break;
3394 case INDEX_op_smax_vec:
3395 insn = smax_op[vece];
3396 break;
3397 case INDEX_op_umax_vec:
3398 insn = umax_op[vece];
3399 break;
3400 case INDEX_op_shlv_vec:
3401 insn = shlv_op[vece];
3402 break;
3403 case INDEX_op_shrv_vec:
3404 insn = shrv_op[vece];
3405 break;
3406 case INDEX_op_sarv_vec:
3407 insn = sarv_op[vece];
3408 break;
3409 case INDEX_op_and_vec:
3410 insn = VAND;
3411 break;
3412 case INDEX_op_or_vec:
3413 insn = VOR;
3414 break;
3415 case INDEX_op_xor_vec:
3416 insn = VXOR;
3417 break;
3418 case INDEX_op_andc_vec:
3419 insn = VANDC;
3420 break;
3421 case INDEX_op_not_vec:
3422 insn = VNOR;
3423 a2 = a1;
3424 break;
3425 case INDEX_op_orc_vec:
3426 insn = VORC;
3427 break;
3428 case INDEX_op_nand_vec:
3429 insn = VNAND;
3430 break;
3431 case INDEX_op_nor_vec:
3432 insn = VNOR;
3433 break;
3434 case INDEX_op_eqv_vec:
3435 insn = VEQV;
3436 break;
3437
3438 case INDEX_op_cmp_vec:
3439 switch (args[3]) {
3440 case TCG_COND_EQ:
3441 insn = eq_op[vece];
3442 break;
3443 case TCG_COND_NE:
3444 insn = ne_op[vece];
3445 break;
3446 case TCG_COND_GT:
3447 insn = gts_op[vece];
3448 break;
3449 case TCG_COND_GTU:
3450 insn = gtu_op[vece];
3451 break;
3452 default:
3453 g_assert_not_reached();
3454 }
3455 break;
3456
3457 case INDEX_op_bitsel_vec:
3458 tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
3459 return;
3460
3461 case INDEX_op_dup2_vec:
3462 assert(TCG_TARGET_REG_BITS == 32);
3463 /* With inputs a1 = xLxx, a2 = xHxx */
3464 tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1)); /* a0 = xxHL */
3465 tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8); /* tmp = HLxx */
3466 tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8); /* a0 = HLHL */
3467 return;
3468
3469 case INDEX_op_ppc_mrgh_vec:
3470 insn = mrgh_op[vece];
3471 break;
3472 case INDEX_op_ppc_mrgl_vec:
3473 insn = mrgl_op[vece];
3474 break;
3475 case INDEX_op_ppc_muleu_vec:
3476 insn = muleu_op[vece];
3477 break;
3478 case INDEX_op_ppc_mulou_vec:
3479 insn = mulou_op[vece];
3480 break;
3481 case INDEX_op_ppc_pkum_vec:
3482 insn = pkum_op[vece];
3483 break;
3484 case INDEX_op_rotlv_vec:
3485 insn = rotl_op[vece];
3486 break;
3487 case INDEX_op_ppc_msum_vec:
3488 tcg_debug_assert(vece == MO_16);
3489 tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
3490 return;
3491
3492 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
3493 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
3494 default:
3495 g_assert_not_reached();
3496 }
3497
3498 tcg_debug_assert(insn != 0);
3499 tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
3500 }
3501
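/*
 * Expand a shift/rotate by immediate: splat the (masked) count into a
 * vector constant and emit the corresponding shift-by-vector opcode.
 */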
3502 static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
3503 TCGv_vec v1, TCGArg imm, TCGOpcode opci)
3504 {
3505 TCGv_vec t1;
3506
3507 if (vece == MO_32) {
3508 /*
3509 * Only 5 bits are significant, and VSPLTISB can represent -16..15.
3510 * So using negative numbers gets us the 4th bit easily.
3511 */
3512 imm = sextract32(imm, 0, 5);
3513 } else {
3514 imm &= (8 << vece) - 1;
3515 }
3516
3517 /* Splat w/bytes for xxspltib when 2.07 allows MO_64. */
3518 t1 = tcg_constant_vec(type, MO_8, imm);
3519 vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
3520 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3521 }
3522
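/*
 * Expand a vector comparison in terms of EQ/GT/GTU (plus NE where ISA
 * 3.00 provides it), swapping the operands and/or inverting the result
 * for the remaining conditions.
 */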
3523 static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
3524 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
3525 {
3526 bool need_swap = false, need_inv = false;
3527
3528 tcg_debug_assert(vece <= MO_32 || have_isa_2_07);
3529
3530 switch (cond) {
3531 case TCG_COND_EQ:
3532 case TCG_COND_GT:
3533 case TCG_COND_GTU:
3534 break;
3535 case TCG_COND_NE:
3536 if (have_isa_3_00 && vece <= MO_32) {
3537 break;
3538 }
3539 /* fall through */
3540 case TCG_COND_LE:
3541 case TCG_COND_LEU:
3542 need_inv = true;
3543 break;
3544 case TCG_COND_LT:
3545 case TCG_COND_LTU:
3546 need_swap = true;
3547 break;
3548 case TCG_COND_GE:
3549 case TCG_COND_GEU:
3550 need_swap = need_inv = true;
3551 break;
3552 default:
3553 g_assert_not_reached();
3554 }
3555
3556 if (need_inv) {
3557 cond = tcg_invert_cond(cond);
3558 }
3559 if (need_swap) {
3560 TCGv_vec t1;
3561 t1 = v1, v1 = v2, v2 = t1;
3562 cond = tcg_swap_cond(cond);
3563 }
3564
3565 vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
3566 tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
3567
3568 if (need_inv) {
3569 tcg_gen_not_vec(vece, v0, v0);
3570 }
3571 }
3572
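/*
 * Expand vector multiply for element sizes without a native instruction:
 * even/odd widening multiplies merged and packed for MO_8/MO_16, and a
 * multiply-sum based sequence for MO_32 prior to ISA 2.07.
 */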
3573 static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
3574 TCGv_vec v1, TCGv_vec v2)
3575 {
3576 TCGv_vec t1 = tcg_temp_new_vec(type);
3577 TCGv_vec t2 = tcg_temp_new_vec(type);
3578 TCGv_vec c0, c16;
3579
3580 switch (vece) {
3581 case MO_8:
3582 case MO_16:
3583 vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
3584 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3585 vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
3586 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3587 vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
3588 tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3589 vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
3590 tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3591 vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
3592 tcgv_vec_arg(v0), tcgv_vec_arg(t1));
3593 break;
3594
3595 case MO_32:
3596 tcg_debug_assert(!have_isa_2_07);
3597 /*
3598 * Only 5 bits are significant, and VSPLTISB can represent -16..15.
3599 * So using -16 is a quick way to represent 16.
3600 */
3601 c16 = tcg_constant_vec(type, MO_8, -16);
3602 c0 = tcg_constant_vec(type, MO_8, 0);
3603
3604 vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
3605 tcgv_vec_arg(v2), tcgv_vec_arg(c16));
3606 vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
3607 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3608 vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t1),
3609 tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(c0));
3610 vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t1),
3611 tcgv_vec_arg(t1), tcgv_vec_arg(c16));
3612 tcg_gen_add_vec(MO_32, v0, t1, t2);
3613 break;
3614
3615 default:
3616 g_assert_not_reached();
3617 }
3618 tcg_temp_free_vec(t1);
3619 tcg_temp_free_vec(t2);
3620 }
3621
3622 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3623 TCGArg a0, ...)
3624 {
3625 va_list va;
3626 TCGv_vec v0, v1, v2, t0;
3627 TCGArg a2;
3628
3629 va_start(va, a0);
3630 v0 = temp_tcgv_vec(arg_temp(a0));
3631 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3632 a2 = va_arg(va, TCGArg);
3633
3634 switch (opc) {
3635 case INDEX_op_shli_vec:
3636 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
3637 break;
3638 case INDEX_op_shri_vec:
3639 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
3640 break;
3641 case INDEX_op_sari_vec:
3642 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
3643 break;
3644 case INDEX_op_rotli_vec:
3645 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
3646 break;
3647 case INDEX_op_cmp_vec:
3648 v2 = temp_tcgv_vec(arg_temp(a2));
3649 expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
3650 break;
3651 case INDEX_op_mul_vec:
3652 v2 = temp_tcgv_vec(arg_temp(a2));
3653 expand_vec_mul(type, vece, v0, v1, v2);
3654 break;
3655 case INDEX_op_rotlv_vec:
3656 v2 = temp_tcgv_vec(arg_temp(a2));
3657 t0 = tcg_temp_new_vec(type);
3658 tcg_gen_neg_vec(vece, t0, v2);
3659 tcg_gen_rotlv_vec(vece, v0, v1, t0);
3660 tcg_temp_free_vec(t0);
3661 break;
3662 default:
3663 g_assert_not_reached();
3664 }
3665 va_end(va);
3666 }
3667
3668 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
3669 {
3670 switch (op) {
3671 case INDEX_op_goto_ptr:
3672 return C_O0_I1(r);
3673
3674 case INDEX_op_ld8u_i32:
3675 case INDEX_op_ld8s_i32:
3676 case INDEX_op_ld16u_i32:
3677 case INDEX_op_ld16s_i32:
3678 case INDEX_op_ld_i32:
3679 case INDEX_op_ctpop_i32:
3680 case INDEX_op_neg_i32:
3681 case INDEX_op_not_i32:
3682 case INDEX_op_ext8s_i32:
3683 case INDEX_op_ext16s_i32:
3684 case INDEX_op_bswap16_i32:
3685 case INDEX_op_bswap32_i32:
3686 case INDEX_op_extract_i32:
3687 case INDEX_op_ld8u_i64:
3688 case INDEX_op_ld8s_i64:
3689 case INDEX_op_ld16u_i64:
3690 case INDEX_op_ld16s_i64:
3691 case INDEX_op_ld32u_i64:
3692 case INDEX_op_ld32s_i64:
3693 case INDEX_op_ld_i64:
3694 case INDEX_op_ctpop_i64:
3695 case INDEX_op_neg_i64:
3696 case INDEX_op_not_i64:
3697 case INDEX_op_ext8s_i64:
3698 case INDEX_op_ext16s_i64:
3699 case INDEX_op_ext32s_i64:
3700 case INDEX_op_ext_i32_i64:
3701 case INDEX_op_extu_i32_i64:
3702 case INDEX_op_bswap16_i64:
3703 case INDEX_op_bswap32_i64:
3704 case INDEX_op_bswap64_i64:
3705 case INDEX_op_extract_i64:
3706 return C_O1_I1(r, r);
3707
3708 case INDEX_op_st8_i32:
3709 case INDEX_op_st16_i32:
3710 case INDEX_op_st_i32:
3711 case INDEX_op_st8_i64:
3712 case INDEX_op_st16_i64:
3713 case INDEX_op_st32_i64:
3714 case INDEX_op_st_i64:
3715 return C_O0_I2(r, r);
3716
3717 case INDEX_op_add_i32:
3718 case INDEX_op_and_i32:
3719 case INDEX_op_or_i32:
3720 case INDEX_op_xor_i32:
3721 case INDEX_op_andc_i32:
3722 case INDEX_op_orc_i32:
3723 case INDEX_op_eqv_i32:
3724 case INDEX_op_shl_i32:
3725 case INDEX_op_shr_i32:
3726 case INDEX_op_sar_i32:
3727 case INDEX_op_rotl_i32:
3728 case INDEX_op_rotr_i32:
3729 case INDEX_op_setcond_i32:
3730 case INDEX_op_and_i64:
3731 case INDEX_op_andc_i64:
3732 case INDEX_op_shl_i64:
3733 case INDEX_op_shr_i64:
3734 case INDEX_op_sar_i64:
3735 case INDEX_op_rotl_i64:
3736 case INDEX_op_rotr_i64:
3737 case INDEX_op_setcond_i64:
3738 return C_O1_I2(r, r, ri);
3739
3740 case INDEX_op_mul_i32:
3741 case INDEX_op_mul_i64:
3742 return C_O1_I2(r, r, rI);
3743
3744 case INDEX_op_div_i32:
3745 case INDEX_op_divu_i32:
3746 case INDEX_op_rem_i32:
3747 case INDEX_op_remu_i32:
3748 case INDEX_op_nand_i32:
3749 case INDEX_op_nor_i32:
3750 case INDEX_op_muluh_i32:
3751 case INDEX_op_mulsh_i32:
3752 case INDEX_op_orc_i64:
3753 case INDEX_op_eqv_i64:
3754 case INDEX_op_nand_i64:
3755 case INDEX_op_nor_i64:
3756 case INDEX_op_div_i64:
3757 case INDEX_op_divu_i64:
3758 case INDEX_op_rem_i64:
3759 case INDEX_op_remu_i64:
3760 case INDEX_op_mulsh_i64:
3761 case INDEX_op_muluh_i64:
3762 return C_O1_I2(r, r, r);
3763
3764 case INDEX_op_sub_i32:
3765 return C_O1_I2(r, rI, ri);
3766 case INDEX_op_add_i64:
3767 return C_O1_I2(r, r, rT);
3768 case INDEX_op_or_i64:
3769 case INDEX_op_xor_i64:
3770 return C_O1_I2(r, r, rU);
3771 case INDEX_op_sub_i64:
3772 return C_O1_I2(r, rI, rT);
3773 case INDEX_op_clz_i32:
3774 case INDEX_op_ctz_i32:
3775 case INDEX_op_clz_i64:
3776 case INDEX_op_ctz_i64:
3777 return C_O1_I2(r, r, rZW);
3778
3779 case INDEX_op_brcond_i32:
3780 case INDEX_op_brcond_i64:
3781 return C_O0_I2(r, ri);
3782
3783 case INDEX_op_movcond_i32:
3784 case INDEX_op_movcond_i64:
3785 return C_O1_I4(r, r, ri, rZ, rZ);
3786 case INDEX_op_deposit_i32:
3787 case INDEX_op_deposit_i64:
3788 return C_O1_I2(r, 0, rZ);
3789 case INDEX_op_brcond2_i32:
3790 return C_O0_I4(r, r, ri, ri);
3791 case INDEX_op_setcond2_i32:
3792 return C_O1_I4(r, r, r, ri, ri);
3793 case INDEX_op_add2_i64:
3794 case INDEX_op_add2_i32:
3795 return C_O2_I4(r, r, r, r, rI, rZM);
3796 case INDEX_op_sub2_i64:
3797 case INDEX_op_sub2_i32:
3798 return C_O2_I4(r, r, rI, rZM, r, r);
3799
3800 case INDEX_op_qemu_ld_i32:
3801 return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
3802 ? C_O1_I1(r, L)
3803 : C_O1_I2(r, L, L));
3804
3805 case INDEX_op_qemu_st_i32:
3806 return (TCG_TARGET_REG_BITS == 64 || TARGET_LONG_BITS == 32
3807 ? C_O0_I2(S, S)
3808 : C_O0_I3(S, S, S));
3809
3810 case INDEX_op_qemu_ld_i64:
3811 return (TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, L)
3812 : TARGET_LONG_BITS == 32 ? C_O2_I1(L, L, L)
3813 : C_O2_I2(L, L, L, L));
3814
3815 case INDEX_op_qemu_st_i64:
3816 return (TCG_TARGET_REG_BITS == 64 ? C_O0_I2(S, S)
3817 : TARGET_LONG_BITS == 32 ? C_O0_I3(S, S, S)
3818 : C_O0_I4(S, S, S, S));
3819
3820 case INDEX_op_add_vec:
3821 case INDEX_op_sub_vec:
3822 case INDEX_op_mul_vec:
3823 case INDEX_op_and_vec:
3824 case INDEX_op_or_vec:
3825 case INDEX_op_xor_vec:
3826 case INDEX_op_andc_vec:
3827 case INDEX_op_orc_vec:
3828 case INDEX_op_nor_vec:
3829 case INDEX_op_eqv_vec:
3830 case INDEX_op_nand_vec:
3831 case INDEX_op_cmp_vec:
3832 case INDEX_op_ssadd_vec:
3833 case INDEX_op_sssub_vec:
3834 case INDEX_op_usadd_vec:
3835 case INDEX_op_ussub_vec:
3836 case INDEX_op_smax_vec:
3837 case INDEX_op_smin_vec:
3838 case INDEX_op_umax_vec:
3839 case INDEX_op_umin_vec:
3840 case INDEX_op_shlv_vec:
3841 case INDEX_op_shrv_vec:
3842 case INDEX_op_sarv_vec:
3843 case INDEX_op_rotlv_vec:
3844 case INDEX_op_rotrv_vec:
3845 case INDEX_op_ppc_mrgh_vec:
3846 case INDEX_op_ppc_mrgl_vec:
3847 case INDEX_op_ppc_muleu_vec:
3848 case INDEX_op_ppc_mulou_vec:
3849 case INDEX_op_ppc_pkum_vec:
3850 case INDEX_op_dup2_vec:
3851 return C_O1_I2(v, v, v);
3852
3853 case INDEX_op_not_vec:
3854 case INDEX_op_neg_vec:
3855 return C_O1_I1(v, v);
3856
3857 case INDEX_op_dup_vec:
3858 return have_isa_3_00 ? C_O1_I1(v, vr) : C_O1_I1(v, v);
3859
3860 case INDEX_op_ld_vec:
3861 case INDEX_op_dupm_vec:
3862 return C_O1_I1(v, r);
3863
3864 case INDEX_op_st_vec:
3865 return C_O0_I2(v, r);
3866
3867 case INDEX_op_bitsel_vec:
3868 case INDEX_op_ppc_msum_vec:
3869 return C_O1_I3(v, v, v, v);
3870
3871 default:
3872 g_assert_not_reached();
3873 }
3874 }
3875
3876 static void tcg_target_init(TCGContext *s)
3877 {
3878 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
3879 unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);
3880
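    /*
     * The auxiliary vector advertises the host ISA level and optional
     * facilities.  Each later test overrides the previous one, so we
     * end up with the highest architecture level the kernel reports.
     */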
3881 have_isa = tcg_isa_base;
3882 if (hwcap & PPC_FEATURE_ARCH_2_06) {
3883 have_isa = tcg_isa_2_06;
3884 }
3885 #ifdef PPC_FEATURE2_ARCH_2_07
3886 if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
3887 have_isa = tcg_isa_2_07;
3888 }
3889 #endif
3890 #ifdef PPC_FEATURE2_ARCH_3_00
3891 if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
3892 have_isa = tcg_isa_3_00;
3893 }
3894 #endif
3895 #ifdef PPC_FEATURE2_ARCH_3_10
3896 if (hwcap2 & PPC_FEATURE2_ARCH_3_10) {
3897 have_isa = tcg_isa_3_10;
3898 }
3899 #endif
3900
3901 #ifdef PPC_FEATURE2_HAS_ISEL
3902 /* Prefer the kernel's explicit indication that isel is available. */
3903 have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
3904 #else
3905 /* Otherwise fall back on the fact that POWER7 (ISA 2.06) has ISEL. */
3906 have_isel = have_isa_2_06;
3907 #endif
3908
3909 if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
3910 have_altivec = true;
3911 /* We only care about the portion of VSX that overlaps Altivec. */
3912 if (hwcap & PPC_FEATURE_HAS_VSX) {
3913 have_vsx = true;
3914 }
3915 }
3916
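    /*
     * The register set packs the 32 general registers into the low half
     * of the mask and the 32 Altivec/VSX registers into the high half,
     * hence the 0xffffffff and 0xffffffff00000000ull masks below.
     */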
3917 tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
3918 tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
3919 if (have_altivec) {
3920 tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
3921 tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
3922 }
3923
3924 tcg_target_call_clobber_regs = 0;
3925 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
3926 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
3927 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
3928 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
3929 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
3930 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
3931 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
3932 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
3933 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
3934 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
3935 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
3936 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);
3937
3938 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
3939 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
3940 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
3941 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
3942 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
3943 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
3944 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
3945 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
3946 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
3947 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
3948 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
3949 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
3950 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
3951 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
3952 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
3953 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
3954 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
3955 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
3956 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
3957 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);
3958
3959 s->reserved_regs = 0;
3960 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
3961 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
3962 #if defined(_CALL_SYSV)
3963 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
3964 #endif
3965 #if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
3966 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
3967 #endif
3968 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); /* mem temp */
3969 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
3970 tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
3971 if (USE_REG_TB) {
3972 tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
3973 }
3974 }
3975
3976 #ifdef __ELF__
3977 typedef struct {
3978 DebugFrameCIE cie;
3979 DebugFrameFDEHeader fde;
3980 uint8_t fde_def_cfa[4];
3981 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
3982 } DebugFrame;
3983
3984 /* We expect FRAME_SIZE to fit in a 2-byte uleb128-encoded value. */
3985 QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
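    /*
     * fde_def_cfa below reserves exactly two bytes for the uleb128
     * encoding of FRAME_SIZE: the low 7 bits with the continuation bit
     * set, then the next 7 bits.  Two bytes hold 14 payload bits, hence
     * the 1 << 14 bound.  E.g. a hypothetical FRAME_SIZE of 0x240 would
     * encode as 0xc0 0x04.
     */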
3986
3987 #if TCG_TARGET_REG_BITS == 64
3988 # define ELF_HOST_MACHINE EM_PPC64
3989 #else
3990 # define ELF_HOST_MACHINE EM_PPC
3991 #endif
3992
3993 static DebugFrame debug_frame = {
3994 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
3995 .cie.id = -1,
3996 .cie.version = 1,
3997 .cie.code_align = 1,
3998 .cie.data_align = (-SZR & 0x7f), /* sleb128 -SZR */
3999 .cie.return_column = 65,
4000
4001 /* Total FDE size does not include the "len" member. */
4002 .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
4003
4004 .fde_def_cfa = {
4005 12, TCG_REG_R1, /* DW_CFA_def_cfa r1, ... */
4006 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
4007 (FRAME_SIZE >> 7)
4008 },
4009 .fde_reg_ofs = {
4010 /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
4011 0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
4012 }
4013 };
4014
4015 void tcg_register_jit(const void *buf, size_t buf_size)
4016 {
4017 uint8_t *p = &debug_frame.fde_reg_ofs[3];
4018 int i;
4019
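    /*
     * Emit one DW_CFA_offset entry per callee-saved register: the
     * opcode byte is 0x80 ORed with the register number, and the
     * uleb128 operand is the factored offset, i.e. the save slot's
     * distance below the CFA divided by SZR (the data alignment factor
     * is -SZR).
     */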
4020 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
4021 p[0] = 0x80 + tcg_target_callee_save_regs[i];
4022 p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
4023 }
4024
4025 debug_frame.fde.func_start = (uintptr_t)buf;
4026 debug_frame.fde.func_len = buf_size;
4027
4028 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
4029 }
4030 #endif /* __ELF__ */
4031 #undef VMULEUB
4032 #undef VMULEUH
4033 #undef VMULEUW
4034 #undef VMULOUB
4035 #undef VMULOUH
4036 #undef VMULOUW
4037 #undef VMSUMUHM