1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #include "elf.h"
26 #include "../tcg-pool.c.inc"
27 #include "../tcg-ldst.c.inc"
28
29 /*
30 * Standardize on the _CALL_FOO symbols used by GCC:
31 * Apple XCode does not define _CALL_DARWIN.
32 * Clang defines _CALL_ELF (64-bit) but not _CALL_SYSV (32-bit).
33 */
34 #if !defined(_CALL_SYSV) && \
35 !defined(_CALL_DARWIN) && \
36 !defined(_CALL_AIX) && \
37 !defined(_CALL_ELF)
38 # if defined(__APPLE__)
39 # define _CALL_DARWIN
40 # elif defined(__ELF__) && TCG_TARGET_REG_BITS == 32
41 # define _CALL_SYSV
42 # else
43 # error "Unknown ABI"
44 # endif
45 #endif
46
47 #if TCG_TARGET_REG_BITS == 64
48 # define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_EXTEND
49 # define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_NORMAL
50 #else
51 # define TCG_TARGET_CALL_ARG_I32 TCG_CALL_ARG_NORMAL
52 # define TCG_TARGET_CALL_RET_I128 TCG_CALL_RET_BY_REF
53 #endif
54 #ifdef _CALL_SYSV
55 # define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_EVEN
56 # define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_BY_REF
57 #else
58 # define TCG_TARGET_CALL_ARG_I64 TCG_CALL_ARG_NORMAL
59 # define TCG_TARGET_CALL_ARG_I128 TCG_CALL_ARG_NORMAL
60 #endif
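/*
 * Illustrative effect of TCG_CALL_ARG_EVEN (an assumption drawn from
 * the 32-bit SysV ABI): an i64 argument is padded out to an even
 * argument slot, so after an i32 in r3 it goes in r5:r6, leaving r4
 * unused, rather than being split across r4:r5.
 */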
61
62 /* For some memory operations, we need a scratch that isn't R0. For the AIX
63 calling convention, we can re-use the TOC register since we'll be reloading
64 it at every call. Otherwise R12 will do nicely as neither a call-saved
65 register nor a parameter register. */
66 #ifdef _CALL_AIX
67 # define TCG_REG_TMP1 TCG_REG_R2
68 #else
69 # define TCG_REG_TMP1 TCG_REG_R12
70 #endif
71 #define TCG_REG_TMP2 TCG_REG_R11
72
73 #define TCG_VEC_TMP1 TCG_REG_V0
74 #define TCG_VEC_TMP2 TCG_REG_V1
75
76 #define TCG_REG_TB TCG_REG_R31
77 #define USE_REG_TB (TCG_TARGET_REG_BITS == 64)
78
79 /* Shorthand for size of a pointer. Avoid promotion to unsigned. */
80 #define SZP ((int)sizeof(void *))
81
82 /* Shorthand for size of a register. */
83 #define SZR (TCG_TARGET_REG_BITS / 8)
84
85 #define TCG_CT_CONST_S16 0x100
86 #define TCG_CT_CONST_S32 0x400
87 #define TCG_CT_CONST_U32 0x800
88 #define TCG_CT_CONST_ZERO 0x1000
89 #define TCG_CT_CONST_MONE 0x2000
90 #define TCG_CT_CONST_WSZ 0x4000
91
92 #define ALL_GENERAL_REGS 0xffffffffu
93 #define ALL_VECTOR_REGS 0xffffffff00000000ull
94
95 TCGPowerISA have_isa;
96 static bool have_isel;
97 bool have_altivec;
98 bool have_vsx;
99
100 #ifndef CONFIG_SOFTMMU
101 #define TCG_GUEST_BASE_REG 30
102 #endif
103
104 #ifdef CONFIG_DEBUG_TCG
105 static const char tcg_target_reg_names[TCG_TARGET_NB_REGS][4] = {
106 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
107 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
108 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
109 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
110 "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
111 "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
112 "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
113 "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
114 };
115 #endif
116
117 static const int tcg_target_reg_alloc_order[] = {
118 TCG_REG_R14, /* call saved registers */
119 TCG_REG_R15,
120 TCG_REG_R16,
121 TCG_REG_R17,
122 TCG_REG_R18,
123 TCG_REG_R19,
124 TCG_REG_R20,
125 TCG_REG_R21,
126 TCG_REG_R22,
127 TCG_REG_R23,
128 TCG_REG_R24,
129 TCG_REG_R25,
130 TCG_REG_R26,
131 TCG_REG_R27,
132 TCG_REG_R28,
133 TCG_REG_R29,
134 TCG_REG_R30,
135 TCG_REG_R31,
136 TCG_REG_R12, /* call clobbered, non-arguments */
137 TCG_REG_R11,
138 TCG_REG_R2,
139 TCG_REG_R13,
140 TCG_REG_R10, /* call clobbered, arguments */
141 TCG_REG_R9,
142 TCG_REG_R8,
143 TCG_REG_R7,
144 TCG_REG_R6,
145 TCG_REG_R5,
146 TCG_REG_R4,
147 TCG_REG_R3,
148
149 /* V0 and V1 reserved as temporaries; V20 - V31 are call-saved */
150 TCG_REG_V2, /* call clobbered, vectors */
151 TCG_REG_V3,
152 TCG_REG_V4,
153 TCG_REG_V5,
154 TCG_REG_V6,
155 TCG_REG_V7,
156 TCG_REG_V8,
157 TCG_REG_V9,
158 TCG_REG_V10,
159 TCG_REG_V11,
160 TCG_REG_V12,
161 TCG_REG_V13,
162 TCG_REG_V14,
163 TCG_REG_V15,
164 TCG_REG_V16,
165 TCG_REG_V17,
166 TCG_REG_V18,
167 TCG_REG_V19,
168 };
169
170 static const int tcg_target_call_iarg_regs[] = {
171 TCG_REG_R3,
172 TCG_REG_R4,
173 TCG_REG_R5,
174 TCG_REG_R6,
175 TCG_REG_R7,
176 TCG_REG_R8,
177 TCG_REG_R9,
178 TCG_REG_R10
179 };
180
181 static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
182 {
183 tcg_debug_assert(kind == TCG_CALL_RET_NORMAL);
184 tcg_debug_assert(slot >= 0 && slot <= 1);
185 return TCG_REG_R3 + slot;
186 }
187
188 static const int tcg_target_callee_save_regs[] = {
189 #ifdef _CALL_DARWIN
190 TCG_REG_R11,
191 #endif
192 TCG_REG_R14,
193 TCG_REG_R15,
194 TCG_REG_R16,
195 TCG_REG_R17,
196 TCG_REG_R18,
197 TCG_REG_R19,
198 TCG_REG_R20,
199 TCG_REG_R21,
200 TCG_REG_R22,
201 TCG_REG_R23,
202 TCG_REG_R24,
203 TCG_REG_R25,
204 TCG_REG_R26,
205 TCG_REG_R27, /* currently used for the global env */
206 TCG_REG_R28,
207 TCG_REG_R29,
208 TCG_REG_R30,
209 TCG_REG_R31
210 };
211
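/* Direct branches carry a 26-bit signed byte displacement (LI << 2),
   giving a reach of +/- 32 MiB from the branch instruction. */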
212 static inline bool in_range_b(tcg_target_long target)
213 {
214 return target == sextract64(target, 0, 26);
215 }
216
217 static uint32_t reloc_pc24_val(const tcg_insn_unit *pc,
218 const tcg_insn_unit *target)
219 {
220 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
221 tcg_debug_assert(in_range_b(disp));
222 return disp & 0x3fffffc;
223 }
224
225 static bool reloc_pc24(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
226 {
227 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
228 ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
229
230 if (in_range_b(disp)) {
231 *src_rw = (*src_rw & ~0x3fffffc) | (disp & 0x3fffffc);
232 return true;
233 }
234 return false;
235 }
236
237 static uint16_t reloc_pc14_val(const tcg_insn_unit *pc,
238 const tcg_insn_unit *target)
239 {
240 ptrdiff_t disp = tcg_ptr_byte_diff(target, pc);
241 tcg_debug_assert(disp == (int16_t) disp);
242 return disp & 0xfffc;
243 }
244
245 static bool reloc_pc14(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
246 {
247 const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
248 ptrdiff_t disp = tcg_ptr_byte_diff(target, src_rx);
249
250 if (disp == (int16_t) disp) {
251 *src_rw = (*src_rw & ~0xfffc) | (disp & 0xfffc);
252 return true;
253 }
254 return false;
255 }
256
257 /* test if a constant matches the constraint */
258 static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
259 {
260 if (ct & TCG_CT_CONST) {
261 return 1;
262 }
263
264 /* The only 32-bit constraint we use aside from
265 TCG_CT_CONST is TCG_CT_CONST_S16. */
266 if (type == TCG_TYPE_I32) {
267 val = (int32_t)val;
268 }
269
270 if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
271 return 1;
272 } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
273 return 1;
274 } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
275 return 1;
276 } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
277 return 1;
278 } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
279 return 1;
280 } else if ((ct & TCG_CT_CONST_WSZ)
281 && val == (type == TCG_TYPE_I32 ? 32 : 64)) {
282 return 1;
283 }
284 return 0;
285 }
286
287 #define OPCD(opc) ((opc)<<26)
288 #define XO19(opc) (OPCD(19)|((opc)<<1))
289 #define MD30(opc) (OPCD(30)|((opc)<<2))
290 #define MDS30(opc) (OPCD(30)|((opc)<<1))
291 #define XO31(opc) (OPCD(31)|((opc)<<1))
292 #define XO58(opc) (OPCD(58)|(opc))
293 #define XO62(opc) (OPCD(62)|(opc))
294 #define VX4(opc) (OPCD(4)|(opc))
295
296 #define B OPCD( 18)
297 #define BC OPCD( 16)
298
299 #define LBZ OPCD( 34)
300 #define LHZ OPCD( 40)
301 #define LHA OPCD( 42)
302 #define LWZ OPCD( 32)
303 #define LWZUX XO31( 55)
304 #define LD XO58( 0)
305 #define LDX XO31( 21)
306 #define LDU XO58( 1)
307 #define LDUX XO31( 53)
308 #define LWA XO58( 2)
309 #define LWAX XO31(341)
310 #define LQ OPCD( 56)
311
312 #define STB OPCD( 38)
313 #define STH OPCD( 44)
314 #define STW OPCD( 36)
315 #define STD XO62( 0)
316 #define STDU XO62( 1)
317 #define STDX XO31(149)
318 #define STQ XO62( 2)
319
320 #define ADDIC OPCD( 12)
321 #define ADDI OPCD( 14)
322 #define ADDIS OPCD( 15)
323 #define ORI OPCD( 24)
324 #define ORIS OPCD( 25)
325 #define XORI OPCD( 26)
326 #define XORIS OPCD( 27)
327 #define ANDI OPCD( 28)
328 #define ANDIS OPCD( 29)
329 #define MULLI OPCD( 7)
330 #define CMPLI OPCD( 10)
331 #define CMPI OPCD( 11)
332 #define SUBFIC OPCD( 8)
333
334 #define LWZU OPCD( 33)
335 #define STWU OPCD( 37)
336
337 #define RLWIMI OPCD( 20)
338 #define RLWINM OPCD( 21)
339 #define RLWNM OPCD( 23)
340
341 #define RLDICL MD30( 0)
342 #define RLDICR MD30( 1)
343 #define RLDIMI MD30( 3)
344 #define RLDCL MDS30( 8)
345
346 #define BCLR XO19( 16)
347 #define BCCTR XO19(528)
348 #define CRAND XO19(257)
349 #define CRANDC XO19(129)
350 #define CRNAND XO19(225)
351 #define CROR XO19(449)
352 #define CRNOR XO19( 33)
353
354 #define EXTSB XO31(954)
355 #define EXTSH XO31(922)
356 #define EXTSW XO31(986)
357 #define ADD XO31(266)
358 #define ADDE XO31(138)
359 #define ADDME XO31(234)
360 #define ADDZE XO31(202)
361 #define ADDC XO31( 10)
362 #define AND XO31( 28)
363 #define SUBF XO31( 40)
364 #define SUBFC XO31( 8)
365 #define SUBFE XO31(136)
366 #define SUBFME XO31(232)
367 #define SUBFZE XO31(200)
368 #define OR XO31(444)
369 #define XOR XO31(316)
370 #define MULLW XO31(235)
371 #define MULHW XO31( 75)
372 #define MULHWU XO31( 11)
373 #define DIVW XO31(491)
374 #define DIVWU XO31(459)
375 #define MODSW XO31(779)
376 #define MODUW XO31(267)
377 #define CMP XO31( 0)
378 #define CMPL XO31( 32)
379 #define LHBRX XO31(790)
380 #define LWBRX XO31(534)
381 #define LDBRX XO31(532)
382 #define STHBRX XO31(918)
383 #define STWBRX XO31(662)
384 #define STDBRX XO31(660)
385 #define MFSPR XO31(339)
386 #define MTSPR XO31(467)
387 #define SRAWI XO31(824)
388 #define NEG XO31(104)
389 #define MFCR XO31( 19)
390 #define MFOCRF (MFCR | (1u << 20))
391 #define NOR XO31(124)
392 #define CNTLZW XO31( 26)
393 #define CNTLZD XO31( 58)
394 #define CNTTZW XO31(538)
395 #define CNTTZD XO31(570)
396 #define CNTPOPW XO31(378)
397 #define CNTPOPD XO31(506)
398 #define ANDC XO31( 60)
399 #define ORC XO31(412)
400 #define EQV XO31(284)
401 #define NAND XO31(476)
402 #define ISEL XO31( 15)
403
404 #define MULLD XO31(233)
405 #define MULHD XO31( 73)
406 #define MULHDU XO31( 9)
407 #define DIVD XO31(489)
408 #define DIVDU XO31(457)
409 #define MODSD XO31(777)
410 #define MODUD XO31(265)
411
412 #define LBZX XO31( 87)
413 #define LHZX XO31(279)
414 #define LHAX XO31(343)
415 #define LWZX XO31( 23)
416 #define STBX XO31(215)
417 #define STHX XO31(407)
418 #define STWX XO31(151)
419
420 #define EIEIO XO31(854)
421 #define HWSYNC XO31(598)
422 #define LWSYNC (HWSYNC | (1u << 21))
423
424 #define SPR(a, b) ((((a)<<5)|(b))<<11)
425 #define LR SPR(8, 0)
426 #define CTR SPR(9, 0)
427
428 #define SLW XO31( 24)
429 #define SRW XO31(536)
430 #define SRAW XO31(792)
431
432 #define SLD XO31( 27)
433 #define SRD XO31(539)
434 #define SRAD XO31(794)
435 #define SRADI XO31(413<<1)
436
437 #define BRH XO31(219)
438 #define BRW XO31(155)
439 #define BRD XO31(187)
440
441 #define TW XO31( 4)
442 #define TRAP (TW | TO(31))
443
444 #define NOP ORI /* ori 0,0,0 */
445
446 #define LVX XO31(103)
447 #define LVEBX XO31(7)
448 #define LVEHX XO31(39)
449 #define LVEWX XO31(71)
450 #define LXSDX (XO31(588) | 1) /* v2.06, force tx=1 */
451 #define LXVDSX (XO31(332) | 1) /* v2.06, force tx=1 */
452 #define LXSIWZX (XO31(12) | 1) /* v2.07, force tx=1 */
453 #define LXV (OPCD(61) | 8 | 1) /* v3.00, force tx=1 */
454 #define LXSD (OPCD(57) | 2) /* v3.00 */
455 #define LXVWSX (XO31(364) | 1) /* v3.00, force tx=1 */
456
457 #define STVX XO31(231)
458 #define STVEWX XO31(199)
459 #define STXSDX (XO31(716) | 1) /* v2.06, force sx=1 */
460 #define STXSIWX (XO31(140) | 1) /* v2.07, force sx=1 */
461 #define STXV (OPCD(61) | 8 | 5) /* v3.00, force sx=1 */
462 #define STXSD (OPCD(61) | 2) /* v3.00 */
463
464 #define VADDSBS VX4(768)
465 #define VADDUBS VX4(512)
466 #define VADDUBM VX4(0)
467 #define VADDSHS VX4(832)
468 #define VADDUHS VX4(576)
469 #define VADDUHM VX4(64)
470 #define VADDSWS VX4(896)
471 #define VADDUWS VX4(640)
472 #define VADDUWM VX4(128)
473 #define VADDUDM VX4(192) /* v2.07 */
474
475 #define VSUBSBS VX4(1792)
476 #define VSUBUBS VX4(1536)
477 #define VSUBUBM VX4(1024)
478 #define VSUBSHS VX4(1856)
479 #define VSUBUHS VX4(1600)
480 #define VSUBUHM VX4(1088)
481 #define VSUBSWS VX4(1920)
482 #define VSUBUWS VX4(1664)
483 #define VSUBUWM VX4(1152)
484 #define VSUBUDM VX4(1216) /* v2.07 */
485
486 #define VNEGW (VX4(1538) | (6 << 16)) /* v3.00 */
487 #define VNEGD (VX4(1538) | (7 << 16)) /* v3.00 */
488
489 #define VMAXSB VX4(258)
490 #define VMAXSH VX4(322)
491 #define VMAXSW VX4(386)
492 #define VMAXSD VX4(450) /* v2.07 */
493 #define VMAXUB VX4(2)
494 #define VMAXUH VX4(66)
495 #define VMAXUW VX4(130)
496 #define VMAXUD VX4(194) /* v2.07 */
497 #define VMINSB VX4(770)
498 #define VMINSH VX4(834)
499 #define VMINSW VX4(898)
500 #define VMINSD VX4(962) /* v2.07 */
501 #define VMINUB VX4(514)
502 #define VMINUH VX4(578)
503 #define VMINUW VX4(642)
504 #define VMINUD VX4(706) /* v2.07 */
505
506 #define VCMPEQUB VX4(6)
507 #define VCMPEQUH VX4(70)
508 #define VCMPEQUW VX4(134)
509 #define VCMPEQUD VX4(199) /* v2.07 */
510 #define VCMPGTSB VX4(774)
511 #define VCMPGTSH VX4(838)
512 #define VCMPGTSW VX4(902)
513 #define VCMPGTSD VX4(967) /* v2.07 */
514 #define VCMPGTUB VX4(518)
515 #define VCMPGTUH VX4(582)
516 #define VCMPGTUW VX4(646)
517 #define VCMPGTUD VX4(711) /* v2.07 */
518 #define VCMPNEB VX4(7) /* v3.00 */
519 #define VCMPNEH VX4(71) /* v3.00 */
520 #define VCMPNEW VX4(135) /* v3.00 */
521
522 #define VSLB VX4(260)
523 #define VSLH VX4(324)
524 #define VSLW VX4(388)
525 #define VSLD VX4(1476) /* v2.07 */
526 #define VSRB VX4(516)
527 #define VSRH VX4(580)
528 #define VSRW VX4(644)
529 #define VSRD VX4(1732) /* v2.07 */
530 #define VSRAB VX4(772)
531 #define VSRAH VX4(836)
532 #define VSRAW VX4(900)
533 #define VSRAD VX4(964) /* v2.07 */
534 #define VRLB VX4(4)
535 #define VRLH VX4(68)
536 #define VRLW VX4(132)
537 #define VRLD VX4(196) /* v2.07 */
538
539 #define VMULEUB VX4(520)
540 #define VMULEUH VX4(584)
541 #define VMULEUW VX4(648) /* v2.07 */
542 #define VMULOUB VX4(8)
543 #define VMULOUH VX4(72)
544 #define VMULOUW VX4(136) /* v2.07 */
545 #define VMULUWM VX4(137) /* v2.07 */
546 #define VMULLD VX4(457) /* v3.10 */
547 #define VMSUMUHM VX4(38)
548
549 #define VMRGHB VX4(12)
550 #define VMRGHH VX4(76)
551 #define VMRGHW VX4(140)
552 #define VMRGLB VX4(268)
553 #define VMRGLH VX4(332)
554 #define VMRGLW VX4(396)
555
556 #define VPKUHUM VX4(14)
557 #define VPKUWUM VX4(78)
558
559 #define VAND VX4(1028)
560 #define VANDC VX4(1092)
561 #define VNOR VX4(1284)
562 #define VOR VX4(1156)
563 #define VXOR VX4(1220)
564 #define VEQV VX4(1668) /* v2.07 */
565 #define VNAND VX4(1412) /* v2.07 */
566 #define VORC VX4(1348) /* v2.07 */
567
568 #define VSPLTB VX4(524)
569 #define VSPLTH VX4(588)
570 #define VSPLTW VX4(652)
571 #define VSPLTISB VX4(780)
572 #define VSPLTISH VX4(844)
573 #define VSPLTISW VX4(908)
574
575 #define VSLDOI VX4(44)
576
577 #define XXPERMDI (OPCD(60) | (10 << 3) | 7) /* v2.06, force ax=bx=tx=1 */
578 #define XXSEL (OPCD(60) | (3 << 4) | 0xf) /* v2.06, force ax=bx=cx=tx=1 */
579 #define XXSPLTIB (OPCD(60) | (360 << 1) | 1) /* v3.00, force tx=1 */
580
581 #define MFVSRD (XO31(51) | 1) /* v2.07, force sx=1 */
582 #define MFVSRWZ (XO31(115) | 1) /* v2.07, force sx=1 */
583 #define MTVSRD (XO31(179) | 1) /* v2.07, force tx=1 */
584 #define MTVSRWZ (XO31(243) | 1) /* v2.07, force tx=1 */
585 #define MTVSRDD (XO31(435) | 1) /* v3.00, force tx=1 */
586 #define MTVSRWS (XO31(403) | 1) /* v3.00, force tx=1 */
587
588 #define RT(r) ((r)<<21)
589 #define RS(r) ((r)<<21)
590 #define RA(r) ((r)<<16)
591 #define RB(r) ((r)<<11)
592 #define TO(t) ((t)<<21)
593 #define SH(s) ((s)<<11)
594 #define MB(b) ((b)<<6)
595 #define ME(e) ((e)<<1)
596 #define BO(o) ((o)<<21)
597 #define MB64(b) ((b)<<5)
598 #define FXM(b) (1 << (19 - (b)))
599
600 #define VRT(r) (((r) & 31) << 21)
601 #define VRA(r) (((r) & 31) << 16)
602 #define VRB(r) (((r) & 31) << 11)
603 #define VRC(r) (((r) & 31) << 6)
604
605 #define LK 1
606
607 #define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
608 #define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
609 #define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
610 #define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
611
612 #define BF(n) ((n)<<23)
613 #define BI(n, c) (((c)+((n)*4))<<16)
614 #define BT(n, c) (((c)+((n)*4))<<21)
615 #define BA(n, c) (((c)+((n)*4))<<16)
616 #define BB(n, c) (((c)+((n)*4))<<11)
617 #define BC_(n, c) (((c)+((n)*4))<<6)
618
619 #define BO_COND_TRUE BO(12)
620 #define BO_COND_FALSE BO( 4)
621 #define BO_ALWAYS BO(20)
622
623 enum {
624 CR_LT,
625 CR_GT,
626 CR_EQ,
627 CR_SO
628 };
629
630 static const uint32_t tcg_to_bc[] = {
631 [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE,
632 [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE,
633 [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE,
634 [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE,
635 [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE,
636 [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE,
637 [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
638 [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
639 [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
640 [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
641 };
642
643 /* The low bit here is set if the RA and RB fields must be inverted. */
644 static const uint32_t tcg_to_isel[] = {
645 [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ),
646 [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1,
647 [TCG_COND_LT] = ISEL | BC_(7, CR_LT),
648 [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1,
649 [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1,
650 [TCG_COND_GT] = ISEL | BC_(7, CR_GT),
651 [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
652 [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
653 [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
654 [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
655 };
656
657 static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
658 intptr_t value, intptr_t addend)
659 {
660 const tcg_insn_unit *target;
661 int16_t lo;
662 int32_t hi;
663
664 value += addend;
665 target = (const tcg_insn_unit *)value;
666
667 switch (type) {
668 case R_PPC_REL14:
669 return reloc_pc14(code_ptr, target);
670 case R_PPC_REL24:
671 return reloc_pc24(code_ptr, target);
672 case R_PPC_ADDR16:
673 /*
674 * We are (slightly) abusing this relocation type. In particular,
675 * assert that the low 2 bits are zero, and do not modify them.
676 * That way we can use this with LD et al that have opcode bits
677 * in the low 2 bits of the insn.
678 */
679 if ((value & 3) || value != (int16_t)value) {
680 return false;
681 }
682 *code_ptr = (*code_ptr & ~0xfffc) | (value & 0xfffc);
683 break;
684 case R_PPC_ADDR32:
685 /*
686 * We are abusing this relocation type. Again, this points to
687 * a pair of insns, lis + load. This is an absolute address
688 * relocation for PPC32 so the lis cannot be removed.
689 */
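/*
 * The low half sign-extends, so the high half must compensate.
 * Illustrative value (not from the source): 0x12348000 gives
 * lo = -0x8000 and hi = 0x12350000; lis loads 0x1235 and the
 * -0x8000 displacement reconstructs 0x12348000.
 */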
690 lo = value;
691 hi = value - lo;
692 if (hi + lo != value) {
693 return false;
694 }
695 code_ptr[0] = deposit32(code_ptr[0], 0, 16, hi >> 16);
696 code_ptr[1] = deposit32(code_ptr[1], 0, 16, lo);
697 break;
698 default:
699 g_assert_not_reached();
700 }
701 return true;
702 }
703
704 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
705 TCGReg base, tcg_target_long offset);
706
707 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
708 {
709 if (ret == arg) {
710 return true;
711 }
712 switch (type) {
713 case TCG_TYPE_I64:
714 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
715 /* fallthru */
716 case TCG_TYPE_I32:
717 if (ret < TCG_REG_V0) {
718 if (arg < TCG_REG_V0) {
719 tcg_out32(s, OR | SAB(arg, ret, arg));
720 break;
721 } else if (have_isa_2_07) {
722 tcg_out32(s, (type == TCG_TYPE_I32 ? MFVSRWZ : MFVSRD)
723 | VRT(arg) | RA(ret));
724 break;
725 } else {
726 /* Altivec does not support vector->integer moves. */
727 return false;
728 }
729 } else if (arg < TCG_REG_V0) {
730 if (have_isa_2_07) {
731 tcg_out32(s, (type == TCG_TYPE_I32 ? MTVSRWZ : MTVSRD)
732 | VRT(ret) | RA(arg));
733 break;
734 } else {
735 /* Altivec does not support integer->vector moves. */
736 return false;
737 }
738 }
739 /* fallthru */
740 case TCG_TYPE_V64:
741 case TCG_TYPE_V128:
742 tcg_debug_assert(ret >= TCG_REG_V0 && arg >= TCG_REG_V0);
743 tcg_out32(s, VOR | VRT(ret) | VRA(arg) | VRB(arg));
744 break;
745 default:
746 g_assert_not_reached();
747 }
748 return true;
749 }
750
751 static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
752 int sh, int mb)
753 {
754 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
755 sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
756 mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
757 tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
758 }
759
760 static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
761 int sh, int mb, int me)
762 {
763 tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
764 }
765
766 static void tcg_out_ext8s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
767 {
768 tcg_out32(s, EXTSB | RA(dst) | RS(src));
769 }
770
771 static void tcg_out_ext8u(TCGContext *s, TCGReg dst, TCGReg src)
772 {
773 tcg_out32(s, ANDI | SAI(src, dst, 0xff));
774 }
775
776 static void tcg_out_ext16s(TCGContext *s, TCGType type, TCGReg dst, TCGReg src)
777 {
778 tcg_out32(s, EXTSH | RA(dst) | RS(src));
779 }
780
781 static void tcg_out_ext16u(TCGContext *s, TCGReg dst, TCGReg src)
782 {
783 tcg_out32(s, ANDI | SAI(src, dst, 0xffff));
784 }
785
786 static void tcg_out_ext32s(TCGContext *s, TCGReg dst, TCGReg src)
787 {
788 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
789 tcg_out32(s, EXTSW | RA(dst) | RS(src));
790 }
791
792 static void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
793 {
794 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
795 tcg_out_rld(s, RLDICL, dst, src, 0, 32);
796 }
797
798 static void tcg_out_exts_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
799 {
800 tcg_out_ext32s(s, dst, src);
801 }
802
803 static void tcg_out_extu_i32_i64(TCGContext *s, TCGReg dst, TCGReg src)
804 {
805 tcg_out_ext32u(s, dst, src);
806 }
807
808 static void tcg_out_extrl_i64_i32(TCGContext *s, TCGReg rd, TCGReg rn)
809 {
810 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
811 tcg_out_mov(s, TCG_TYPE_I32, rd, rn);
812 }
813
814 static inline void tcg_out_shli32(TCGContext *s, TCGReg dst, TCGReg src, int c)
815 {
816 tcg_out_rlw(s, RLWINM, dst, src, c, 0, 31 - c);
817 }
818
819 static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
820 {
821 tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
822 }
823
824 static inline void tcg_out_sari32(TCGContext *s, TCGReg dst, TCGReg src, int c)
825 {
826 /* Limit immediate shift count lest we create an illegal insn. */
827 tcg_out32(s, SRAWI | RA(dst) | RS(src) | SH(c & 31));
828 }
829
830 static inline void tcg_out_shri32(TCGContext *s, TCGReg dst, TCGReg src, int c)
831 {
832 tcg_out_rlw(s, RLWINM, dst, src, 32 - c, c, 31);
833 }
834
835 static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
836 {
837 tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
838 }
839
840 static inline void tcg_out_sari64(TCGContext *s, TCGReg dst, TCGReg src, int c)
841 {
842 tcg_out32(s, SRADI | RA(dst) | RS(src) | SH(c & 0x1f) | ((c >> 4) & 2));
843 }
844
845 static void tcg_out_bswap16(TCGContext *s, TCGReg dst, TCGReg src, int flags)
846 {
847 TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
848
849 if (have_isa_3_10) {
850 tcg_out32(s, BRH | RA(dst) | RS(src));
851 if (flags & TCG_BSWAP_OS) {
852 tcg_out_ext16s(s, TCG_TYPE_REG, dst, dst);
853 } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
854 tcg_out_ext16u(s, dst, dst);
855 }
856 return;
857 }
858
859 /*
860 * In the following,
861 * dep(a, b, m) -> (a & ~m) | (b & m)
862 *
863 * Begin with: src = xxxxabcd
864 */
865 /* tmp = rol32(src, 24) & 0x000000ff = 0000000c */
866 tcg_out_rlw(s, RLWINM, tmp, src, 24, 24, 31);
867 /* tmp = dep(tmp, rol32(src, 8), 0x0000ff00) = 000000dc */
868 tcg_out_rlw(s, RLWIMI, tmp, src, 8, 16, 23);
869
870 if (flags & TCG_BSWAP_OS) {
871 tcg_out_ext16s(s, TCG_TYPE_REG, dst, tmp);
872 } else {
873 tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
874 }
875 }
876
877 static void tcg_out_bswap32(TCGContext *s, TCGReg dst, TCGReg src, int flags)
878 {
879 TCGReg tmp = dst == src ? TCG_REG_R0 : dst;
880
881 if (have_isa_3_10) {
882 tcg_out32(s, BRW | RA(dst) | RS(src));
883 if (flags & TCG_BSWAP_OS) {
884 tcg_out_ext32s(s, dst, dst);
885 } else if ((flags & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) {
886 tcg_out_ext32u(s, dst, dst);
887 }
888 return;
889 }
890
891 /*
892 * Stolen from gcc's builtin_bswap32.
893 * In the following,
894 * dep(a, b, m) -> (a & ~m) | (b & m)
895 *
896 * Begin with: src = xxxxabcd
897 */
898 /* tmp = rol32(src, 8) & 0xffffffff = 0000bcda */
899 tcg_out_rlw(s, RLWINM, tmp, src, 8, 0, 31);
900 /* tmp = dep(tmp, rol32(src, 24), 0xff000000) = 0000dcda */
901 tcg_out_rlw(s, RLWIMI, tmp, src, 24, 0, 7);
902 /* tmp = dep(tmp, rol32(src, 24), 0x0000ff00) = 0000dcba */
903 tcg_out_rlw(s, RLWIMI, tmp, src, 24, 16, 23);
904
905 if (flags & TCG_BSWAP_OS) {
906 tcg_out_ext32s(s, dst, tmp);
907 } else {
908 tcg_out_mov(s, TCG_TYPE_REG, dst, tmp);
909 }
910 }
911
912 static void tcg_out_bswap64(TCGContext *s, TCGReg dst, TCGReg src)
913 {
914 TCGReg t0 = dst == src ? TCG_REG_R0 : dst;
915 TCGReg t1 = dst == src ? dst : TCG_REG_R0;
916
917 if (have_isa_3_10) {
918 tcg_out32(s, BRD | RA(dst) | RS(src));
919 return;
920 }
921
922 /*
923 * In the following,
924 * dep(a, b, m) -> (a & ~m) | (b & m)
925 *
926 * Begin with: src = abcdefgh
927 */
928 /* t0 = rol32(src, 8) & 0xffffffff = 0000fghe */
929 tcg_out_rlw(s, RLWINM, t0, src, 8, 0, 31);
930 /* t0 = dep(t0, rol32(src, 24), 0xff000000) = 0000hghe */
931 tcg_out_rlw(s, RLWIMI, t0, src, 24, 0, 7);
932 /* t0 = dep(t0, rol32(src, 24), 0x0000ff00) = 0000hgfe */
933 tcg_out_rlw(s, RLWIMI, t0, src, 24, 16, 23);
934
935 /* t0 = rol64(t0, 32) = hgfe0000 */
936 tcg_out_rld(s, RLDICL, t0, t0, 32, 0);
937 /* t1 = rol64(src, 32) = efghabcd */
938 tcg_out_rld(s, RLDICL, t1, src, 32, 0);
939
940 /* t0 = dep(t0, rol32(t1, 8), 0xffffffff) = hgfebcda */
941 tcg_out_rlw(s, RLWIMI, t0, t1, 8, 0, 31);
942 /* t0 = dep(t0, rol32(t1, 24), 0xff000000) = hgfedcda */
943 tcg_out_rlw(s, RLWIMI, t0, t1, 24, 0, 7);
944 /* t0 = dep(t0, rol32(t1, 24), 0x0000ff00) = hgfedcba */
945 tcg_out_rlw(s, RLWIMI, t0, t1, 24, 16, 23);
946
947 tcg_out_mov(s, TCG_TYPE_REG, dst, t0);
948 }
949
950 /* Emit a move into ret of arg, if it can be done in one insn. */
951 static bool tcg_out_movi_one(TCGContext *s, TCGReg ret, tcg_target_long arg)
952 {
953 if (arg == (int16_t)arg) {
954 tcg_out32(s, ADDI | TAI(ret, 0, arg));
955 return true;
956 }
957 if (arg == (int32_t)arg && (arg & 0xffff) == 0) {
958 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
959 return true;
960 }
961 return false;
962 }
963
964 static void tcg_out_movi_int(TCGContext *s, TCGType type, TCGReg ret,
965 tcg_target_long arg, bool in_prologue)
966 {
967 intptr_t tb_diff;
968 tcg_target_long tmp;
969 int shift;
970
971 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
972
973 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
974 arg = (int32_t)arg;
975 }
976
977 /* Load 16-bit immediates with one insn. */
978 if (tcg_out_movi_one(s, ret, arg)) {
979 return;
980 }
981
982 /* Load addresses within the TB with one insn. */
983 tb_diff = tcg_tbrel_diff(s, (void *)arg);
984 if (!in_prologue && USE_REG_TB && tb_diff == (int16_t)tb_diff) {
985 tcg_out32(s, ADDI | TAI(ret, TCG_REG_TB, tb_diff));
986 return;
987 }
988
989 /* Load 32-bit immediates with two insns. Note that we've already
990 eliminated bare ADDIS, so we know both insns are required. */
991 if (TCG_TARGET_REG_BITS == 32 || arg == (int32_t)arg) {
992 tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
993 tcg_out32(s, ORI | SAI(ret, ret, arg));
994 return;
995 }
996 if (arg == (uint32_t)arg && !(arg & 0x8000)) {
997 tcg_out32(s, ADDI | TAI(ret, 0, arg));
998 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
999 return;
1000 }
1001
1002 /* Load masked 16-bit value. */
1003 if (arg > 0 && (arg & 0x8000)) {
1004 tmp = arg | 0x7fff;
1005 if ((tmp & (tmp + 1)) == 0) {
1006 int mb = clz64(tmp + 1) + 1;
1007 tcg_out32(s, ADDI | TAI(ret, 0, arg));
1008 tcg_out_rld(s, RLDICL, ret, ret, 0, mb);
1009 return;
1010 }
1011 }
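/*
 * Illustrative case (not from the source): arg = 0xffff gives
 * tmp + 1 = 0x10000 and mb = clz64(0x10000) + 1 = 48, so addi
 * materializes -1 and rldicl clears all but the low 16 bits.
 */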
1012
1013 /* Load common masks with 2 insns. */
1014 shift = ctz64(arg);
1015 tmp = arg >> shift;
1016 if (tmp == (int16_t)tmp) {
1017 tcg_out32(s, ADDI | TAI(ret, 0, tmp));
1018 tcg_out_shli64(s, ret, ret, shift);
1019 return;
1020 }
1021 shift = clz64(arg);
1022 if (tcg_out_movi_one(s, ret, arg << shift)) {
1023 tcg_out_shri64(s, ret, ret, shift);
1024 return;
1025 }
1026
1027 /* Load addresses within 2GB of TB with 2 (or rarely 3) insns. */
1028 if (!in_prologue && USE_REG_TB && tb_diff == (int32_t)tb_diff) {
1029 tcg_out_mem_long(s, ADDI, ADD, ret, TCG_REG_TB, tb_diff);
1030 return;
1031 }
1032
1033 /* Use the constant pool, if possible. */
1034 if (!in_prologue && USE_REG_TB) {
1035 new_pool_label(s, arg, R_PPC_ADDR16, s->code_ptr,
1036 tcg_tbrel_diff(s, NULL));
1037 tcg_out32(s, LD | TAI(ret, TCG_REG_TB, 0));
1038 return;
1039 }
1040
1041 tmp = arg >> 31 >> 1;
1042 tcg_out_movi(s, TCG_TYPE_I32, ret, tmp);
1043 if (tmp) {
1044 tcg_out_shli64(s, ret, ret, 32);
1045 }
1046 if (arg & 0xffff0000) {
1047 tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
1048 }
1049 if (arg & 0xffff) {
1050 tcg_out32(s, ORI | SAI(ret, ret, arg));
1051 }
1052 }
1053
1054 static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
1055 TCGReg ret, int64_t val)
1056 {
1057 uint32_t load_insn;
1058 int rel, low;
1059 intptr_t add;
1060
1061 switch (vece) {
1062 case MO_8:
1063 low = (int8_t)val;
1064 if (low >= -16 && low < 16) {
1065 tcg_out32(s, VSPLTISB | VRT(ret) | ((val & 31) << 16));
1066 return;
1067 }
1068 if (have_isa_3_00) {
1069 tcg_out32(s, XXSPLTIB | VRT(ret) | ((val & 0xff) << 11));
1070 return;
1071 }
1072 break;
1073
1074 case MO_16:
1075 low = (int16_t)val;
1076 if (low >= -16 && low < 16) {
1077 tcg_out32(s, VSPLTISH | VRT(ret) | ((val & 31) << 16));
1078 return;
1079 }
1080 break;
1081
1082 case MO_32:
1083 low = (int32_t)val;
1084 if (low >= -16 && low < 16) {
1085 tcg_out32(s, VSPLTISW | VRT(ret) | ((val & 31) << 16));
1086 return;
1087 }
1088 break;
1089 }
1090
1091 /*
1092 * Otherwise we must load the value from the constant pool.
1093 */
1094 if (USE_REG_TB) {
1095 rel = R_PPC_ADDR16;
1096 add = tcg_tbrel_diff(s, NULL);
1097 } else {
1098 rel = R_PPC_ADDR32;
1099 add = 0;
1100 }
1101
1102 if (have_vsx) {
1103 load_insn = type == TCG_TYPE_V64 ? LXSDX : LXVDSX;
1104 load_insn |= VRT(ret) | RB(TCG_REG_TMP1);
1105 if (TCG_TARGET_REG_BITS == 64) {
1106 new_pool_label(s, val, rel, s->code_ptr, add);
1107 } else {
1108 new_pool_l2(s, rel, s->code_ptr, add, val >> 32, val);
1109 }
1110 } else {
1111 load_insn = LVX | VRT(ret) | RB(TCG_REG_TMP1);
1112 if (TCG_TARGET_REG_BITS == 64) {
1113 new_pool_l2(s, rel, s->code_ptr, add, val, val);
1114 } else {
1115 new_pool_l4(s, rel, s->code_ptr, add,
1116 val >> 32, val, val >> 32, val);
1117 }
1118 }
1119
1120 if (USE_REG_TB) {
1121 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, 0, 0));
1122 load_insn |= RA(TCG_REG_TB);
1123 } else {
1124 tcg_out32(s, ADDIS | TAI(TCG_REG_TMP1, 0, 0));
1125 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, TCG_REG_TMP1, 0));
1126 }
1127 tcg_out32(s, load_insn);
1128 }
1129
1130 static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
1131 tcg_target_long arg)
1132 {
1133 switch (type) {
1134 case TCG_TYPE_I32:
1135 case TCG_TYPE_I64:
1136 tcg_debug_assert(ret < TCG_REG_V0);
1137 tcg_out_movi_int(s, type, ret, arg, false);
1138 break;
1139
1140 default:
1141 g_assert_not_reached();
1142 }
1143 }
1144
1145 static bool tcg_out_xchg(TCGContext *s, TCGType type, TCGReg r1, TCGReg r2)
1146 {
1147 return false;
1148 }
1149
1150 static void tcg_out_addi_ptr(TCGContext *s, TCGReg rd, TCGReg rs,
1151 tcg_target_long imm)
1152 {
1153 /* This function is only used for passing structs by reference. */
1154 g_assert_not_reached();
1155 }
1156
1157 static bool mask_operand(uint32_t c, int *mb, int *me)
1158 {
1159 uint32_t lsb, test;
1160
1161 /* Accept a bit pattern like:
1162 0....01....1
1163 1....10....0
1164 0..01..10..0
1165 Keep track of the transitions. */
1166 if (c == 0 || c == -1) {
1167 return false;
1168 }
1169 test = c;
1170 lsb = test & -test;
1171 test += lsb;
1172 if (test & (test - 1)) {
1173 return false;
1174 }
1175
1176 *me = clz32(lsb);
1177 *mb = test ? clz32(test & -test) + 1 : 0;
1178 return true;
1179 }
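/*
 * Worked example for mask_operand (illustrative): c = 0x0ff0 has
 * lsb = 0x0010, and test = c + lsb = 0x1000 is a power of two, so
 * the ones form a single run.  me = clz32(0x0010) = 27 and
 * mb = clz32(0x1000) + 1 = 20, i.e. an rlwinm mask of [20, 27].
 */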
1180
1181 static bool mask64_operand(uint64_t c, int *mb, int *me)
1182 {
1183 uint64_t lsb;
1184
1185 if (c == 0) {
1186 return false;
1187 }
1188
1189 lsb = c & -c;
1190 /* Accept 1..10..0. */
1191 if (c == -lsb) {
1192 *mb = 0;
1193 *me = clz64(lsb);
1194 return true;
1195 }
1196 /* Accept 0..01..1. */
1197 if (lsb == 1 && (c & (c + 1)) == 0) {
1198 *mb = clz64(c + 1) + 1;
1199 *me = 63;
1200 return true;
1201 }
1202 return false;
1203 }
1204
1205 static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1206 {
1207 int mb, me;
1208
1209 if (mask_operand(c, &mb, &me)) {
1210 tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
1211 } else if ((c & 0xffff) == c) {
1212 tcg_out32(s, ANDI | SAI(src, dst, c));
1213 return;
1214 } else if ((c & 0xffff0000) == c) {
1215 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1216 return;
1217 } else {
1218 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
1219 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1220 }
1221 }
1222
1223 static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
1224 {
1225 int mb, me;
1226
1227 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1228 if (mask64_operand(c, &mb, &me)) {
1229 if (mb == 0) {
1230 tcg_out_rld(s, RLDICR, dst, src, 0, me);
1231 } else {
1232 tcg_out_rld(s, RLDICL, dst, src, 0, mb);
1233 }
1234 } else if ((c & 0xffff) == c) {
1235 tcg_out32(s, ANDI | SAI(src, dst, c));
1236 return;
1237 } else if ((c & 0xffff0000) == c) {
1238 tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
1239 return;
1240 } else {
1241 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
1242 tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
1243 }
1244 }
1245
1246 static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
1247 int op_lo, int op_hi)
1248 {
1249 if (c >> 16) {
1250 tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
1251 src = dst;
1252 }
1253 if (c & 0xffff) {
1254 tcg_out32(s, op_lo | SAI(src, dst, c));
1255 src = dst;
1256 }
1257 }
1258
1259 static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1260 {
1261 tcg_out_zori32(s, dst, src, c, ORI, ORIS);
1262 }
1263
1264 static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
1265 {
1266 tcg_out_zori32(s, dst, src, c, XORI, XORIS);
1267 }
1268
1269 static void tcg_out_b(TCGContext *s, int mask, const tcg_insn_unit *target)
1270 {
1271 ptrdiff_t disp = tcg_pcrel_diff(s, target);
1272 if (in_range_b(disp)) {
1273 tcg_out32(s, B | (disp & 0x3fffffc) | mask);
1274 } else {
1275 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, (uintptr_t)target);
1276 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
1277 tcg_out32(s, BCCTR | BO_ALWAYS | mask);
1278 }
1279 }
1280
1281 static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
1282 TCGReg base, tcg_target_long offset)
1283 {
1284 tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
1285 bool is_int_store = false;
1286 TCGReg rs = TCG_REG_TMP1;
1287
1288 switch (opi) {
1289 case LD: case LWA:
1290 align = 3;
1291 /* FALLTHRU */
1292 default:
1293 if (rt > TCG_REG_R0 && rt < TCG_REG_V0) {
1294 rs = rt;
1295 break;
1296 }
1297 break;
1298 case LXSD:
1299 case STXSD:
1300 align = 3;
1301 break;
1302 case LXV:
1303 case STXV:
1304 align = 15;
1305 break;
1306 case STD:
1307 align = 3;
1308 /* FALLTHRU */
1309 case STB: case STH: case STW:
1310 is_int_store = true;
1311 break;
1312 }
1313
1314 /* For unaligned or very large offsets, use the indexed form. */
1315 if (offset & align || offset != (int32_t)offset || opi == 0) {
1316 if (rs == base) {
1317 rs = TCG_REG_R0;
1318 }
1319 tcg_debug_assert(!is_int_store || rs != rt);
1320 tcg_out_movi(s, TCG_TYPE_PTR, rs, orig);
1321 tcg_out32(s, opx | TAB(rt & 31, base, rs));
1322 return;
1323 }
1324
1325 l0 = (int16_t)offset;
1326 offset = (offset - l0) >> 16;
1327 l1 = (int16_t)offset;
1328
1329 if (l1 < 0 && orig >= 0) {
1330 extra = 0x4000;
1331 l1 = (int16_t)(offset - 0x4000);
1332 }
1333 if (l1) {
1334 tcg_out32(s, ADDIS | TAI(rs, base, l1));
1335 base = rs;
1336 }
1337 if (extra) {
1338 tcg_out32(s, ADDIS | TAI(rs, base, extra));
1339 base = rs;
1340 }
1341 if (opi != ADDI || base != rt || l0 != 0) {
1342 tcg_out32(s, opi | TAI(rt & 31, base, l0));
1343 }
1344 }
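/*
 * Worked example for the offset split above (illustrative): an offset
 * of 0x7fffffff yields l0 = -1, making the high part 0x8000 with
 * l1 < 0 while orig >= 0; with extra = 0x4000 the sequence becomes
 * addis rs,base,0x4000; addis rs,rs,0x4000; op rt,rs,-1, which sums
 * to base + 0x7fffffff.
 */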
1345
1346 static void tcg_out_vsldoi(TCGContext *s, TCGReg ret,
1347 TCGReg va, TCGReg vb, int shb)
1348 {
1349 tcg_out32(s, VSLDOI | VRT(ret) | VRA(va) | VRB(vb) | (shb << 6));
1350 }
1351
1352 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
1353 TCGReg base, intptr_t offset)
1354 {
1355 int shift;
1356
1357 switch (type) {
1358 case TCG_TYPE_I32:
1359 if (ret < TCG_REG_V0) {
1360 tcg_out_mem_long(s, LWZ, LWZX, ret, base, offset);
1361 break;
1362 }
1363 if (have_isa_2_07 && have_vsx) {
1364 tcg_out_mem_long(s, 0, LXSIWZX, ret, base, offset);
1365 break;
1366 }
1367 tcg_debug_assert((offset & 3) == 0);
1368 tcg_out_mem_long(s, 0, LVEWX, ret, base, offset);
1369 shift = (offset - 4) & 0xc;
1370 if (shift) {
1371 tcg_out_vsldoi(s, ret, ret, ret, shift);
1372 }
1373 break;
1374 case TCG_TYPE_I64:
1375 if (ret < TCG_REG_V0) {
1376 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1377 tcg_out_mem_long(s, LD, LDX, ret, base, offset);
1378 break;
1379 }
1380 /* fallthru */
1381 case TCG_TYPE_V64:
1382 tcg_debug_assert(ret >= TCG_REG_V0);
1383 if (have_vsx) {
1384 tcg_out_mem_long(s, have_isa_3_00 ? LXSD : 0, LXSDX,
1385 ret, base, offset);
1386 break;
1387 }
1388 tcg_debug_assert((offset & 7) == 0);
1389 tcg_out_mem_long(s, 0, LVX, ret, base, offset & -16);
1390 if (offset & 8) {
1391 tcg_out_vsldoi(s, ret, ret, ret, 8);
1392 }
1393 break;
1394 case TCG_TYPE_V128:
1395 tcg_debug_assert(ret >= TCG_REG_V0);
1396 tcg_debug_assert((offset & 15) == 0);
1397 tcg_out_mem_long(s, have_isa_3_00 ? LXV : 0,
1398 LVX, ret, base, offset);
1399 break;
1400 default:
1401 g_assert_not_reached();
1402 }
1403 }
1404
1405 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1406 TCGReg base, intptr_t offset)
1407 {
1408 int shift;
1409
1410 switch (type) {
1411 case TCG_TYPE_I32:
1412 if (arg < TCG_REG_V0) {
1413 tcg_out_mem_long(s, STW, STWX, arg, base, offset);
1414 break;
1415 }
1416 if (have_isa_2_07 && have_vsx) {
1417 tcg_out_mem_long(s, 0, STXSIWX, arg, base, offset);
1418 break;
1419 }
1421 tcg_debug_assert((offset & 3) == 0);
1422 shift = (offset - 4) & 0xc;
1423 if (shift) {
1424 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, shift);
1425 arg = TCG_VEC_TMP1;
1426 }
1427 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1428 break;
1429 case TCG_TYPE_I64:
1430 if (arg < TCG_REG_V0) {
1431 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
1432 tcg_out_mem_long(s, STD, STDX, arg, base, offset);
1433 break;
1434 }
1435 /* fallthru */
1436 case TCG_TYPE_V64:
1437 tcg_debug_assert(arg >= TCG_REG_V0);
1438 if (have_vsx) {
1439 tcg_out_mem_long(s, have_isa_3_00 ? STXSD : 0,
1440 STXSDX, arg, base, offset);
1441 break;
1442 }
1443 tcg_debug_assert((offset & 7) == 0);
1444 if (offset & 8) {
1445 tcg_out_vsldoi(s, TCG_VEC_TMP1, arg, arg, 8);
1446 arg = TCG_VEC_TMP1;
1447 }
1448 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset);
1449 tcg_out_mem_long(s, 0, STVEWX, arg, base, offset + 4);
1450 break;
1451 case TCG_TYPE_V128:
1452 tcg_debug_assert(arg >= TCG_REG_V0);
1453 tcg_out_mem_long(s, have_isa_3_00 ? STXV : 0,
1454 STVX, arg, base, offset);
1455 break;
1456 default:
1457 g_assert_not_reached();
1458 }
1459 }
1460
1461 static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
1462 TCGReg base, intptr_t ofs)
1463 {
1464 return false;
1465 }
1466
1467 static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
1468 int const_arg2, int cr, TCGType type)
1469 {
1470 int imm;
1471 uint32_t op;
1472
1473 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1474
1475 /* Simplify the comparisons below wrt CMPI. */
1476 if (type == TCG_TYPE_I32) {
1477 arg2 = (int32_t)arg2;
1478 }
1479
1480 switch (cond) {
1481 case TCG_COND_EQ:
1482 case TCG_COND_NE:
1483 if (const_arg2) {
1484 if ((int16_t) arg2 == arg2) {
1485 op = CMPI;
1486 imm = 1;
1487 break;
1488 } else if ((uint16_t) arg2 == arg2) {
1489 op = CMPLI;
1490 imm = 1;
1491 break;
1492 }
1493 }
1494 op = CMPL;
1495 imm = 0;
1496 break;
1497
1498 case TCG_COND_LT:
1499 case TCG_COND_GE:
1500 case TCG_COND_LE:
1501 case TCG_COND_GT:
1502 if (const_arg2) {
1503 if ((int16_t) arg2 == arg2) {
1504 op = CMPI;
1505 imm = 1;
1506 break;
1507 }
1508 }
1509 op = CMP;
1510 imm = 0;
1511 break;
1512
1513 case TCG_COND_LTU:
1514 case TCG_COND_GEU:
1515 case TCG_COND_LEU:
1516 case TCG_COND_GTU:
1517 if (const_arg2) {
1518 if ((uint16_t) arg2 == arg2) {
1519 op = CMPLI;
1520 imm = 1;
1521 break;
1522 }
1523 }
1524 op = CMPL;
1525 imm = 0;
1526 break;
1527
1528 default:
1529 g_assert_not_reached();
1530 }
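/* Bit 21 is the L field of the compare: 1 selects a 64-bit compare. */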
1531 op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
1532
1533 if (imm) {
1534 tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
1535 } else {
1536 if (const_arg2) {
1537 tcg_out_movi(s, type, TCG_REG_R0, arg2);
1538 arg2 = TCG_REG_R0;
1539 }
1540 tcg_out32(s, op | RA(arg1) | RB(arg2));
1541 }
1542 }
1543
1544 static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
1545 TCGReg dst, TCGReg src)
1546 {
1547 if (type == TCG_TYPE_I32) {
1548 tcg_out32(s, CNTLZW | RS(src) | RA(dst));
1549 tcg_out_shri32(s, dst, dst, 5);
1550 } else {
1551 tcg_out32(s, CNTLZD | RS(src) | RA(dst));
1552 tcg_out_shri64(s, dst, dst, 6);
1553 }
1554 }
1555
1556 static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
1557 {
1558 /* X != 0 implies X + -1 generates a carry.  The subfe then
1559 computes R = ~(X-1) + X + CA = -X + X + CA = CA. */
1560 if (dst != src) {
1561 tcg_out32(s, ADDIC | TAI(dst, src, -1));
1562 tcg_out32(s, SUBFE | TAB(dst, dst, src));
1563 } else {
1564 tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1565 tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
1566 }
1567 }
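/*
 * Numeric check of the carry trick (illustrative): X = 5 gives
 * addic -> 4 with CA = 1, then subfe -> ~4 + 5 + 1 = 1; X = 0 gives
 * addic -> -1 with CA = 0, then subfe -> ~(-1) + 0 + 0 = 0.
 */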
1568
1569 static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
1570 bool const_arg2)
1571 {
1572 if (const_arg2) {
1573 if ((uint32_t)arg2 == arg2) {
1574 tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
1575 } else {
1576 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
1577 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
1578 }
1579 } else {
1580 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
1581 }
1582 return TCG_REG_R0;
1583 }
1584
1585 static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
1586 TCGArg arg0, TCGArg arg1, TCGArg arg2,
1587 int const_arg2)
1588 {
1589 int crop, sh;
1590
1591 tcg_debug_assert(TCG_TARGET_REG_BITS == 64 || type == TCG_TYPE_I32);
1592
1593 /* Ignore high bits of a potential constant arg2. */
1594 if (type == TCG_TYPE_I32) {
1595 arg2 = (uint32_t)arg2;
1596 }
1597
1598 /* Handle common and trivial cases before handling anything else. */
1599 if (arg2 == 0) {
1600 switch (cond) {
1601 case TCG_COND_EQ:
1602 tcg_out_setcond_eq0(s, type, arg0, arg1);
1603 return;
1604 case TCG_COND_NE:
1605 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1606 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1607 arg1 = TCG_REG_R0;
1608 }
1609 tcg_out_setcond_ne0(s, arg0, arg1);
1610 return;
1611 case TCG_COND_GE:
1612 tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
1613 arg1 = arg0;
1614 /* FALLTHRU */
1615 case TCG_COND_LT:
1616 /* Extract the sign bit. */
1617 if (type == TCG_TYPE_I32) {
1618 tcg_out_shri32(s, arg0, arg1, 31);
1619 } else {
1620 tcg_out_shri64(s, arg0, arg1, 63);
1621 }
1622 return;
1623 default:
1624 break;
1625 }
1626 }
1627
1628 /* If we have ISEL, we can implement everything with 3 or 4 insns.
1629 All other cases below are also at least 3 insns, so speed up the
1630 code generator by not considering them and always using ISEL. */
1631 if (have_isel) {
1632 int isel, tab;
1633
1634 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1635
1636 isel = tcg_to_isel[cond];
1637
1638 tcg_out_movi(s, type, arg0, 1);
1639 if (isel & 1) {
1640 /* arg0 = (bc ? 0 : 1) */
1641 tab = TAB(arg0, 0, arg0);
1642 isel &= ~1;
1643 } else {
1644 /* arg0 = (bc ? 1 : 0) */
1645 tcg_out_movi(s, type, TCG_REG_R0, 0);
1646 tab = TAB(arg0, arg0, TCG_REG_R0);
1647 }
1648 tcg_out32(s, isel | tab);
1649 return;
1650 }
1651
1652 switch (cond) {
1653 case TCG_COND_EQ:
1654 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1655 tcg_out_setcond_eq0(s, type, arg0, arg1);
1656 return;
1657
1658 case TCG_COND_NE:
1659 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1660 /* Discard the high bits only once, rather than both inputs. */
1661 if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
1662 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1663 arg1 = TCG_REG_R0;
1664 }
1665 tcg_out_setcond_ne0(s, arg0, arg1);
1666 return;
1667
1668 case TCG_COND_GT:
1669 case TCG_COND_GTU:
1670 sh = 30;
1671 crop = 0;
1672 goto crtest;
1673
1674 case TCG_COND_LT:
1675 case TCG_COND_LTU:
1676 sh = 29;
1677 crop = 0;
1678 goto crtest;
1679
1680 case TCG_COND_GE:
1681 case TCG_COND_GEU:
1682 sh = 31;
1683 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
1684 goto crtest;
1685
1686 case TCG_COND_LE:
1687 case TCG_COND_LEU:
1688 sh = 31;
1689 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
1690 crtest:
1691 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1692 if (crop) {
1693 tcg_out32(s, crop);
1694 }
1695 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1696 tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
1697 break;
1698
1699 default:
1700 g_assert_not_reached();
1701 }
1702 }
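/*
 * Reading of the crtest rotations above (illustrative): after mfocrf
 * with FXM(7), cr7 occupies the low nibble of R0 as LT:GT:EQ:SO from
 * bit 3 down to bit 0.  Rotating left by 29, 30 or 31 therefore moves
 * LT, GT, or the crop-computed EQ bit into bit 0 for rlwinm to mask.
 */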
1703
1704 static void tcg_out_bc(TCGContext *s, int bc, TCGLabel *l)
1705 {
1706 if (l->has_value) {
1707 bc |= reloc_pc14_val(tcg_splitwx_to_rx(s->code_ptr), l->u.value_ptr);
1708 } else {
1709 tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, l, 0);
1710 }
1711 tcg_out32(s, bc);
1712 }
1713
1714 static void tcg_out_brcond(TCGContext *s, TCGCond cond,
1715 TCGArg arg1, TCGArg arg2, int const_arg2,
1716 TCGLabel *l, TCGType type)
1717 {
1718 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1719 tcg_out_bc(s, tcg_to_bc[cond], l);
1720 }
1721
1722 static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
1723 TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
1724 TCGArg v2, bool const_c2)
1725 {
1726 /* If for some reason both inputs are zero, don't produce bad code. */
1727 if (v1 == 0 && v2 == 0) {
1728 tcg_out_movi(s, type, dest, 0);
1729 return;
1730 }
1731
1732 tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
1733
1734 if (have_isel) {
1735 int isel = tcg_to_isel[cond];
1736
1737 /* Swap the V operands if the operation indicates inversion. */
1738 if (isel & 1) {
1739 int t = v1;
1740 v1 = v2;
1741 v2 = t;
1742 isel &= ~1;
1743 }
1744 /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
1745 if (v2 == 0) {
1746 tcg_out_movi(s, type, TCG_REG_R0, 0);
1747 }
1748 tcg_out32(s, isel | TAB(dest, v1, v2));
1749 } else {
1750 if (dest == v2) {
1751 cond = tcg_invert_cond(cond);
1752 v2 = v1;
1753 } else if (dest != v1) {
1754 if (v1 == 0) {
1755 tcg_out_movi(s, type, dest, 0);
1756 } else {
1757 tcg_out_mov(s, type, dest, v1);
1758 }
1759 }
1760 /* Branch forward over one insn */
1761 tcg_out32(s, tcg_to_bc[cond] | 8);
1762 if (v2 == 0) {
1763 tcg_out_movi(s, type, dest, 0);
1764 } else {
1765 tcg_out_mov(s, type, dest, v2);
1766 }
1767 }
1768 }
1769
1770 static void tcg_out_cntxz(TCGContext *s, TCGType type, uint32_t opc,
1771 TCGArg a0, TCGArg a1, TCGArg a2, bool const_a2)
1772 {
1773 if (const_a2 && a2 == (type == TCG_TYPE_I32 ? 32 : 64)) {
1774 tcg_out32(s, opc | RA(a0) | RS(a1));
1775 } else {
1776 tcg_out_cmp(s, TCG_COND_EQ, a1, 0, 1, 7, type);
1777 /* Note that the only other valid constant for a2 is 0. */
1778 if (have_isel) {
1779 tcg_out32(s, opc | RA(TCG_REG_R0) | RS(a1));
1780 tcg_out32(s, tcg_to_isel[TCG_COND_EQ] | TAB(a0, a2, TCG_REG_R0));
1781 } else if (!const_a2 && a0 == a2) {
1782 tcg_out32(s, tcg_to_bc[TCG_COND_EQ] | 8);
1783 tcg_out32(s, opc | RA(a0) | RS(a1));
1784 } else {
1785 tcg_out32(s, opc | RA(a0) | RS(a1));
1786 tcg_out32(s, tcg_to_bc[TCG_COND_NE] | 8);
1787 if (const_a2) {
1788 tcg_out_movi(s, type, a0, 0);
1789 } else {
1790 tcg_out_mov(s, type, a0, a2);
1791 }
1792 }
1793 }
1794 }
1795
1796 static void tcg_out_cmp2(TCGContext *s, const TCGArg *args,
1797 const int *const_args)
1798 {
1799 static const struct { uint8_t bit1, bit2; } bits[] = {
1800 [TCG_COND_LT ] = { CR_LT, CR_LT },
1801 [TCG_COND_LE ] = { CR_LT, CR_GT },
1802 [TCG_COND_GT ] = { CR_GT, CR_GT },
1803 [TCG_COND_GE ] = { CR_GT, CR_LT },
1804 [TCG_COND_LTU] = { CR_LT, CR_LT },
1805 [TCG_COND_LEU] = { CR_LT, CR_GT },
1806 [TCG_COND_GTU] = { CR_GT, CR_GT },
1807 [TCG_COND_GEU] = { CR_GT, CR_LT },
1808 };
1809
1810 TCGCond cond = args[4], cond2;
1811 TCGArg al, ah, bl, bh;
1812 int blconst, bhconst;
1813 int op, bit1, bit2;
1814
1815 al = args[0];
1816 ah = args[1];
1817 bl = args[2];
1818 bh = args[3];
1819 blconst = const_args[2];
1820 bhconst = const_args[3];
1821
1822 switch (cond) {
1823 case TCG_COND_EQ:
1824 op = CRAND;
1825 goto do_equality;
1826 case TCG_COND_NE:
1827 op = CRNAND;
1828 do_equality:
1829 tcg_out_cmp(s, cond, al, bl, blconst, 6, TCG_TYPE_I32);
1830 tcg_out_cmp(s, cond, ah, bh, bhconst, 7, TCG_TYPE_I32);
1831 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
1832 break;
1833
1834 case TCG_COND_LT:
1835 case TCG_COND_LE:
1836 case TCG_COND_GT:
1837 case TCG_COND_GE:
1838 case TCG_COND_LTU:
1839 case TCG_COND_LEU:
1840 case TCG_COND_GTU:
1841 case TCG_COND_GEU:
1842 bit1 = bits[cond].bit1;
1843 bit2 = bits[cond].bit2;
1844 op = (bit1 != bit2 ? CRANDC : CRAND);
1845 cond2 = tcg_unsigned_cond(cond);
1846
1847 tcg_out_cmp(s, cond, ah, bh, bhconst, 6, TCG_TYPE_I32);
1848 tcg_out_cmp(s, cond2, al, bl, blconst, 7, TCG_TYPE_I32);
1849 tcg_out32(s, op | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, bit2));
1850 tcg_out32(s, CROR | BT(7, CR_EQ) | BA(6, bit1) | BB(7, CR_EQ));
1851 break;
1852
1853 default:
1854 g_assert_not_reached();
1855 }
1856 }
1857
1858 static void tcg_out_setcond2(TCGContext *s, const TCGArg *args,
1859 const int *const_args)
1860 {
1861 tcg_out_cmp2(s, args + 1, const_args + 1);
1862 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1863 tcg_out_rlw(s, RLWINM, args[0], TCG_REG_R0, 31, 31, 31);
1864 }
1865
1866 static void tcg_out_brcond2(TCGContext *s, const TCGArg *args,
1867 const int *const_args)
1868 {
1869 tcg_out_cmp2(s, args, const_args);
1870 tcg_out_bc(s, BC | BI(7, CR_EQ) | BO_COND_TRUE, arg_label(args[5]));
1871 }
1872
1873 static void tcg_out_mb(TCGContext *s, TCGArg a0)
1874 {
1875 uint32_t insn;
1876
1877 if (a0 & TCG_MO_ST_LD) {
1878 insn = HWSYNC;
1879 } else {
1880 insn = LWSYNC;
1881 }
1882
1883 tcg_out32(s, insn);
1884 }
1885
1886 static void tcg_out_call_int(TCGContext *s, int lk,
1887 const tcg_insn_unit *target)
1888 {
1889 #ifdef _CALL_AIX
1890 /* Look through the function descriptor.  Branch directly when the
1891 target is in range and the TOC value is cheap to materialize. */
1892 const void *tgt = ((const void * const *)target)[0];
1893 uintptr_t toc = ((const uintptr_t *)target)[1];
1894 intptr_t diff = tcg_pcrel_diff(s, tgt);
1895
1896 if (in_range_b(diff) && toc == (uint32_t)toc) {
1897 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, toc);
1898 tcg_out_b(s, lk, tgt);
1899 } else {
1900 /* Fold the low bits of the constant into the addresses below. */
1901 intptr_t arg = (intptr_t)target;
1902 int ofs = (int16_t)arg;
1903
1904 if (ofs + 8 < 0x8000) {
1905 arg -= ofs;
1906 } else {
1907 ofs = 0;
1908 }
1909 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, arg);
1910 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_TMP1, ofs);
1911 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
1912 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R2, TCG_REG_TMP1, ofs + SZP);
1913 tcg_out32(s, BCCTR | BO_ALWAYS | lk);
1914 }
1915 #elif defined(_CALL_ELF) && _CALL_ELF == 2
1916 intptr_t diff;
1917
1918 /* In the ELFv2 ABI, we have to set up r12 to contain the destination
1919 address, which the callee uses to compute its TOC address. */
1920 /* FIXME: when the branch is in range, we could avoid r12 load if we
1921 knew that the destination uses the same TOC, and what its local
1922 entry point offset is. */
1923 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R12, (intptr_t)target);
1924
1925 diff = tcg_pcrel_diff(s, target);
1926 if (in_range_b(diff)) {
1927 tcg_out_b(s, lk, target);
1928 } else {
1929 tcg_out32(s, MTSPR | RS(TCG_REG_R12) | CTR);
1930 tcg_out32(s, BCCTR | BO_ALWAYS | lk);
1931 }
1932 #else
1933 tcg_out_b(s, lk, target);
1934 #endif
1935 }
1936
1937 static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target,
1938 const TCGHelperInfo *info)
1939 {
1940 tcg_out_call_int(s, LK, target);
1941 }
1942
1943 static const uint32_t qemu_ldx_opc[(MO_SSIZE + MO_BSWAP) + 1] = {
1944 [MO_UB] = LBZX,
1945 [MO_UW] = LHZX,
1946 [MO_UL] = LWZX,
1947 [MO_UQ] = LDX,
1948 [MO_SW] = LHAX,
1949 [MO_SL] = LWAX,
1950 [MO_BSWAP | MO_UB] = LBZX,
1951 [MO_BSWAP | MO_UW] = LHBRX,
1952 [MO_BSWAP | MO_UL] = LWBRX,
1953 [MO_BSWAP | MO_UQ] = LDBRX,
1954 };
1955
1956 static const uint32_t qemu_stx_opc[(MO_SIZE + MO_BSWAP) + 1] = {
1957 [MO_UB] = STBX,
1958 [MO_UW] = STHX,
1959 [MO_UL] = STWX,
1960 [MO_UQ] = STDX,
1961 [MO_BSWAP | MO_UB] = STBX,
1962 [MO_BSWAP | MO_UW] = STHBRX,
1963 [MO_BSWAP | MO_UL] = STWBRX,
1964 [MO_BSWAP | MO_UQ] = STDBRX,
1965 };
1966
1967 static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
1968 {
1969 if (arg < 0) {
1970 arg = TCG_REG_TMP1;
1971 }
1972 tcg_out32(s, MFSPR | RT(arg) | LR);
1973 return arg;
1974 }
1975
1976 /*
1977 * For the purposes of ppc32 sorting 4 input registers into 4 argument
1978 * registers, there is an outside chance we would require 3 temps.
1979 */
1980 static const TCGLdstHelperParam ldst_helper_param = {
1981 .ra_gen = ldst_ra_gen,
1982 .ntmp = 3,
1983 .tmp = { TCG_REG_TMP1, TCG_REG_TMP2, TCG_REG_R0 }
1984 };
1985
1986 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
1987 {
1988 MemOp opc = get_memop(lb->oi);
1989
1990 if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
1991 return false;
1992 }
1993
1994 tcg_out_ld_helper_args(s, lb, &ldst_helper_param);
1995 tcg_out_call_int(s, LK, qemu_ld_helpers[opc & MO_SIZE]);
1996 tcg_out_ld_helper_ret(s, lb, false, &ldst_helper_param);
1997
1998 tcg_out_b(s, 0, lb->raddr);
1999 return true;
2000 }
2001
2002 static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
2003 {
2004 MemOp opc = get_memop(lb->oi);
2005
2006 if (!reloc_pc14(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
2007 return false;
2008 }
2009
2010 tcg_out_st_helper_args(s, lb, &ldst_helper_param);
2011 tcg_out_call_int(s, LK, qemu_st_helpers[opc & MO_SIZE]);
2012
2013 tcg_out_b(s, 0, lb->raddr);
2014 return true;
2015 }
2016
2017 typedef struct {
2018 TCGReg base;
2019 TCGReg index;
2020 TCGAtomAlign aa;
2021 } HostAddress;
2022
2023 bool tcg_target_has_memory_bswap(MemOp memop)
2024 {
2025 TCGAtomAlign aa;
2026
2027 if ((memop & MO_SIZE) <= MO_64) {
2028 return true;
2029 }
2030
2031 /*
2032 * Reject 16-byte memop with 16-byte atomicity,
2033 * but do allow a pair of 64-bit operations.
2034 */
2035 aa = atom_and_align_for_opc(tcg_ctx, memop, MO_ATOM_IFALIGN, true);
2036 return aa.atom <= MO_64;
2037 }
2038
2039 /* We expect to use a 16-bit negative offset from ENV. */
2040 #define MIN_TLB_MASK_TABLE_OFS -32768
2041
2042 /*
2043 * For softmmu, perform the TLB load and compare.
2044 * For useronly, perform any required alignment tests.
2045 * In both cases, return a TCGLabelQemuLdst structure if the slow path
2046 * is required and fill in @h with the host address for the fast path.
2047 */
2048 static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
2049 TCGReg addrlo, TCGReg addrhi,
2050 MemOpIdx oi, bool is_ld)
2051 {
2052 TCGType addr_type = s->addr_type;
2053 TCGLabelQemuLdst *ldst = NULL;
2054 MemOp opc = get_memop(oi);
2055 MemOp a_bits, s_bits;
2056
2057 /*
2058 * Book II, Section 1.4, Single-Copy Atomicity, specifies:
2059 *
2060 * Before 3.0, "An access that is not atomic is performed as a set of
2061 * smaller disjoint atomic accesses. In general, the number and alignment
2062 * of these accesses are implementation-dependent." Thus MO_ATOM_IFALIGN.
2063 *
2064 * As of 3.0, "the non-atomic access is performed as described in
2065 * the corresponding list", which matches MO_ATOM_SUBALIGN.
2066 */
2067 s_bits = opc & MO_SIZE;
2068 h->aa = atom_and_align_for_opc(s, opc,
2069 have_isa_3_00 ? MO_ATOM_SUBALIGN
2070 : MO_ATOM_IFALIGN,
2071 s_bits == MO_128);
2072 a_bits = h->aa.align;
2073
2074 #ifdef CONFIG_SOFTMMU
2075 int mem_index = get_mmuidx(oi);
2076 int cmp_off = is_ld ? offsetof(CPUTLBEntry, addr_read)
2077 : offsetof(CPUTLBEntry, addr_write);
2078 int fast_off = tlb_mask_table_ofs(s, mem_index);
2079 int mask_off = fast_off + offsetof(CPUTLBDescFast, mask);
2080 int table_off = fast_off + offsetof(CPUTLBDescFast, table);
2081
2082 ldst = new_ldst_label(s);
2083 ldst->is_ld = is_ld;
2084 ldst->oi = oi;
2085 ldst->addrlo_reg = addrlo;
2086 ldst->addrhi_reg = addrhi;
2087
2088 /* Load tlb_mask[mmu_idx] and tlb_table[mmu_idx]. */
2089 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, mask_off);
2090 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_AREG0, table_off);
2091
2092 /* Extract the page index, shifted into place for tlb index. */
2093 if (TCG_TARGET_REG_BITS == 32) {
2094 tcg_out_shri32(s, TCG_REG_R0, addrlo,
2095 s->page_bits - CPU_TLB_ENTRY_BITS);
2096 } else {
2097 tcg_out_shri64(s, TCG_REG_R0, addrlo,
2098 s->page_bits - CPU_TLB_ENTRY_BITS);
2099 }
2100 tcg_out32(s, AND | SAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_R0));
2101
2102 /*
2103 * Load the (low part) TLB comparator into TMP2.
2104 * For 64-bit host, always load the entire 64-bit slot for simplicity.
2105 * We will ignore the high bits with tcg_out_cmp(..., addr_type).
2106 */
2107 if (TCG_TARGET_REG_BITS == 64) {
2108 if (cmp_off == 0) {
2109 tcg_out32(s, LDUX | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
2110 } else {
2111 tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
2112 tcg_out_ld(s, TCG_TYPE_I64, TCG_REG_TMP2, TCG_REG_TMP1, cmp_off);
2113 }
2114 } else if (cmp_off == 0 && !HOST_BIG_ENDIAN) {
2115 tcg_out32(s, LWZUX | TAB(TCG_REG_TMP2, TCG_REG_TMP1, TCG_REG_TMP2));
2116 } else {
2117 tcg_out32(s, ADD | TAB(TCG_REG_TMP1, TCG_REG_TMP1, TCG_REG_TMP2));
2118 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
2119 cmp_off + 4 * HOST_BIG_ENDIAN);
2120 }
2121
2122 /*
2123 * Load the TLB addend for use on the fast path.
2124 * Do this as early as possible to minimize the load-use delay.
2125 */
2126 if (TCG_TARGET_REG_BITS == 64 || addr_type == TCG_TYPE_I32) {
2127 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
2128 offsetof(CPUTLBEntry, addend));
2129 }
2130
2131 /* Clear the non-page, non-alignment bits from the address into R0. */
2132 if (TCG_TARGET_REG_BITS == 32) {
2133 /*
2134 * We don't support unaligned accesses on 32-bit hosts.
2135 * Preserve the bottom bits and thus trigger a comparison
2136 * failure on unaligned accesses.
2137 */
2138 if (a_bits < s_bits) {
2139 a_bits = s_bits;
2140 }
2141 tcg_out_rlw(s, RLWINM, TCG_REG_R0, addrlo, 0,
2142 (32 - a_bits) & 31, 31 - s->page_bits);
2143 } else {
2144 TCGReg t = addrlo;
2145
2146 /*
2147 * If the access is unaligned, we need to make sure we fail if we
2148 * cross a page boundary. The trick is to add the access size - 1
2149 * to the address before masking the low bits. That will make the
2150 * address overflow to the next page if we cross a page boundary,
2151 * which will then force a mismatch of the TLB compare.
2152 */
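/*
 * Illustrative example, assuming 4KiB pages: an 8-byte access
 * (s_bits = 3) with 4-byte alignment (a_bits = 2) at 0x...0ffc
 * adds s_mask - a_mask = 7 - 3 = 4, giving 0x...1000; the page
 * index changes, the TLB compare fails, and we take the slow path.
 */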
2153 if (a_bits < s_bits) {
2154 unsigned a_mask = (1 << a_bits) - 1;
2155 unsigned s_mask = (1 << s_bits) - 1;
2156 tcg_out32(s, ADDI | TAI(TCG_REG_R0, t, s_mask - a_mask));
2157 t = TCG_REG_R0;
2158 }
2159
2160 /* Mask the address for the requested alignment. */
2161 if (addr_type == TCG_TYPE_I32) {
2162 tcg_out_rlw(s, RLWINM, TCG_REG_R0, t, 0,
2163 (32 - a_bits) & 31, 31 - s->page_bits);
2164 } else if (a_bits == 0) {
2165 tcg_out_rld(s, RLDICR, TCG_REG_R0, t, 0, 63 - s->page_bits);
2166 } else {
2167 tcg_out_rld(s, RLDICL, TCG_REG_R0, t,
2168 64 - s->page_bits, s->page_bits - a_bits);
2169 tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, s->page_bits, 0);
2170 }
2171 }
2172
2173 if (TCG_TARGET_REG_BITS == 32 && addr_type != TCG_TYPE_I32) {
2174 /* Low part comparison into cr7. */
2175 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2,
2176 0, 7, TCG_TYPE_I32);
2177
2178 /* Load the high part TLB comparator into TMP2. */
2179 tcg_out_ld(s, TCG_TYPE_I32, TCG_REG_TMP2, TCG_REG_TMP1,
2180 cmp_off + 4 * !HOST_BIG_ENDIAN);
2181
2182 /* Load addend, deferred for this case. */
2183 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1,
2184 offsetof(CPUTLBEntry, addend));
2185
2186 /* High part comparison into cr6. */
2187 tcg_out_cmp(s, TCG_COND_EQ, addrhi, TCG_REG_TMP2, 0, 6, TCG_TYPE_I32);
2188
2189 /* Combine comparisons into cr7. */
2190 tcg_out32(s, CRAND | BT(7, CR_EQ) | BA(6, CR_EQ) | BB(7, CR_EQ));
2191 } else {
2192 /* Full comparison into cr7. */
2193 tcg_out_cmp(s, TCG_COND_EQ, TCG_REG_R0, TCG_REG_TMP2, 0, 7, addr_type);
2194 }
2195
2196 /* Record a pointer to the current insn, then emit the conditional branch-and-link to the slow path. */
2197 ldst->label_ptr[0] = s->code_ptr;
2198 tcg_out32(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
2199
2200 h->base = TCG_REG_TMP1;
2201 #else
2202 if (a_bits) {
2203 ldst = new_ldst_label(s);
2204 ldst->is_ld = is_ld;
2205 ldst->oi = oi;
2206 ldst->addrlo_reg = addrlo;
2207 ldst->addrhi_reg = addrhi;
2208
2209 /* We expect a_bits to max out at 7, so the mask fits the 16-bit ANDI immediate. */
2210 tcg_debug_assert(a_bits < 16);
2211 tcg_out32(s, ANDI | SAI(addrlo, TCG_REG_R0, (1 << a_bits) - 1));
2212
2213 ldst->label_ptr[0] = s->code_ptr;
2214 tcg_out32(s, BC | BI(0, CR_EQ) | BO_COND_FALSE | LK);
2215 }
2216
2217 h->base = guest_base ? TCG_GUEST_BASE_REG : 0;
2218 #endif
2219
2220 if (TCG_TARGET_REG_BITS == 64 && addr_type == TCG_TYPE_I32) {
2221 /* Zero-extend the guest address for use in the host address. */
2222 tcg_out_ext32u(s, TCG_REG_R0, addrlo);
2223 h->index = TCG_REG_R0;
2224 } else {
2225 h->index = addrlo;
2226 }
2227
2228 return ldst;
2229 }
2230
2231 static void tcg_out_qemu_ld(TCGContext *s, TCGReg datalo, TCGReg datahi,
2232 TCGReg addrlo, TCGReg addrhi,
2233 MemOpIdx oi, TCGType data_type)
2234 {
2235 MemOp opc = get_memop(oi);
2236 TCGLabelQemuLdst *ldst;
2237 HostAddress h;
2238
2239 ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, true);
2240
2241 if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2242 if (opc & MO_BSWAP) {
2243 tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2244 tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
2245 tcg_out32(s, LWBRX | TAB(datahi, h.base, TCG_REG_R0));
2246 } else if (h.base != 0) {
2247 tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2248 tcg_out32(s, LWZX | TAB(datahi, h.base, h.index));
2249 tcg_out32(s, LWZX | TAB(datalo, h.base, TCG_REG_R0));
2250 } else if (h.index == datahi) {
2251 tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
2252 tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
2253 } else {
2254 tcg_out32(s, LWZ | TAI(datahi, h.index, 0));
2255 tcg_out32(s, LWZ | TAI(datalo, h.index, 4));
2256 }
2257 } else {
2258 uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)];
2259 if (!have_isa_2_06 && insn == LDBRX) {
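/* ISA 2.06 added LDBRX; without it, compose the byte-reversed
   64-bit value from two LWBRX loads merged with RLDIMI. */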
2260 tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2261 tcg_out32(s, LWBRX | TAB(datalo, h.base, h.index));
2262 tcg_out32(s, LWBRX | TAB(TCG_REG_R0, h.base, TCG_REG_R0));
2263 tcg_out_rld(s, RLDIMI, datalo, TCG_REG_R0, 32, 0);
2264 } else if (insn) {
2265 tcg_out32(s, insn | TAB(datalo, h.base, h.index));
2266 } else {
2267 insn = qemu_ldx_opc[opc & (MO_SIZE | MO_BSWAP)];
2268 tcg_out32(s, insn | TAB(datalo, h.base, h.index));
2269 tcg_out_movext(s, TCG_TYPE_REG, datalo,
2270 TCG_TYPE_REG, opc & MO_SSIZE, datalo);
2271 }
2272 }
2273
2274 if (ldst) {
2275 ldst->type = data_type;
2276 ldst->datalo_reg = datalo;
2277 ldst->datahi_reg = datahi;
2278 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2279 }
2280 }
2281
2282 static void tcg_out_qemu_st(TCGContext *s, TCGReg datalo, TCGReg datahi,
2283 TCGReg addrlo, TCGReg addrhi,
2284 MemOpIdx oi, TCGType data_type)
2285 {
2286 MemOp opc = get_memop(oi);
2287 TCGLabelQemuLdst *ldst;
2288 HostAddress h;
2289
2290 ldst = prepare_host_addr(s, &h, addrlo, addrhi, oi, false);
2291
2292 if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
2293 if (opc & MO_BSWAP) {
2294 tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2295 tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
2296 tcg_out32(s, STWBRX | SAB(datahi, h.base, TCG_REG_R0));
2297 } else if (h.base != 0) {
2298 tcg_out32(s, ADDI | TAI(TCG_REG_R0, h.index, 4));
2299 tcg_out32(s, STWX | SAB(datahi, h.base, h.index));
2300 tcg_out32(s, STWX | SAB(datalo, h.base, TCG_REG_R0));
2301 } else {
2302 tcg_out32(s, STW | TAI(datahi, h.index, 0));
2303 tcg_out32(s, STW | TAI(datalo, h.index, 4));
2304 }
2305 } else {
2306 uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)];
2307 if (!have_isa_2_06 && insn == STDBRX) {
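/* ISA 2.06 added STDBRX; without it, store the two halves with
   byte-reversed word stores (low word first, high word at +4). */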
2308 tcg_out32(s, STWBRX | SAB(datalo, h.base, h.index));
2309 tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, h.index, 4));
2310 tcg_out_shri64(s, TCG_REG_R0, datalo, 32);
2311 tcg_out32(s, STWBRX | SAB(TCG_REG_R0, h.base, TCG_REG_TMP1));
2312 } else {
2313 tcg_out32(s, insn | SAB(datalo, h.base, h.index));
2314 }
2315 }
2316
2317 if (ldst) {
2318 ldst->type = data_type;
2319 ldst->datalo_reg = datalo;
2320 ldst->datahi_reg = datahi;
2321 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2322 }
2323 }
2324
2325 static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
2326 TCGReg addr_reg, MemOpIdx oi, bool is_ld)
2327 {
2328 TCGLabelQemuLdst *ldst;
2329 HostAddress h;
2330 bool need_bswap;
2331 uint32_t insn;
2332 TCGReg index;
2333
2334 ldst = prepare_host_addr(s, &h, addr_reg, -1, oi, is_ld);
2335
2336 /* Compose the final address, as LQ/STQ have no indexing. */
2337 index = h.index;
2338 if (h.base != 0) {
2339 index = TCG_REG_TMP1;
2340 tcg_out32(s, ADD | TAB(index, h.base, h.index));
2341 }
2342 need_bswap = get_memop(oi) & MO_BSWAP;
2343
2344 if (h.aa.atom == MO_128) {
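/* LQ/STQ operate on an even/odd register pair, datahi even and
   datalo odd, as asserted below. */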
2345 tcg_debug_assert(!need_bswap);
2346 tcg_debug_assert(datalo & 1);
2347 tcg_debug_assert(datahi == datalo - 1);
2348 insn = is_ld ? LQ : STQ;
2349 tcg_out32(s, insn | TAI(datahi, index, 0));
2350 } else {
2351 TCGReg d1, d2;
2352
2353 if (HOST_BIG_ENDIAN ^ need_bswap) {
2354 d1 = datahi, d2 = datalo;
2355 } else {
2356 d1 = datalo, d2 = datahi;
2357 }
2358
2359 if (need_bswap) {
2360 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 8);
2361 insn = is_ld ? LDBRX : STDBRX;
2362 tcg_out32(s, insn | TAB(d1, 0, index));
2363 tcg_out32(s, insn | TAB(d2, index, TCG_REG_R0));
2364 } else {
2365 insn = is_ld ? LD : STD;
2366 tcg_out32(s, insn | TAI(d1, index, 0));
2367 tcg_out32(s, insn | TAI(d2, index, 8));
2368 }
2369 }
2370
2371 if (ldst) {
2372 ldst->type = TCG_TYPE_I128;
2373 ldst->datalo_reg = datalo;
2374 ldst->datahi_reg = datahi;
2375 ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
2376 }
2377 }
2378
2379 static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
2380 {
2381 int i;
2382 for (i = 0; i < count; ++i) {
2383 p[i] = NOP;
2384 }
2385 }
2386
2387 /* Parameters for function call generation, used in tcg.c. */
2388 #define TCG_TARGET_STACK_ALIGN 16
2389
2390 #ifdef _CALL_AIX
2391 # define LINK_AREA_SIZE (6 * SZR)
2392 # define LR_OFFSET (1 * SZR)
2393 # define TCG_TARGET_CALL_STACK_OFFSET (LINK_AREA_SIZE + 8 * SZR)
2394 #elif defined(_CALL_DARWIN)
2395 # define LINK_AREA_SIZE (6 * SZR)
2396 # define LR_OFFSET (2 * SZR)
2397 #elif TCG_TARGET_REG_BITS == 64
2398 # if defined(_CALL_ELF) && _CALL_ELF == 2
2399 # define LINK_AREA_SIZE (4 * SZR)
2400 # define LR_OFFSET (1 * SZR)
2401 # endif
2402 #else /* TCG_TARGET_REG_BITS == 32 */
2403 # if defined(_CALL_SYSV)
2404 # define LINK_AREA_SIZE (2 * SZR)
2405 # define LR_OFFSET (1 * SZR)
2406 # endif
2407 #endif
2408 #ifndef LR_OFFSET
2409 # error "Unhandled ABI"
2410 #endif
2411 #ifndef TCG_TARGET_CALL_STACK_OFFSET
2412 # define TCG_TARGET_CALL_STACK_OFFSET LINK_AREA_SIZE
2413 #endif
2414
2415 #define CPU_TEMP_BUF_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
2416 #define REG_SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * SZR)
2417
2418 #define FRAME_SIZE ((TCG_TARGET_CALL_STACK_OFFSET \
2419 + TCG_STATIC_CALL_ARGS_SIZE \
2420 + CPU_TEMP_BUF_SIZE \
2421 + REG_SAVE_SIZE \
2422 + TCG_TARGET_STACK_ALIGN - 1) \
2423 & -TCG_TARGET_STACK_ALIGN)
2424
2425 #define REG_SAVE_BOT (FRAME_SIZE - REG_SAVE_SIZE)
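/* A sketch of the resulting frame, from the stack pointer upward:
   link area, outgoing call arguments, CPU_TEMP_BUF, and finally the
   callee-saved registers at REG_SAVE_BOT. */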
2426
2427 static void tcg_target_qemu_prologue(TCGContext *s)
2428 {
2429 int i;
2430
2431 #ifdef _CALL_AIX
2432 const void **desc = (const void **)s->code_ptr;
2433 desc[0] = tcg_splitwx_to_rx(desc + 2); /* entry point */
2434 desc[1] = 0; /* environment pointer */
2435 s->code_ptr = (void *)(desc + 2); /* skip over descriptor */
2436 #endif
2437
2438 tcg_set_frame(s, TCG_REG_CALL_STACK, REG_SAVE_BOT - CPU_TEMP_BUF_SIZE,
2439 CPU_TEMP_BUF_SIZE);
2440
2441 /* Prologue */
2442 tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
2443 tcg_out32(s, (SZR == 8 ? STDU : STWU)
2444 | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
2445
2446 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2447 tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2448 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2449 }
2450 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE + LR_OFFSET);
2451
2452 #ifndef CONFIG_SOFTMMU
2453 if (guest_base) {
2454 tcg_out_movi_int(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base, true);
2455 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
2456 }
2457 #endif
2458
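/* Entered as tcg_qemu_tb_exec(env, tb_ptr): the first argument
   register (r3) carries env, the second (r4) the code pointer. */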
2459 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
2460 tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
2461 if (USE_REG_TB) {
2462 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, tcg_target_call_iarg_regs[1]);
2463 }
2464 tcg_out32(s, BCCTR | BO_ALWAYS);
2465
2466 /* Epilogue */
2467 tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
2468
2469 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R0, TCG_REG_R1, FRAME_SIZE + LR_OFFSET);
2470 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
2471 tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
2472 TCG_REG_R1, REG_SAVE_BOT + i * SZR);
2473 }
2474 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
2475 tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
2476 tcg_out32(s, BCLR | BO_ALWAYS);
2477 }
2478
2479 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg)
2480 {
2481 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R3, arg);
2482 tcg_out_b(s, 0, tcg_code_gen_epilogue);
2483 }
2484
2485 static void tcg_out_goto_tb(TCGContext *s, int which)
2486 {
2487 uintptr_t ptr = get_jmp_target_addr(s, which);
2488
2489 if (USE_REG_TB) {
2490 ptrdiff_t offset = tcg_tbrel_diff(s, (void *)ptr);
2491 tcg_out_mem_long(s, LD, LDX, TCG_REG_TB, TCG_REG_TB, offset);
2492
2493 /* Direct branch will be patched by tb_target_set_jmp_target. */
2494 set_jmp_insn_offset(s, which);
2495 tcg_out32(s, MTSPR | RS(TCG_REG_TB) | CTR);
2496
2497 /* When the branch is out of range, fall through to indirect. */
2498 tcg_out32(s, BCCTR | BO_ALWAYS);
2499
2500 /* For the unlinked case, we must reset TCG_REG_TB to the start of this TB. */
2501 set_jmp_reset_offset(s, which);
2502 tcg_out_mem_long(s, ADDI, ADD, TCG_REG_TB, TCG_REG_TB,
2503 -tcg_current_code_size(s));
2504 } else {
2505 /* Direct branch will be patched by tb_target_set_jmp_target. */
2506 set_jmp_insn_offset(s, which);
2507 tcg_out32(s, NOP);
2508
2509 /* When the branch is out of range, fall through to indirect. */
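/* Splitting ptr lets the low 16 bits fold into the load
   displacement: (ptr - (int16_t)ptr) + (int16_t)ptr == ptr. */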
2510 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP1, ptr - (int16_t)ptr);
2511 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_REG_TMP1, (int16_t)ptr);
2512 tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
2513 tcg_out32(s, BCCTR | BO_ALWAYS);
2514 set_jmp_reset_offset(s, which);
2515 }
2516 }
2517
2518 void tb_target_set_jmp_target(const TranslationBlock *tb, int n,
2519 uintptr_t jmp_rx, uintptr_t jmp_rw)
2520 {
2521 uintptr_t addr = tb->jmp_target_addr[n];
2522 intptr_t diff = addr - jmp_rx;
2523 tcg_insn_unit insn;
2524
2525 if (in_range_b(diff)) {
2526 insn = B | (diff & 0x3fffffc);
2527 } else if (USE_REG_TB) {
2528 insn = MTSPR | RS(TCG_REG_TB) | CTR;
2529 } else {
2530 insn = NOP;
2531 }
2532
2533 qatomic_set((uint32_t *)jmp_rw, insn);
2534 flush_idcache_range(jmp_rx, jmp_rw, 4);
2535 }
2536
2537 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
2538 const TCGArg args[TCG_MAX_OP_ARGS],
2539 const int const_args[TCG_MAX_OP_ARGS])
2540 {
2541 TCGArg a0, a1, a2;
2542
2543 switch (opc) {
2544 case INDEX_op_goto_ptr:
2545 tcg_out32(s, MTSPR | RS(args[0]) | CTR);
2546 if (USE_REG_TB) {
2547 tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_TB, args[0]);
2548 }
2549 tcg_out32(s, ADDI | TAI(TCG_REG_R3, 0, 0));
2550 tcg_out32(s, BCCTR | BO_ALWAYS);
2551 break;
2552 case INDEX_op_br:
2553 {
2554 TCGLabel *l = arg_label(args[0]);
2555 uint32_t insn = B;
2556
2557 if (l->has_value) {
2558 insn |= reloc_pc24_val(tcg_splitwx_to_rx(s->code_ptr),
2559 l->u.value_ptr);
2560 } else {
2561 tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, l, 0);
2562 }
2563 tcg_out32(s, insn);
2564 }
2565 break;
2566 case INDEX_op_ld8u_i32:
2567 case INDEX_op_ld8u_i64:
2568 tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2569 break;
2570 case INDEX_op_ld8s_i32:
2571 case INDEX_op_ld8s_i64:
2572 tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
2573 tcg_out_ext8s(s, TCG_TYPE_REG, args[0], args[0]);
2574 break;
2575 case INDEX_op_ld16u_i32:
2576 case INDEX_op_ld16u_i64:
2577 tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
2578 break;
2579 case INDEX_op_ld16s_i32:
2580 case INDEX_op_ld16s_i64:
2581 tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
2582 break;
2583 case INDEX_op_ld_i32:
2584 case INDEX_op_ld32u_i64:
2585 tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
2586 break;
2587 case INDEX_op_ld32s_i64:
2588 tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
2589 break;
2590 case INDEX_op_ld_i64:
2591 tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
2592 break;
2593 case INDEX_op_st8_i32:
2594 case INDEX_op_st8_i64:
2595 tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
2596 break;
2597 case INDEX_op_st16_i32:
2598 case INDEX_op_st16_i64:
2599 tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
2600 break;
2601 case INDEX_op_st_i32:
2602 case INDEX_op_st32_i64:
2603 tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
2604 break;
2605 case INDEX_op_st_i64:
2606 tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
2607 break;
2608
2609 case INDEX_op_add_i32:
2610 a0 = args[0], a1 = args[1], a2 = args[2];
2611 if (const_args[2]) {
2612 do_addi_32:
2613 tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
2614 } else {
2615 tcg_out32(s, ADD | TAB(a0, a1, a2));
2616 }
2617 break;
2618 case INDEX_op_sub_i32:
2619 a0 = args[0], a1 = args[1], a2 = args[2];
2620 if (const_args[1]) {
2621 if (const_args[2]) {
2622 tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
2623 } else {
2624 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
2625 }
2626 } else if (const_args[2]) {
2627 a2 = -a2;
2628 goto do_addi_32;
2629 } else {
2630 tcg_out32(s, SUBF | TAB(a0, a2, a1));
2631 }
2632 break;
2633
2634 case INDEX_op_and_i32:
2635 a0 = args[0], a1 = args[1], a2 = args[2];
2636 if (const_args[2]) {
2637 tcg_out_andi32(s, a0, a1, a2);
2638 } else {
2639 tcg_out32(s, AND | SAB(a1, a0, a2));
2640 }
2641 break;
2642 case INDEX_op_and_i64:
2643 a0 = args[0], a1 = args[1], a2 = args[2];
2644 if (const_args[2]) {
2645 tcg_out_andi64(s, a0, a1, a2);
2646 } else {
2647 tcg_out32(s, AND | SAB(a1, a0, a2));
2648 }
2649 break;
2650 case INDEX_op_or_i64:
2651 case INDEX_op_or_i32:
2652 a0 = args[0], a1 = args[1], a2 = args[2];
2653 if (const_args[2]) {
2654 tcg_out_ori32(s, a0, a1, a2);
2655 } else {
2656 tcg_out32(s, OR | SAB(a1, a0, a2));
2657 }
2658 break;
2659 case INDEX_op_xor_i64:
2660 case INDEX_op_xor_i32:
2661 a0 = args[0], a1 = args[1], a2 = args[2];
2662 if (const_args[2]) {
2663 tcg_out_xori32(s, a0, a1, a2);
2664 } else {
2665 tcg_out32(s, XOR | SAB(a1, a0, a2));
2666 }
2667 break;
2668 case INDEX_op_andc_i32:
2669 a0 = args[0], a1 = args[1], a2 = args[2];
2670 if (const_args[2]) {
2671 tcg_out_andi32(s, a0, a1, ~a2);
2672 } else {
2673 tcg_out32(s, ANDC | SAB(a1, a0, a2));
2674 }
2675 break;
2676 case INDEX_op_andc_i64:
2677 a0 = args[0], a1 = args[1], a2 = args[2];
2678 if (const_args[2]) {
2679 tcg_out_andi64(s, a0, a1, ~a2);
2680 } else {
2681 tcg_out32(s, ANDC | SAB(a1, a0, a2));
2682 }
2683 break;
2684 case INDEX_op_orc_i32:
2685 if (const_args[2]) {
2686 tcg_out_ori32(s, args[0], args[1], ~args[2]);
2687 break;
2688 }
2689 /* FALLTHRU */
2690 case INDEX_op_orc_i64:
2691 tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
2692 break;
2693 case INDEX_op_eqv_i32:
2694 if (const_args[2]) {
2695 tcg_out_xori32(s, args[0], args[1], ~args[2]);
2696 break;
2697 }
2698 /* FALLTHRU */
2699 case INDEX_op_eqv_i64:
2700 tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
2701 break;
2702 case INDEX_op_nand_i32:
2703 case INDEX_op_nand_i64:
2704 tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
2705 break;
2706 case INDEX_op_nor_i32:
2707 case INDEX_op_nor_i64:
2708 tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
2709 break;
2710
2711 case INDEX_op_clz_i32:
2712 tcg_out_cntxz(s, TCG_TYPE_I32, CNTLZW, args[0], args[1],
2713 args[2], const_args[2]);
2714 break;
2715 case INDEX_op_ctz_i32:
2716 tcg_out_cntxz(s, TCG_TYPE_I32, CNTTZW, args[0], args[1],
2717 args[2], const_args[2]);
2718 break;
2719 case INDEX_op_ctpop_i32:
2720 tcg_out32(s, CNTPOPW | SAB(args[1], args[0], 0));
2721 break;
2722
2723 case INDEX_op_clz_i64:
2724 tcg_out_cntxz(s, TCG_TYPE_I64, CNTLZD, args[0], args[1],
2725 args[2], const_args[2]);
2726 break;
2727 case INDEX_op_ctz_i64:
2728 tcg_out_cntxz(s, TCG_TYPE_I64, CNTTZD, args[0], args[1],
2729 args[2], const_args[2]);
2730 break;
2731 case INDEX_op_ctpop_i64:
2732 tcg_out32(s, CNTPOPD | SAB(args[1], args[0], 0));
2733 break;
2734
2735 case INDEX_op_mul_i32:
2736 a0 = args[0], a1 = args[1], a2 = args[2];
2737 if (const_args[2]) {
2738 tcg_out32(s, MULLI | TAI(a0, a1, a2));
2739 } else {
2740 tcg_out32(s, MULLW | TAB(a0, a1, a2));
2741 }
2742 break;
2743
2744 case INDEX_op_div_i32:
2745 tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
2746 break;
2747
2748 case INDEX_op_divu_i32:
2749 tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
2750 break;
2751
2752 case INDEX_op_rem_i32:
2753 tcg_out32(s, MODSW | TAB(args[0], args[1], args[2]));
2754 break;
2755
2756 case INDEX_op_remu_i32:
2757 tcg_out32(s, MODUW | TAB(args[0], args[1], args[2]));
2758 break;
2759
2760 case INDEX_op_shl_i32:
2761 if (const_args[2]) {
2762 /* Limit immediate shift count lest we create an illegal insn. */
2763 tcg_out_shli32(s, args[0], args[1], args[2] & 31);
2764 } else {
2765 tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
2766 }
2767 break;
2768 case INDEX_op_shr_i32:
2769 if (const_args[2]) {
2770 /* Limit immediate shift count lest we create an illegal insn. */
2771 tcg_out_shri32(s, args[0], args[1], args[2] & 31);
2772 } else {
2773 tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
2774 }
2775 break;
2776 case INDEX_op_sar_i32:
2777 if (const_args[2]) {
2778 tcg_out_sari32(s, args[0], args[1], args[2]);
2779 } else {
2780 tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
2781 }
2782 break;
2783 case INDEX_op_rotl_i32:
2784 if (const_args[2]) {
2785 tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
2786 } else {
2787 tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
2788 | MB(0) | ME(31));
2789 }
2790 break;
2791 case INDEX_op_rotr_i32:
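/* Rotate right by n is rotate left by 32 - n. */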
2792 if (const_args[2]) {
2793 tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
2794 } else {
2795 tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
2796 tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
2797 | MB(0) | ME(31));
2798 }
2799 break;
2800
2801 case INDEX_op_brcond_i32:
2802 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
2803 arg_label(args[3]), TCG_TYPE_I32);
2804 break;
2805 case INDEX_op_brcond_i64:
2806 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
2807 arg_label(args[3]), TCG_TYPE_I64);
2808 break;
2809 case INDEX_op_brcond2_i32:
2810 tcg_out_brcond2(s, args, const_args);
2811 break;
2812
2813 case INDEX_op_neg_i32:
2814 case INDEX_op_neg_i64:
2815 tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
2816 break;
2817
2818 case INDEX_op_not_i32:
2819 case INDEX_op_not_i64:
2820 tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
2821 break;
2822
2823 case INDEX_op_add_i64:
2824 a0 = args[0], a1 = args[1], a2 = args[2];
2825 if (const_args[2]) {
2826 do_addi_64:
2827 tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
2828 } else {
2829 tcg_out32(s, ADD | TAB(a0, a1, a2));
2830 }
2831 break;
2832 case INDEX_op_sub_i64:
2833 a0 = args[0], a1 = args[1], a2 = args[2];
2834 if (const_args[1]) {
2835 if (const_args[2]) {
2836 tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
2837 } else {
2838 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
2839 }
2840 } else if (const_args[2]) {
2841 a2 = -a2;
2842 goto do_addi_64;
2843 } else {
2844 tcg_out32(s, SUBF | TAB(a0, a2, a1));
2845 }
2846 break;
2847
2848 case INDEX_op_shl_i64:
2849 if (const_args[2]) {
2850 /* Limit immediate shift count lest we create an illegal insn. */
2851 tcg_out_shli64(s, args[0], args[1], args[2] & 63);
2852 } else {
2853 tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
2854 }
2855 break;
2856 case INDEX_op_shr_i64:
2857 if (const_args[2]) {
2858 /* Limit immediate shift count lest we create an illegal insn. */
2859 tcg_out_shri64(s, args[0], args[1], args[2] & 63);
2860 } else {
2861 tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
2862 }
2863 break;
2864 case INDEX_op_sar_i64:
2865 if (const_args[2]) {
2866 tcg_out_sari64(s, args[0], args[1], args[2]);
2867 } else {
2868 tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
2869 }
2870 break;
2871 case INDEX_op_rotl_i64:
2872 if (const_args[2]) {
2873 tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
2874 } else {
2875 tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
2876 }
2877 break;
2878 case INDEX_op_rotr_i64:
2879 if (const_args[2]) {
2880 tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
2881 } else {
2882 tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
2883 tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
2884 }
2885 break;
2886
2887 case INDEX_op_mul_i64:
2888 a0 = args[0], a1 = args[1], a2 = args[2];
2889 if (const_args[2]) {
2890 tcg_out32(s, MULLI | TAI(a0, a1, a2));
2891 } else {
2892 tcg_out32(s, MULLD | TAB(a0, a1, a2));
2893 }
2894 break;
2895 case INDEX_op_div_i64:
2896 tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
2897 break;
2898 case INDEX_op_divu_i64:
2899 tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
2900 break;
2901 case INDEX_op_rem_i64:
2902 tcg_out32(s, MODSD | TAB(args[0], args[1], args[2]));
2903 break;
2904 case INDEX_op_remu_i64:
2905 tcg_out32(s, MODUD | TAB(args[0], args[1], args[2]));
2906 break;
2907
2908 case INDEX_op_qemu_ld_a64_i32:
2909 if (TCG_TARGET_REG_BITS == 32) {
2910 tcg_out_qemu_ld(s, args[0], -1, args[1], args[2],
2911 args[3], TCG_TYPE_I32);
2912 break;
2913 }
2914 /* fall through */
2915 case INDEX_op_qemu_ld_a32_i32:
2916 tcg_out_qemu_ld(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
2917 break;
2918 case INDEX_op_qemu_ld_a32_i64:
2919 if (TCG_TARGET_REG_BITS == 64) {
2920 tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
2921 args[2], TCG_TYPE_I64);
2922 } else {
2923 tcg_out_qemu_ld(s, args[0], args[1], args[2], -1,
2924 args[3], TCG_TYPE_I64);
2925 }
2926 break;
2927 case INDEX_op_qemu_ld_a64_i64:
2928 if (TCG_TARGET_REG_BITS == 64) {
2929 tcg_out_qemu_ld(s, args[0], -1, args[1], -1,
2930 args[2], TCG_TYPE_I64);
2931 } else {
2932 tcg_out_qemu_ld(s, args[0], args[1], args[2], args[3],
2933 args[4], TCG_TYPE_I64);
2934 }
2935 break;
2936 case INDEX_op_qemu_ld_a32_i128:
2937 case INDEX_op_qemu_ld_a64_i128:
2938 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
2939 tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], true);
2940 break;
2941
2942 case INDEX_op_qemu_st_a64_i32:
2943 if (TCG_TARGET_REG_BITS == 32) {
2944 tcg_out_qemu_st(s, args[0], -1, args[1], args[2],
2945 args[3], TCG_TYPE_I32);
2946 break;
2947 }
2948 /* fall through */
2949 case INDEX_op_qemu_st_a32_i32:
2950 tcg_out_qemu_st(s, args[0], -1, args[1], -1, args[2], TCG_TYPE_I32);
2951 break;
2952 case INDEX_op_qemu_st_a32_i64:
2953 if (TCG_TARGET_REG_BITS == 64) {
2954 tcg_out_qemu_st(s, args[0], -1, args[1], -1,
2955 args[2], TCG_TYPE_I64);
2956 } else {
2957 tcg_out_qemu_st(s, args[0], args[1], args[2], -1,
2958 args[3], TCG_TYPE_I64);
2959 }
2960 break;
2961 case INDEX_op_qemu_st_a64_i64:
2962 if (TCG_TARGET_REG_BITS == 64) {
2963 tcg_out_qemu_st(s, args[0], -1, args[1], -1,
2964 args[2], TCG_TYPE_I64);
2965 } else {
2966 tcg_out_qemu_st(s, args[0], args[1], args[2], args[3],
2967 args[4], TCG_TYPE_I64);
2968 }
2969 break;
2970 case INDEX_op_qemu_st_a32_i128:
2971 case INDEX_op_qemu_st_a64_i128:
2972 tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
2973 tcg_out_qemu_ldst_i128(s, args[0], args[1], args[2], args[3], false);
2974 break;
2975
2976 case INDEX_op_setcond_i32:
2977 tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
2978 const_args[2]);
2979 break;
2980 case INDEX_op_setcond_i64:
2981 tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
2982 const_args[2]);
2983 break;
2984 case INDEX_op_setcond2_i32:
2985 tcg_out_setcond2(s, args, const_args);
2986 break;
2987
2988 case INDEX_op_bswap16_i32:
2989 case INDEX_op_bswap16_i64:
2990 tcg_out_bswap16(s, args[0], args[1], args[2]);
2991 break;
2992 case INDEX_op_bswap32_i32:
2993 tcg_out_bswap32(s, args[0], args[1], 0);
2994 break;
2995 case INDEX_op_bswap32_i64:
2996 tcg_out_bswap32(s, args[0], args[1], args[2]);
2997 break;
2998 case INDEX_op_bswap64_i64:
2999 tcg_out_bswap64(s, args[0], args[1]);
3000 break;
3001
3002 case INDEX_op_deposit_i32:
3003 if (const_args[2]) {
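/* Per the constraints, a constant arg here can only be zero,
   so depositing it reduces to clearing the field. */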
3004 uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
3005 tcg_out_andi32(s, args[0], args[0], ~mask);
3006 } else {
3007 tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
3008 32 - args[3] - args[4], 31 - args[3]);
3009 }
3010 break;
3011 case INDEX_op_deposit_i64:
3012 if (const_args[2]) {
3013 uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
3014 tcg_out_andi64(s, args[0], args[0], ~mask);
3015 } else {
3016 tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
3017 64 - args[3] - args[4]);
3018 }
3019 break;
3020
3021 case INDEX_op_extract_i32:
3022 tcg_out_rlw(s, RLWINM, args[0], args[1],
3023 32 - args[2], 32 - args[3], 31);
3024 break;
3025 case INDEX_op_extract_i64:
3026 tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 64 - args[3]);
3027 break;
3028
3029 case INDEX_op_movcond_i32:
3030 tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
3031 args[3], args[4], const_args[2]);
3032 break;
3033 case INDEX_op_movcond_i64:
3034 tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
3035 args[3], args[4], const_args[2]);
3036 break;
3037
3038 #if TCG_TARGET_REG_BITS == 64
3039 case INDEX_op_add2_i64:
3040 #else
3041 case INDEX_op_add2_i32:
3042 #endif
3043 /* Note that the CA bit is defined based on the word size of the
3044 environment. So in 64-bit mode it's always carry-out of bit 63.
3045 The fallback code using deposit works just as well for 32-bit. */
3046 a0 = args[0], a1 = args[1];
3047 if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
3048 a0 = TCG_REG_R0;
3049 }
3050 if (const_args[4]) {
3051 tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
3052 } else {
3053 tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
3054 }
3055 if (const_args[5]) {
3056 tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
3057 } else {
3058 tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
3059 }
3060 if (a0 != args[0]) {
3061 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
3062 }
3063 break;
3064
3065 #if TCG_TARGET_REG_BITS == 64
3066 case INDEX_op_sub2_i64:
3067 #else
3068 case INDEX_op_sub2_i32:
3069 #endif
3070 a0 = args[0], a1 = args[1];
3071 if (a0 == args[5] || (!const_args[3] && a0 == args[3])) {
3072 a0 = TCG_REG_R0;
3073 }
3074 if (const_args[2]) {
3075 tcg_out32(s, SUBFIC | TAI(a0, args[4], args[2]));
3076 } else {
3077 tcg_out32(s, SUBFC | TAB(a0, args[4], args[2]));
3078 }
3079 if (const_args[3]) {
3080 tcg_out32(s, (args[3] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
3081 } else {
3082 tcg_out32(s, SUBFE | TAB(a1, args[5], args[3]));
3083 }
3084 if (a0 != args[0]) {
3085 tcg_out_mov(s, TCG_TYPE_REG, args[0], a0);
3086 }
3087 break;
3088
3089 case INDEX_op_muluh_i32:
3090 tcg_out32(s, MULHWU | TAB(args[0], args[1], args[2]));
3091 break;
3092 case INDEX_op_mulsh_i32:
3093 tcg_out32(s, MULHW | TAB(args[0], args[1], args[2]));
3094 break;
3095 case INDEX_op_muluh_i64:
3096 tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
3097 break;
3098 case INDEX_op_mulsh_i64:
3099 tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
3100 break;
3101
3102 case INDEX_op_mb:
3103 tcg_out_mb(s, args[0]);
3104 break;
3105
3106 case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
3107 case INDEX_op_mov_i64:
3108 case INDEX_op_call: /* Always emitted via tcg_out_call. */
3109 case INDEX_op_exit_tb: /* Always emitted via tcg_out_exit_tb. */
3110 case INDEX_op_goto_tb: /* Always emitted via tcg_out_goto_tb. */
3111 case INDEX_op_ext8s_i32: /* Always emitted via tcg_reg_alloc_op. */
3112 case INDEX_op_ext8s_i64:
3113 case INDEX_op_ext8u_i32:
3114 case INDEX_op_ext8u_i64:
3115 case INDEX_op_ext16s_i32:
3116 case INDEX_op_ext16s_i64:
3117 case INDEX_op_ext16u_i32:
3118 case INDEX_op_ext16u_i64:
3119 case INDEX_op_ext32s_i64:
3120 case INDEX_op_ext32u_i64:
3121 case INDEX_op_ext_i32_i64:
3122 case INDEX_op_extu_i32_i64:
3123 case INDEX_op_extrl_i64_i32:
3124 default:
3125 g_assert_not_reached();
3126 }
3127 }
3128
3129 int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
3130 {
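/* Return 1 if supported directly, -1 if supported via expansion
   in tcg_expand_vec_op, and 0 if unsupported. */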
3131 switch (opc) {
3132 case INDEX_op_and_vec:
3133 case INDEX_op_or_vec:
3134 case INDEX_op_xor_vec:
3135 case INDEX_op_andc_vec:
3136 case INDEX_op_not_vec:
3137 case INDEX_op_nor_vec:
3138 case INDEX_op_eqv_vec:
3139 case INDEX_op_nand_vec:
3140 return 1;
3141 case INDEX_op_orc_vec:
3142 return have_isa_2_07;
3143 case INDEX_op_add_vec:
3144 case INDEX_op_sub_vec:
3145 case INDEX_op_smax_vec:
3146 case INDEX_op_smin_vec:
3147 case INDEX_op_umax_vec:
3148 case INDEX_op_umin_vec:
3149 case INDEX_op_shlv_vec:
3150 case INDEX_op_shrv_vec:
3151 case INDEX_op_sarv_vec:
3152 case INDEX_op_rotlv_vec:
3153 return vece <= MO_32 || have_isa_2_07;
3154 case INDEX_op_ssadd_vec:
3155 case INDEX_op_sssub_vec:
3156 case INDEX_op_usadd_vec:
3157 case INDEX_op_ussub_vec:
3158 return vece <= MO_32;
3159 case INDEX_op_cmp_vec:
3160 case INDEX_op_shli_vec:
3161 case INDEX_op_shri_vec:
3162 case INDEX_op_sari_vec:
3163 case INDEX_op_rotli_vec:
3164 return vece <= MO_32 || have_isa_2_07 ? -1 : 0;
3165 case INDEX_op_neg_vec:
3166 return vece >= MO_32 && have_isa_3_00;
3167 case INDEX_op_mul_vec:
3168 switch (vece) {
3169 case MO_8:
3170 case MO_16:
3171 return -1;
3172 case MO_32:
3173 return have_isa_2_07 ? 1 : -1;
3174 case MO_64:
3175 return have_isa_3_10;
3176 }
3177 return 0;
3178 case INDEX_op_bitsel_vec:
3179 return have_vsx;
3180 case INDEX_op_rotrv_vec:
3181 return -1;
3182 default:
3183 return 0;
3184 }
3185 }
3186
3187 static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
3188 TCGReg dst, TCGReg src)
3189 {
3190 tcg_debug_assert(dst >= TCG_REG_V0);
3191
3192 /* Splat from integer reg allowed via constraints for v3.00. */
3193 if (src < TCG_REG_V0) {
3194 tcg_debug_assert(have_isa_3_00);
3195 switch (vece) {
3196 case MO_64:
3197 tcg_out32(s, MTVSRDD | VRT(dst) | RA(src) | RB(src));
3198 return true;
3199 case MO_32:
3200 tcg_out32(s, MTVSRWS | VRT(dst) | RA(src));
3201 return true;
3202 default:
3203 /* Fail, so that we fall back on either dupm or mov+dup. */
3204 return false;
3205 }
3206 }
3207
3208 /*
3209 * Recall we use (or emulate) VSX integer loads, so the integer is
3210 * right justified within the left (zero-index) double-word.
3211 */
3212 switch (vece) {
3213 case MO_8:
3214 tcg_out32(s, VSPLTB | VRT(dst) | VRB(src) | (7 << 16));
3215 break;
3216 case MO_16:
3217 tcg_out32(s, VSPLTH | VRT(dst) | VRB(src) | (3 << 16));
3218 break;
3219 case MO_32:
3220 tcg_out32(s, VSPLTW | VRT(dst) | VRB(src) | (1 << 16));
3221 break;
3222 case MO_64:
3223 if (have_vsx) {
3224 tcg_out32(s, XXPERMDI | VRT(dst) | VRA(src) | VRB(src));
3225 break;
3226 }
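/* Without VSX: tmp = { src.d1, src.d0 }, then
   dst = { tmp.d1, src.d0 } = { src.d0, src.d0 }. */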
3227 tcg_out_vsldoi(s, TCG_VEC_TMP1, src, src, 8);
3228 tcg_out_vsldoi(s, dst, TCG_VEC_TMP1, src, 8);
3229 break;
3230 default:
3231 g_assert_not_reached();
3232 }
3233 return true;
3234 }
3235
3236 static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
3237 TCGReg out, TCGReg base, intptr_t offset)
3238 {
3239 int elt;
3240
3241 tcg_debug_assert(out >= TCG_REG_V0);
3242 switch (vece) {
3243 case MO_8:
3244 if (have_isa_3_00) {
3245 tcg_out_mem_long(s, LXV, LVX, out, base, offset & -16);
3246 } else {
3247 tcg_out_mem_long(s, 0, LVEBX, out, base, offset);
3248 }
3249 elt = extract32(offset, 0, 4);
3250 #if !HOST_BIG_ENDIAN
3251 elt ^= 15;
3252 #endif
3253 tcg_out32(s, VSPLTB | VRT(out) | VRB(out) | (elt << 16));
3254 break;
3255 case MO_16:
3256 tcg_debug_assert((offset & 1) == 0);
3257 if (have_isa_3_00) {
3258 tcg_out_mem_long(s, LXV | 8, LVX, out, base, offset & -16);
3259 } else {
3260 tcg_out_mem_long(s, 0, LVEHX, out, base, offset);
3261 }
3262 elt = extract32(offset, 1, 3);
3263 #if !HOST_BIG_ENDIAN
3264 elt ^= 7;
3265 #endif
3266 tcg_out32(s, VSPLTH | VRT(out) | VRB(out) | (elt << 16));
3267 break;
3268 case MO_32:
3269 if (have_isa_3_00) {
3270 tcg_out_mem_long(s, 0, LXVWSX, out, base, offset);
3271 break;
3272 }
3273 tcg_debug_assert((offset & 3) == 0);
3274 tcg_out_mem_long(s, 0, LVEWX, out, base, offset);
3275 elt = extract32(offset, 2, 2);
3276 #if !HOST_BIG_ENDIAN
3277 elt ^= 3;
3278 #endif
3279 tcg_out32(s, VSPLTW | VRT(out) | VRB(out) | (elt << 16));
3280 break;
3281 case MO_64:
3282 if (have_vsx) {
3283 tcg_out_mem_long(s, 0, LXVDSX, out, base, offset);
3284 break;
3285 }
3286 tcg_debug_assert((offset & 7) == 0);
3287 tcg_out_mem_long(s, 0, LVX, out, base, offset & -16);
3288 tcg_out_vsldoi(s, TCG_VEC_TMP1, out, out, 8);
3289 elt = extract32(offset, 3, 1);
3290 #if !HOST_BIG_ENDIAN
3291 elt = !elt;
3292 #endif
3293 if (elt) {
3294 tcg_out_vsldoi(s, out, out, TCG_VEC_TMP1, 8);
3295 } else {
3296 tcg_out_vsldoi(s, out, TCG_VEC_TMP1, out, 8);
3297 }
3298 break;
3299 default:
3300 g_assert_not_reached();
3301 }
3302 return true;
3303 }
3304
3305 static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
3306 unsigned vecl, unsigned vece,
3307 const TCGArg args[TCG_MAX_OP_ARGS],
3308 const int const_args[TCG_MAX_OP_ARGS])
3309 {
3310 static const uint32_t
3311 add_op[4] = { VADDUBM, VADDUHM, VADDUWM, VADDUDM },
3312 sub_op[4] = { VSUBUBM, VSUBUHM, VSUBUWM, VSUBUDM },
3313 mul_op[4] = { 0, 0, VMULUWM, VMULLD },
3314 neg_op[4] = { 0, 0, VNEGW, VNEGD },
3315 eq_op[4] = { VCMPEQUB, VCMPEQUH, VCMPEQUW, VCMPEQUD },
3316 ne_op[4] = { VCMPNEB, VCMPNEH, VCMPNEW, 0 },
3317 gts_op[4] = { VCMPGTSB, VCMPGTSH, VCMPGTSW, VCMPGTSD },
3318 gtu_op[4] = { VCMPGTUB, VCMPGTUH, VCMPGTUW, VCMPGTUD },
3319 ssadd_op[4] = { VADDSBS, VADDSHS, VADDSWS, 0 },
3320 usadd_op[4] = { VADDUBS, VADDUHS, VADDUWS, 0 },
3321 sssub_op[4] = { VSUBSBS, VSUBSHS, VSUBSWS, 0 },
3322 ussub_op[4] = { VSUBUBS, VSUBUHS, VSUBUWS, 0 },
3323 umin_op[4] = { VMINUB, VMINUH, VMINUW, VMINUD },
3324 smin_op[4] = { VMINSB, VMINSH, VMINSW, VMINSD },
3325 umax_op[4] = { VMAXUB, VMAXUH, VMAXUW, VMAXUD },
3326 smax_op[4] = { VMAXSB, VMAXSH, VMAXSW, VMAXSD },
3327 shlv_op[4] = { VSLB, VSLH, VSLW, VSLD },
3328 shrv_op[4] = { VSRB, VSRH, VSRW, VSRD },
3329 sarv_op[4] = { VSRAB, VSRAH, VSRAW, VSRAD },
3330 mrgh_op[4] = { VMRGHB, VMRGHH, VMRGHW, 0 },
3331 mrgl_op[4] = { VMRGLB, VMRGLH, VMRGLW, 0 },
3332 muleu_op[4] = { VMULEUB, VMULEUH, VMULEUW, 0 },
3333 mulou_op[4] = { VMULOUB, VMULOUH, VMULOUW, 0 },
3334 pkum_op[4] = { VPKUHUM, VPKUWUM, 0, 0 },
3335 rotl_op[4] = { VRLB, VRLH, VRLW, VRLD };
3336
3337 TCGType type = vecl + TCG_TYPE_V64;
3338 TCGArg a0 = args[0], a1 = args[1], a2 = args[2];
3339 uint32_t insn;
3340
3341 switch (opc) {
3342 case INDEX_op_ld_vec:
3343 tcg_out_ld(s, type, a0, a1, a2);
3344 return;
3345 case INDEX_op_st_vec:
3346 tcg_out_st(s, type, a0, a1, a2);
3347 return;
3348 case INDEX_op_dupm_vec:
3349 tcg_out_dupm_vec(s, type, vece, a0, a1, a2);
3350 return;
3351
3352 case INDEX_op_add_vec:
3353 insn = add_op[vece];
3354 break;
3355 case INDEX_op_sub_vec:
3356 insn = sub_op[vece];
3357 break;
3358 case INDEX_op_neg_vec:
3359 insn = neg_op[vece];
3360 a2 = a1;
3361 a1 = 0;
3362 break;
3363 case INDEX_op_mul_vec:
3364 insn = mul_op[vece];
3365 break;
3366 case INDEX_op_ssadd_vec:
3367 insn = ssadd_op[vece];
3368 break;
3369 case INDEX_op_sssub_vec:
3370 insn = sssub_op[vece];
3371 break;
3372 case INDEX_op_usadd_vec:
3373 insn = usadd_op[vece];
3374 break;
3375 case INDEX_op_ussub_vec:
3376 insn = ussub_op[vece];
3377 break;
3378 case INDEX_op_smin_vec:
3379 insn = smin_op[vece];
3380 break;
3381 case INDEX_op_umin_vec:
3382 insn = umin_op[vece];
3383 break;
3384 case INDEX_op_smax_vec:
3385 insn = smax_op[vece];
3386 break;
3387 case INDEX_op_umax_vec:
3388 insn = umax_op[vece];
3389 break;
3390 case INDEX_op_shlv_vec:
3391 insn = shlv_op[vece];
3392 break;
3393 case INDEX_op_shrv_vec:
3394 insn = shrv_op[vece];
3395 break;
3396 case INDEX_op_sarv_vec:
3397 insn = sarv_op[vece];
3398 break;
3399 case INDEX_op_and_vec:
3400 insn = VAND;
3401 break;
3402 case INDEX_op_or_vec:
3403 insn = VOR;
3404 break;
3405 case INDEX_op_xor_vec:
3406 insn = VXOR;
3407 break;
3408 case INDEX_op_andc_vec:
3409 insn = VANDC;
3410 break;
3411 case INDEX_op_not_vec:
3412 insn = VNOR;
3413 a2 = a1;
3414 break;
3415 case INDEX_op_orc_vec:
3416 insn = VORC;
3417 break;
3418 case INDEX_op_nand_vec:
3419 insn = VNAND;
3420 break;
3421 case INDEX_op_nor_vec:
3422 insn = VNOR;
3423 break;
3424 case INDEX_op_eqv_vec:
3425 insn = VEQV;
3426 break;
3427
3428 case INDEX_op_cmp_vec:
3429 switch (args[3]) {
3430 case TCG_COND_EQ:
3431 insn = eq_op[vece];
3432 break;
3433 case TCG_COND_NE:
3434 insn = ne_op[vece];
3435 break;
3436 case TCG_COND_GT:
3437 insn = gts_op[vece];
3438 break;
3439 case TCG_COND_GTU:
3440 insn = gtu_op[vece];
3441 break;
3442 default:
3443 g_assert_not_reached();
3444 }
3445 break;
3446
3447 case INDEX_op_bitsel_vec:
3448 tcg_out32(s, XXSEL | VRT(a0) | VRC(a1) | VRB(a2) | VRA(args[3]));
3449 return;
3450
3451 case INDEX_op_dup2_vec:
3452 assert(TCG_TARGET_REG_BITS == 32);
3453 /* With inputs a1 = xLxx, a2 = xHxx */
3454 tcg_out32(s, VMRGHW | VRT(a0) | VRA(a2) | VRB(a1)); /* a0 = xxHL */
3455 tcg_out_vsldoi(s, TCG_VEC_TMP1, a0, a0, 8); /* tmp = HLxx */
3456 tcg_out_vsldoi(s, a0, a0, TCG_VEC_TMP1, 8); /* a0 = HLHL */
3457 return;
3458
3459 case INDEX_op_ppc_mrgh_vec:
3460 insn = mrgh_op[vece];
3461 break;
3462 case INDEX_op_ppc_mrgl_vec:
3463 insn = mrgl_op[vece];
3464 break;
3465 case INDEX_op_ppc_muleu_vec:
3466 insn = muleu_op[vece];
3467 break;
3468 case INDEX_op_ppc_mulou_vec:
3469 insn = mulou_op[vece];
3470 break;
3471 case INDEX_op_ppc_pkum_vec:
3472 insn = pkum_op[vece];
3473 break;
3474 case INDEX_op_rotlv_vec:
3475 insn = rotl_op[vece];
3476 break;
3477 case INDEX_op_ppc_msum_vec:
3478 tcg_debug_assert(vece == MO_16);
3479 tcg_out32(s, VMSUMUHM | VRT(a0) | VRA(a1) | VRB(a2) | VRC(args[3]));
3480 return;
3481
3482 case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
3483 case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
3484 default:
3485 g_assert_not_reached();
3486 }
3487
3488 tcg_debug_assert(insn != 0);
3489 tcg_out32(s, insn | VRT(a0) | VRA(a1) | VRB(a2));
3490 }
3491
3492 static void expand_vec_shi(TCGType type, unsigned vece, TCGv_vec v0,
3493 TCGv_vec v1, TCGArg imm, TCGOpcode opci)
3494 {
3495 TCGv_vec t1;
3496
3497 if (vece == MO_32) {
3498 /*
3499 * Only 5 bits are significant, and VSPLTISB can represent -16..15.
3500 * So using negative numbers gets us bit 4 (value 16) easily.
3501 */
3502 imm = sextract32(imm, 0, 5);
3503 } else {
3504 imm &= (8 << vece) - 1;
3505 }
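/* E.g. a MO_32 shift count of 27 becomes -5 above, and VSPLTISB -5
   splats 0xfb, whose low 5 bits are again 27. */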
3506
3507 /* Splat with bytes for xxspltib when 2.07 allows MO_64. */
3508 t1 = tcg_constant_vec(type, MO_8, imm);
3509 vec_gen_3(opci, type, vece, tcgv_vec_arg(v0),
3510 tcgv_vec_arg(v1), tcgv_vec_arg(t1));
3511 }
3512
3513 static void expand_vec_cmp(TCGType type, unsigned vece, TCGv_vec v0,
3514 TCGv_vec v1, TCGv_vec v2, TCGCond cond)
3515 {
3516 bool need_swap = false, need_inv = false;
3517
3518 tcg_debug_assert(vece <= MO_32 || have_isa_2_07);
3519
3520 switch (cond) {
3521 case TCG_COND_EQ:
3522 case TCG_COND_GT:
3523 case TCG_COND_GTU:
3524 break;
3525 case TCG_COND_NE:
3526 if (have_isa_3_00 && vece <= MO_32) {
3527 break;
3528 }
3529 /* fall through */
3530 case TCG_COND_LE:
3531 case TCG_COND_LEU:
3532 need_inv = true;
3533 break;
3534 case TCG_COND_LT:
3535 case TCG_COND_LTU:
3536 need_swap = true;
3537 break;
3538 case TCG_COND_GE:
3539 case TCG_COND_GEU:
3540 need_swap = need_inv = true;
3541 break;
3542 default:
3543 g_assert_not_reached();
3544 }
3545
3546 if (need_inv) {
3547 cond = tcg_invert_cond(cond);
3548 }
3549 if (need_swap) {
3550 TCGv_vec t1;
3551 t1 = v1, v1 = v2, v2 = t1;
3552 cond = tcg_swap_cond(cond);
3553 }
3554
3555 vec_gen_4(INDEX_op_cmp_vec, type, vece, tcgv_vec_arg(v0),
3556 tcgv_vec_arg(v1), tcgv_vec_arg(v2), cond);
3557
3558 if (need_inv) {
3559 tcg_gen_not_vec(vece, v0, v0);
3560 }
3561 }
3562
3563 static void expand_vec_mul(TCGType type, unsigned vece, TCGv_vec v0,
3564 TCGv_vec v1, TCGv_vec v2)
3565 {
3566 TCGv_vec t1 = tcg_temp_new_vec(type);
3567 TCGv_vec t2 = tcg_temp_new_vec(type);
3568 TCGv_vec c0, c16;
3569
3570 switch (vece) {
3571 case MO_8:
3572 case MO_16:
3573 vec_gen_3(INDEX_op_ppc_muleu_vec, type, vece, tcgv_vec_arg(t1),
3574 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3575 vec_gen_3(INDEX_op_ppc_mulou_vec, type, vece, tcgv_vec_arg(t2),
3576 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3577 vec_gen_3(INDEX_op_ppc_mrgh_vec, type, vece + 1, tcgv_vec_arg(v0),
3578 tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3579 vec_gen_3(INDEX_op_ppc_mrgl_vec, type, vece + 1, tcgv_vec_arg(t1),
3580 tcgv_vec_arg(t1), tcgv_vec_arg(t2));
3581 vec_gen_3(INDEX_op_ppc_pkum_vec, type, vece, tcgv_vec_arg(v0),
3582 tcgv_vec_arg(v0), tcgv_vec_arg(t1));
3583 break;
3584
3585 case MO_32:
3586 tcg_debug_assert(!have_isa_2_07);
3587 /*
3588 * Only 5 bits are significant, and VSPLTISB can represent -16..15.
3589 * So using -16 is a quick way to represent 16.
3590 */
3591 c16 = tcg_constant_vec(type, MO_8, -16);
3592 c0 = tcg_constant_vec(type, MO_8, 0);
3593
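/*
 * A sketch of the expansion below, per 32-bit element:
 *   t1 = rotl(v2, 16)                      swap 16-bit halves
 *   t2 = v1.lo * v2.lo                     vmulouh
 *   t1 = v1.hi * v2.lo + v1.lo * v2.hi     vmsumuhm, zero addend
 *   v0 = (t1 << 16) + t2                   the product mod 2^32
 */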
3594 vec_gen_3(INDEX_op_rotlv_vec, type, MO_32, tcgv_vec_arg(t1),
3595 tcgv_vec_arg(v2), tcgv_vec_arg(c16));
3596 vec_gen_3(INDEX_op_ppc_mulou_vec, type, MO_16, tcgv_vec_arg(t2),
3597 tcgv_vec_arg(v1), tcgv_vec_arg(v2));
3598 vec_gen_4(INDEX_op_ppc_msum_vec, type, MO_16, tcgv_vec_arg(t1),
3599 tcgv_vec_arg(v1), tcgv_vec_arg(t1), tcgv_vec_arg(c0));
3600 vec_gen_3(INDEX_op_shlv_vec, type, MO_32, tcgv_vec_arg(t1),
3601 tcgv_vec_arg(t1), tcgv_vec_arg(c16));
3602 tcg_gen_add_vec(MO_32, v0, t1, t2);
3603 break;
3604
3605 default:
3606 g_assert_not_reached();
3607 }
3608 tcg_temp_free_vec(t1);
3609 tcg_temp_free_vec(t2);
3610 }
3611
3612 void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
3613 TCGArg a0, ...)
3614 {
3615 va_list va;
3616 TCGv_vec v0, v1, v2, t0;
3617 TCGArg a2;
3618
3619 va_start(va, a0);
3620 v0 = temp_tcgv_vec(arg_temp(a0));
3621 v1 = temp_tcgv_vec(arg_temp(va_arg(va, TCGArg)));
3622 a2 = va_arg(va, TCGArg);
3623
3624 switch (opc) {
3625 case INDEX_op_shli_vec:
3626 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shlv_vec);
3627 break;
3628 case INDEX_op_shri_vec:
3629 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_shrv_vec);
3630 break;
3631 case INDEX_op_sari_vec:
3632 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_sarv_vec);
3633 break;
3634 case INDEX_op_rotli_vec:
3635 expand_vec_shi(type, vece, v0, v1, a2, INDEX_op_rotlv_vec);
3636 break;
3637 case INDEX_op_cmp_vec:
3638 v2 = temp_tcgv_vec(arg_temp(a2));
3639 expand_vec_cmp(type, vece, v0, v1, v2, va_arg(va, TCGArg));
3640 break;
3641 case INDEX_op_mul_vec:
3642 v2 = temp_tcgv_vec(arg_temp(a2));
3643 expand_vec_mul(type, vece, v0, v1, v2);
3644 break;
3645 case INDEX_op_rotlv_vec:
3646 v2 = temp_tcgv_vec(arg_temp(a2));
3647 t0 = tcg_temp_new_vec(type);
3648 tcg_gen_neg_vec(vece, t0, v2);
3649 tcg_gen_rotlv_vec(vece, v0, v1, t0);
3650 tcg_temp_free_vec(t0);
3651 break;
3652 default:
3653 g_assert_not_reached();
3654 }
3655 va_end(va);
3656 }
3657
3658 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
3659 {
3660 switch (op) {
3661 case INDEX_op_goto_ptr:
3662 return C_O0_I1(r);
3663
3664 case INDEX_op_ld8u_i32:
3665 case INDEX_op_ld8s_i32:
3666 case INDEX_op_ld16u_i32:
3667 case INDEX_op_ld16s_i32:
3668 case INDEX_op_ld_i32:
3669 case INDEX_op_ctpop_i32:
3670 case INDEX_op_neg_i32:
3671 case INDEX_op_not_i32:
3672 case INDEX_op_ext8s_i32:
3673 case INDEX_op_ext16s_i32:
3674 case INDEX_op_bswap16_i32:
3675 case INDEX_op_bswap32_i32:
3676 case INDEX_op_extract_i32:
3677 case INDEX_op_ld8u_i64:
3678 case INDEX_op_ld8s_i64:
3679 case INDEX_op_ld16u_i64:
3680 case INDEX_op_ld16s_i64:
3681 case INDEX_op_ld32u_i64:
3682 case INDEX_op_ld32s_i64:
3683 case INDEX_op_ld_i64:
3684 case INDEX_op_ctpop_i64:
3685 case INDEX_op_neg_i64:
3686 case INDEX_op_not_i64:
3687 case INDEX_op_ext8s_i64:
3688 case INDEX_op_ext16s_i64:
3689 case INDEX_op_ext32s_i64:
3690 case INDEX_op_ext_i32_i64:
3691 case INDEX_op_extu_i32_i64:
3692 case INDEX_op_bswap16_i64:
3693 case INDEX_op_bswap32_i64:
3694 case INDEX_op_bswap64_i64:
3695 case INDEX_op_extract_i64:
3696 return C_O1_I1(r, r);
3697
3698 case INDEX_op_st8_i32:
3699 case INDEX_op_st16_i32:
3700 case INDEX_op_st_i32:
3701 case INDEX_op_st8_i64:
3702 case INDEX_op_st16_i64:
3703 case INDEX_op_st32_i64:
3704 case INDEX_op_st_i64:
3705 return C_O0_I2(r, r);
3706
3707 case INDEX_op_add_i32:
3708 case INDEX_op_and_i32:
3709 case INDEX_op_or_i32:
3710 case INDEX_op_xor_i32:
3711 case INDEX_op_andc_i32:
3712 case INDEX_op_orc_i32:
3713 case INDEX_op_eqv_i32:
3714 case INDEX_op_shl_i32:
3715 case INDEX_op_shr_i32:
3716 case INDEX_op_sar_i32:
3717 case INDEX_op_rotl_i32:
3718 case INDEX_op_rotr_i32:
3719 case INDEX_op_setcond_i32:
3720 case INDEX_op_and_i64:
3721 case INDEX_op_andc_i64:
3722 case INDEX_op_shl_i64:
3723 case INDEX_op_shr_i64:
3724 case INDEX_op_sar_i64:
3725 case INDEX_op_rotl_i64:
3726 case INDEX_op_rotr_i64:
3727 case INDEX_op_setcond_i64:
3728 return C_O1_I2(r, r, ri);
3729
3730 case INDEX_op_mul_i32:
3731 case INDEX_op_mul_i64:
3732 return C_O1_I2(r, r, rI);
3733
3734 case INDEX_op_div_i32:
3735 case INDEX_op_divu_i32:
3736 case INDEX_op_rem_i32:
3737 case INDEX_op_remu_i32:
3738 case INDEX_op_nand_i32:
3739 case INDEX_op_nor_i32:
3740 case INDEX_op_muluh_i32:
3741 case INDEX_op_mulsh_i32:
3742 case INDEX_op_orc_i64:
3743 case INDEX_op_eqv_i64:
3744 case INDEX_op_nand_i64:
3745 case INDEX_op_nor_i64:
3746 case INDEX_op_div_i64:
3747 case INDEX_op_divu_i64:
3748 case INDEX_op_rem_i64:
3749 case INDEX_op_remu_i64:
3750 case INDEX_op_mulsh_i64:
3751 case INDEX_op_muluh_i64:
3752 return C_O1_I2(r, r, r);
3753
3754 case INDEX_op_sub_i32:
3755 return C_O1_I2(r, rI, ri);
3756 case INDEX_op_add_i64:
3757 return C_O1_I2(r, r, rT);
3758 case INDEX_op_or_i64:
3759 case INDEX_op_xor_i64:
3760 return C_O1_I2(r, r, rU);
3761 case INDEX_op_sub_i64:
3762 return C_O1_I2(r, rI, rT);
3763 case INDEX_op_clz_i32:
3764 case INDEX_op_ctz_i32:
3765 case INDEX_op_clz_i64:
3766 case INDEX_op_ctz_i64:
3767 return C_O1_I2(r, r, rZW);
3768
3769 case INDEX_op_brcond_i32:
3770 case INDEX_op_brcond_i64:
3771 return C_O0_I2(r, ri);
3772
3773 case INDEX_op_movcond_i32:
3774 case INDEX_op_movcond_i64:
3775 return C_O1_I4(r, r, ri, rZ, rZ);
3776 case INDEX_op_deposit_i32:
3777 case INDEX_op_deposit_i64:
3778 return C_O1_I2(r, 0, rZ);
3779 case INDEX_op_brcond2_i32:
3780 return C_O0_I4(r, r, ri, ri);
3781 case INDEX_op_setcond2_i32:
3782 return C_O1_I4(r, r, r, ri, ri);
3783 case INDEX_op_add2_i64:
3784 case INDEX_op_add2_i32:
3785 return C_O2_I4(r, r, r, r, rI, rZM);
3786 case INDEX_op_sub2_i64:
3787 case INDEX_op_sub2_i32:
3788 return C_O2_I4(r, r, rI, rZM, r, r);
3789
3790 case INDEX_op_qemu_ld_a32_i32:
3791 return C_O1_I1(r, r);
3792 case INDEX_op_qemu_ld_a64_i32:
3793 return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O1_I2(r, r, r);
3794 case INDEX_op_qemu_ld_a32_i64:
3795 return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I1(r, r, r);
3796 case INDEX_op_qemu_ld_a64_i64:
3797 return TCG_TARGET_REG_BITS == 64 ? C_O1_I1(r, r) : C_O2_I2(r, r, r, r);
3798
3799 case INDEX_op_qemu_st_a32_i32:
3800 return C_O0_I2(r, r);
3801 case INDEX_op_qemu_st_a64_i32:
3802 return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
3803 case INDEX_op_qemu_st_a32_i64:
3804 return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I3(r, r, r);
3805 case INDEX_op_qemu_st_a64_i64:
3806 return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(r, r) : C_O0_I4(r, r, r, r);
3807
3808 case INDEX_op_qemu_ld_a32_i128:
3809 case INDEX_op_qemu_ld_a64_i128:
3810 return C_O2_I1(o, m, r);
3811 case INDEX_op_qemu_st_a32_i128:
3812 case INDEX_op_qemu_st_a64_i128:
3813 return C_O0_I3(o, m, r);
3814
3815 case INDEX_op_add_vec:
3816 case INDEX_op_sub_vec:
3817 case INDEX_op_mul_vec:
3818 case INDEX_op_and_vec:
3819 case INDEX_op_or_vec:
3820 case INDEX_op_xor_vec:
3821 case INDEX_op_andc_vec:
3822 case INDEX_op_orc_vec:
3823 case INDEX_op_nor_vec:
3824 case INDEX_op_eqv_vec:
3825 case INDEX_op_nand_vec:
3826 case INDEX_op_cmp_vec:
3827 case INDEX_op_ssadd_vec:
3828 case INDEX_op_sssub_vec:
3829 case INDEX_op_usadd_vec:
3830 case INDEX_op_ussub_vec:
3831 case INDEX_op_smax_vec:
3832 case INDEX_op_smin_vec:
3833 case INDEX_op_umax_vec:
3834 case INDEX_op_umin_vec:
3835 case INDEX_op_shlv_vec:
3836 case INDEX_op_shrv_vec:
3837 case INDEX_op_sarv_vec:
3838 case INDEX_op_rotlv_vec:
3839 case INDEX_op_rotrv_vec:
3840 case INDEX_op_ppc_mrgh_vec:
3841 case INDEX_op_ppc_mrgl_vec:
3842 case INDEX_op_ppc_muleu_vec:
3843 case INDEX_op_ppc_mulou_vec:
3844 case INDEX_op_ppc_pkum_vec:
3845 case INDEX_op_dup2_vec:
3846 return C_O1_I2(v, v, v);
3847
3848 case INDEX_op_not_vec:
3849 case INDEX_op_neg_vec:
3850 return C_O1_I1(v, v);
3851
3852 case INDEX_op_dup_vec:
3853 return have_isa_3_00 ? C_O1_I1(v, vr) : C_O1_I1(v, v);
3854
3855 case INDEX_op_ld_vec:
3856 case INDEX_op_dupm_vec:
3857 return C_O1_I1(v, r);
3858
3859 case INDEX_op_st_vec:
3860 return C_O0_I2(v, r);
3861
3862 case INDEX_op_bitsel_vec:
3863 case INDEX_op_ppc_msum_vec:
3864 return C_O1_I3(v, v, v, v);
3865
3866 default:
3867 g_assert_not_reached();
3868 }
3869 }
3870
3871 static void tcg_target_init(TCGContext *s)
3872 {
3873 unsigned long hwcap = qemu_getauxval(AT_HWCAP);
3874 unsigned long hwcap2 = qemu_getauxval(AT_HWCAP2);
3875
    have_isa = tcg_isa_base;
    if (hwcap & PPC_FEATURE_ARCH_2_06) {
        have_isa = tcg_isa_2_06;
    }
#ifdef PPC_FEATURE2_ARCH_2_07
    if (hwcap2 & PPC_FEATURE2_ARCH_2_07) {
        have_isa = tcg_isa_2_07;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_00
    if (hwcap2 & PPC_FEATURE2_ARCH_3_00) {
        have_isa = tcg_isa_3_00;
    }
#endif
#ifdef PPC_FEATURE2_ARCH_3_10
    if (hwcap2 & PPC_FEATURE2_ARCH_3_10) {
        have_isa = tcg_isa_3_10;
    }
#endif

#ifdef PPC_FEATURE2_HAS_ISEL
    /* Prefer the kernel's explicit indication. */
    have_isel = (hwcap2 & PPC_FEATURE2_HAS_ISEL) != 0;
#else
    /* Fall back to knowing that Power7 (2.06) has ISEL. */
    have_isel = have_isa_2_06;
#endif
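
    /*
     * isel rt, ra, rb, bc yields ra (or 0 when the ra field is 0) if
     * CR bit bc is set, and rb otherwise; with it, setcond and
     * movcond need no branches.
     */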

    if (hwcap & PPC_FEATURE_HAS_ALTIVEC) {
        have_altivec = true;
        /* We only care about the portion of VSX that overlaps Altivec. */
        if (hwcap & PPC_FEATURE_HAS_VSX) {
            have_vsx = true;
        }
    }

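    /*
     * TCG packs the 32 gprs into bits 0-31 of a register set and the
     * 32 vector registers into bits 32-63, matching ALL_GENERAL_REGS
     * and ALL_VECTOR_REGS above.
     */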
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    if (have_altivec) {
        tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
        tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
    }

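    /*
     * r0 and r3-r12 are volatile in all supported PowerPC ABIs, as
     * are v0-v19; r2 (the TOC pointer on AIX-style ABIs) must also be
     * treated as clobbered across calls.
     */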
    tcg_target_call_clobber_regs = 0;
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R12);

    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V1);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V2);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V3);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V4);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V5);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V6);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V7);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V8);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V9);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V10);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V11);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V12);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V13);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V14);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V15);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V16);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V17);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V18);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_V19);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
#if defined(_CALL_SYSV)
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* toc pointer */
#endif
#if defined(_CALL_SYSV) || TCG_TARGET_REG_BITS == 64
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
#endif
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_VEC_TMP2);
    if (USE_REG_TB) {
        tcg_regset_set_reg(s->reserved_regs, TCG_REG_TB); /* tb->tc_ptr */
    }
}

#ifdef __ELF__
typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDEHeader fde;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
} DebugFrame;

/* We're expecting a two-byte uleb128 encoded value. */
QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
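
/*
 * uleb128 stores seven bits per byte, least significant first, with
 * the high bit marking a continuation.  E.g. a FRAME_SIZE of 496
 * (0x1f0) would be emitted as the two bytes 0xf0, 0x03, which is
 * exactly what fde_def_cfa below constructs.
 */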

#if TCG_TARGET_REG_BITS == 64
# define ELF_HOST_MACHINE EM_PPC64
#else
# define ELF_HOST_MACHINE EM_PPC
#endif

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = (-SZR & 0x7f), /* sleb128 -SZR */
    .cie.return_column = 65,

    /* Total FDE size does not include the "len" member. */
    .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_R1,             /* DW_CFA_def_cfa r1, ... */
        (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        /* DW_CFA_offset_extended_sf, lr, LR_OFFSET */
        0x11, 65, (LR_OFFSET / -SZR) & 0x7f,
    }
};

void tcg_register_jit(const void *buf, size_t buf_size)
{
    uint8_t *p = &debug_frame.fde_reg_ofs[3];
    int i;

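    /*
     * Emit one DW_CFA_offset per callee-saved gpr: the opcode is 0x80
     * plus the register number, followed by the uleb128 offset from
     * the CFA in data_align (-SZR) units.
     */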
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
        p[0] = 0x80 + tcg_target_callee_save_regs[i];
        p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * SZR)) / SZR;
    }

    debug_frame.fde.func_start = (uintptr_t)buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
#endif /* __ELF__ */
#undef VMULEUB
#undef VMULEUH
#undef VMULEUW
#undef VMULOUB
#undef VMULOUH
#undef VMULOUW
#undef VMSUMUHM