/*
 * tcg/ppc64/tcg-target.c — Tiny Code Generator backend for 64-bit PowerPC.
 * (Snapshot from qemu.git at commit "tcg-ppc64: Use I constraint for mul".)
 */
1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #define TCG_CT_CONST_S16 0x100
26 #define TCG_CT_CONST_U16 0x200
27 #define TCG_CT_CONST_S32 0x400
28 #define TCG_CT_CONST_U32 0x800
29 #define TCG_CT_CONST_ZERO 0x1000
30
31 static uint8_t *tb_ret_addr;
32
33 #define FAST_PATH
34
35 #if TARGET_LONG_BITS == 32
36 #define LD_ADDR LWZU
37 #define CMP_L 0
38 #else
39 #define LD_ADDR LDU
40 #define CMP_L (1<<21)
41 #endif
42
43 #ifndef GUEST_BASE
44 #define GUEST_BASE 0
45 #endif
46
47 #define HAVE_ISA_2_06 0
48
49 #ifdef CONFIG_USE_GUEST_BASE
50 #define TCG_GUEST_BASE_REG 30
51 #else
52 #define TCG_GUEST_BASE_REG 0
53 #endif
54
#ifndef NDEBUG
/* Human-readable register names for debug output, indexed by TCG register
   number (one entry per PPC general-purpose register).  Compiled in only
   when assertions are enabled. */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r0",
    "r1",
    "r2",
    "r3",
    "r4",
    "r5",
    "r6",
    "r7",
    "r8",
    "r9",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
    "r16",
    "r17",
    "r18",
    "r19",
    "r20",
    "r21",
    "r22",
    "r23",
    "r24",
    "r25",
    "r26",
    "r27",
    "r28",
    "r29",
    "r30",
    "r31"
};
#endif
91
/* Register allocation preference order.  Call-saved registers are listed
   first so allocated values survive helper calls; call-clobbered argument
   registers follow.  r2 (TOC) and r11 differ between the Apple and ELF
   ABIs, hence the #ifdefs.  r0, r1 (stack) and r13 are never allocated. */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31,
#ifdef __APPLE__
    TCG_REG_R2,
#endif
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
#ifndef __APPLE__
    TCG_REG_R11,
#endif
    TCG_REG_R12,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27
};
127
/* Integer argument registers for the 64-bit PPC calling convention
   (arguments are passed in r3..r10). */
static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10
};
138
/* Integer return-value register (r3). */
static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R3
};
142
/* Registers saved/restored by the TB prologue/epilogue.  Must match the
   frame layout computed in tcg_target_qemu_prologue. */
static const int tcg_target_callee_save_regs[] = {
#ifdef __APPLE__
    TCG_REG_R11,
#endif
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27, /* currently used for the global env */
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31
};
166
/* Compute the LI displacement field of an I-form branch at PC targeting
   TARGET.  The field holds a signed 26-bit, word-aligned offset; aborts
   if the displacement does not fit.  NOTE(review): relies on arithmetic
   right shift of a signed value (GCC-defined, not guaranteed by ISO C). */
static uint32_t reloc_pc24_val (void *pc, tcg_target_long target)
{
    tcg_target_long disp;

    disp = target - (tcg_target_long) pc;
    /* Sign-extend from 26 bits; a round trip that changes the value
       means the branch target is out of range. */
    if ((disp << 38) >> 38 != disp)
        tcg_abort ();

    return disp & 0x3fffffc;
}
177
/* Patch the 26-bit displacement of the branch instruction at PC so it
   jumps to TARGET, preserving the opcode/AA/LK bits. */
static void reloc_pc24 (void *pc, tcg_target_long target)
{
    *(uint32_t *) pc = (*(uint32_t *) pc & ~0x3fffffc)
        | reloc_pc24_val (pc, target);
}
183
/* Compute the BD displacement field of a B-form conditional branch at PC
   targeting TARGET.  The field holds a signed 16-bit, word-aligned
   offset; aborts if the displacement does not fit. */
static uint16_t reloc_pc14_val (void *pc, tcg_target_long target)
{
    tcg_target_long disp;

    disp = target - (tcg_target_long) pc;
    if (disp != (int16_t) disp)
        tcg_abort ();

    return disp & 0xfffc;
}
194
/* Patch the 16-bit displacement of the conditional branch at PC so it
   jumps to TARGET, preserving all other instruction bits. */
static void reloc_pc14 (void *pc, tcg_target_long target)
{
    *(uint32_t *) pc = (*(uint32_t *) pc & ~0xfffc)
        | reloc_pc14_val (pc, target);
}
200
/* Apply a relocation recorded by tcg_out_reloc: only the two PPC branch
   relocation types are supported.  VALUE is the label address, ADDEND the
   offset recorded with the relocation. */
static void patch_reloc (uint8_t *code_ptr, int type,
                         tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch (type) {
    case R_PPC_REL14:
        reloc_pc14 (code_ptr, value);
        break;
    case R_PPC_REL24:
        reloc_pc24 (code_ptr, value);
        break;
    default:
        tcg_abort ();
    }
}
216
217 /* parse target specific constraints */
/* parse target specific constraints */
/* Translate one constraint letter from the opcode constraint table into
   ct->ct flags / register sets.  Returns 0 on success, -1 on an unknown
   letter.  Letters: A-D pin to r3-r6; r is any GPR; L/S are qemu_ld/st
   operands with the registers used by the slow path excluded; I/J/T/U
   are signed/unsigned 16/32-bit immediates; Z is the constant zero. */
static int target_parse_constraint (TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'A': case 'B': case 'C': case 'D':
        /* Fixed registers r3..r6, selected by the letter. */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg (ct->u.regs, 3 + ct_str[0] - 'A');
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32 (ct->u.regs, 0, 0xffffffff);
        break;
    case 'L':                   /* qemu_ld constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32 (ct->u.regs, 0, 0xffffffff);
        /* r3 (and r4/r5 under softmmu) are clobbered by the slow path. */
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R5);
#endif
        break;
    case 'S':                   /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32 (ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        /* The store slow path additionally uses r6 for the data value. */
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R5);
        tcg_regset_reset_reg (ct->u.regs, TCG_REG_R6);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'T':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'U':
        ct->ct |= TCG_CT_CONST_U32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
273
274 /* test if a constant matches the constraint */
/* test if a constant matches the constraint */
/* Returns nonzero when VAL satisfies one of the constant constraints set
   in ARG_CT (any constant, s16, u16, s32, u32, or exactly zero). */
static int tcg_target_const_match (tcg_target_long val,
                                   const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    return 0;
}
294
295 #define OPCD(opc) ((opc)<<26)
296 #define XO19(opc) (OPCD(19)|((opc)<<1))
297 #define XO30(opc) (OPCD(30)|((opc)<<2))
298 #define XO31(opc) (OPCD(31)|((opc)<<1))
299 #define XO58(opc) (OPCD(58)|(opc))
300 #define XO62(opc) (OPCD(62)|(opc))
301
302 #define B OPCD( 18)
303 #define BC OPCD( 16)
304 #define LBZ OPCD( 34)
305 #define LHZ OPCD( 40)
306 #define LHA OPCD( 42)
307 #define LWZ OPCD( 32)
308 #define STB OPCD( 38)
309 #define STH OPCD( 44)
310 #define STW OPCD( 36)
311
312 #define STD XO62( 0)
313 #define STDU XO62( 1)
314 #define STDX XO31(149)
315
316 #define LD XO58( 0)
317 #define LDX XO31( 21)
318 #define LDU XO58( 1)
319 #define LWA XO58( 2)
320 #define LWAX XO31(341)
321
322 #define ADDIC OPCD( 12)
323 #define ADDI OPCD( 14)
324 #define ADDIS OPCD( 15)
325 #define ORI OPCD( 24)
326 #define ORIS OPCD( 25)
327 #define XORI OPCD( 26)
328 #define XORIS OPCD( 27)
329 #define ANDI OPCD( 28)
330 #define ANDIS OPCD( 29)
331 #define MULLI OPCD( 7)
332 #define CMPLI OPCD( 10)
333 #define CMPI OPCD( 11)
334 #define SUBFIC OPCD( 8)
335
336 #define LWZU OPCD( 33)
337 #define STWU OPCD( 37)
338
339 #define RLWIMI OPCD( 20)
340 #define RLWINM OPCD( 21)
341 #define RLWNM OPCD( 23)
342
343 #define RLDICL XO30( 0)
344 #define RLDICR XO30( 1)
345 #define RLDIMI XO30( 3)
346 #define RLDCL XO30( 8)
347
348 #define BCLR XO19( 16)
349 #define BCCTR XO19(528)
350 #define CRAND XO19(257)
351 #define CRANDC XO19(129)
352 #define CRNAND XO19(225)
353 #define CROR XO19(449)
354 #define CRNOR XO19( 33)
355
356 #define EXTSB XO31(954)
357 #define EXTSH XO31(922)
358 #define EXTSW XO31(986)
359 #define ADD XO31(266)
360 #define ADDE XO31(138)
361 #define ADDC XO31( 10)
362 #define AND XO31( 28)
363 #define SUBF XO31( 40)
364 #define SUBFC XO31( 8)
365 #define SUBFE XO31(136)
366 #define OR XO31(444)
367 #define XOR XO31(316)
368 #define MULLW XO31(235)
369 #define MULHWU XO31( 11)
370 #define DIVW XO31(491)
371 #define DIVWU XO31(459)
372 #define CMP XO31( 0)
373 #define CMPL XO31( 32)
374 #define LHBRX XO31(790)
375 #define LWBRX XO31(534)
376 #define LDBRX XO31(532)
377 #define STHBRX XO31(918)
378 #define STWBRX XO31(662)
379 #define STDBRX XO31(660)
380 #define MFSPR XO31(339)
381 #define MTSPR XO31(467)
382 #define SRAWI XO31(824)
383 #define NEG XO31(104)
384 #define MFCR XO31( 19)
385 #define NOR XO31(124)
386 #define CNTLZW XO31( 26)
387 #define CNTLZD XO31( 58)
388 #define ANDC XO31( 60)
389 #define ORC XO31(412)
390 #define EQV XO31(284)
391 #define NAND XO31(476)
392
393 #define MULLD XO31(233)
394 #define MULHD XO31( 73)
395 #define MULHDU XO31( 9)
396 #define DIVD XO31(489)
397 #define DIVDU XO31(457)
398
399 #define LBZX XO31( 87)
400 #define LHZX XO31(279)
401 #define LHAX XO31(343)
402 #define LWZX XO31( 23)
403 #define STBX XO31(215)
404 #define STHX XO31(407)
405 #define STWX XO31(151)
406
407 #define SPR(a,b) ((((a)<<5)|(b))<<11)
408 #define LR SPR(8, 0)
409 #define CTR SPR(9, 0)
410
411 #define SLW XO31( 24)
412 #define SRW XO31(536)
413 #define SRAW XO31(792)
414
415 #define SLD XO31( 27)
416 #define SRD XO31(539)
417 #define SRAD XO31(794)
418 #define SRADI XO31(413<<1)
419
420 #define TW XO31( 4)
421 #define TRAP (TW | TO (31))
422
423 #define RT(r) ((r)<<21)
424 #define RS(r) ((r)<<21)
425 #define RA(r) ((r)<<16)
426 #define RB(r) ((r)<<11)
427 #define TO(t) ((t)<<21)
428 #define SH(s) ((s)<<11)
429 #define MB(b) ((b)<<6)
430 #define ME(e) ((e)<<1)
431 #define BO(o) ((o)<<21)
432 #define MB64(b) ((b)<<5)
433
434 #define LK 1
435
436 #define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
437 #define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
438 #define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
439 #define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))
440
441 #define BF(n) ((n)<<23)
442 #define BI(n, c) (((c)+((n)*4))<<16)
443 #define BT(n, c) (((c)+((n)*4))<<21)
444 #define BA(n, c) (((c)+((n)*4))<<16)
445 #define BB(n, c) (((c)+((n)*4))<<11)
446
447 #define BO_COND_TRUE BO (12)
448 #define BO_COND_FALSE BO ( 4)
449 #define BO_ALWAYS BO (20)
450
/* Bit positions within a condition-register field (LT, GT, EQ, SO). */
enum {
    CR_LT,
    CR_GT,
    CR_EQ,
    CR_SO
};

/* Conditional branch encodings indexed by TCG condition.  All of them
   test CR field 7, which is where tcg_out_cmp deposits its result;
   signed vs unsigned is decided by the preceding compare instruction,
   so e.g. LT and LTU use the same branch. */
static const uint32_t tcg_to_bc[] = {
    [TCG_COND_EQ]  = BC | BI (7, CR_EQ) | BO_COND_TRUE,
    [TCG_COND_NE]  = BC | BI (7, CR_EQ) | BO_COND_FALSE,
    [TCG_COND_LT]  = BC | BI (7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GE]  = BC | BI (7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LE]  = BC | BI (7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GT]  = BC | BI (7, CR_GT) | BO_COND_TRUE,
    [TCG_COND_LTU] = BC | BI (7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GEU] = BC | BI (7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LEU] = BC | BI (7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GTU] = BC | BI (7, CR_GT) | BO_COND_TRUE,
};
470
/* Register-to-register move, emitted as OR ret,arg,arg (the canonical
   PPC "mr" idiom); works for both 32- and 64-bit types. */
static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    tcg_out32 (s, OR | SAB (arg, ret, arg));
}
476
/* Emit an MD-form 64-bit rotate instruction (RLDICL/RLDICR/RLDIMI...).
   The 6-bit SH and MB fields are split across the instruction word, so
   the high bit of each is relocated here before OR-ing into the opcode. */
static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb)
{
    sh = SH (sh & 0x1f) | (((sh >> 5) & 1) << 1);
    mb = MB64 ((mb >> 5) | ((mb << 1) & 0x3f));
    tcg_out32 (s, op | RA (ra) | RS (rs) | sh | mb);
}
484
/* Emit an M-form 32-bit rotate-and-mask instruction (RLWINM etc.) with
   shift SH and mask running from bit MB to bit ME (IBM bit numbering). */
static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb, int me)
{
    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
}
490
/* Zero-extend the low 32 bits of SRC into DST (clear the high word). */
static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
}
495
/* 64-bit shift left by constant C, via rotate-and-clear-right. */
static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
}
500
/* 64-bit logical shift right by constant C, via rotate-and-clear-left. */
static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
}
505
/* Load a 32-bit immediate into RET: a single ADDI (li) when it fits in
   a signed 16 bits, otherwise ADDIS (lis) for the high half plus ORI for
   any nonzero low half. */
static void tcg_out_movi32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (arg == (int16_t) arg) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
    } else {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}
517
/* Load an arbitrary immediate into RET.  Values representable as sign-
   extended 32 bits use the 32-bit path; values representable as
   zero-extended 32 bits with bit 15 clear use ADDI+ORIS (ADDI must not
   sign-extend into the upper half, hence the 0x8000 exclusion).
   Everything else builds the high word, shifts it up, then ORs in the
   low halves. */
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
                         tcg_target_long arg)
{
    if (type == TCG_TYPE_I32 || arg == (int32_t)arg) {
        tcg_out_movi32(s, ret, arg);
    } else if (arg == (uint32_t)arg && !(arg & 0x8000)) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
    } else {
        int32_t high = arg >> 32;
        tcg_out_movi32(s, ret, high);
        if (high) {
            tcg_out_shli64(s, ret, ret, 32);
        }
        if (arg & 0xffff0000) {
            tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
        }
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}
540
541 static bool mask_operand(uint32_t c, int *mb, int *me)
542 {
543 uint32_t lsb, test;
544
545 /* Accept a bit pattern like:
546 0....01....1
547 1....10....0
548 0..01..10..0
549 Keep track of the transitions. */
550 if (c == 0 || c == -1) {
551 return false;
552 }
553 test = c;
554 lsb = test & -test;
555 test += lsb;
556 if (test & (test - 1)) {
557 return false;
558 }
559
560 *me = clz32(lsb);
561 *mb = test ? clz32(test & -test) + 1 : 0;
562 return true;
563 }
564
/* Decide whether C is a 64-bit mask expressible by a single RLDICL or
   RLDICR (i.e. ones anchored at one end of the word), returning the
   boundaries in *MB/*ME.  Unlike the 32-bit case, interior masks are
   not representable without a shift, so only the two anchored shapes
   are accepted. */
static bool mask64_operand(uint64_t c, int *mb, int *me)
{
    uint64_t lsb;

    if (c == 0) {
        return false;
    }

    lsb = c & -c;
    /* Accept 1..10..0.  */
    if (c == -lsb) {
        *mb = 0;
        *me = clz64(lsb);
        return true;
    }
    /* Accept 0..01..1.  */
    if (lsb == 1 && (c & (c + 1)) == 0) {
        *mb = clz64(c + 1) + 1;
        *me = 63;
        return true;
    }
    return false;
}
588
/* AND of SRC with 32-bit constant C into DST.  Prefers the 16-bit
   immediate forms, then a single RLWINM for contiguous masks; otherwise
   materializes C in r0 and uses register AND. */
static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    int mb, me;

    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask_operand(c, &mb, &me)) {
        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
    } else {
        /* r0 used as scratch for the constant. */
        tcg_out_movi(s, TCG_TYPE_I32, 0, c);
        tcg_out32(s, AND | SAB(src, dst, 0));
    }
}
606
/* AND of SRC with 64-bit constant C into DST.  Same strategy as the
   32-bit version, but anchored masks map onto RLDICR (ones at the top)
   or RLDICL (ones at the bottom). */
static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
{
    int mb, me;

    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask64_operand(c, &mb, &me)) {
        if (mb == 0) {
            tcg_out_rld(s, RLDICR, dst, src, 0, me);
        } else {
            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
        }
    } else {
        /* r0 used as scratch for the constant. */
        tcg_out_movi(s, TCG_TYPE_I64, 0, c);
        tcg_out32(s, AND | SAB(src, dst, 0));
    }
}
628
629 static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
630 int op_lo, int op_hi)
631 {
632 if (c >> 16) {
633 tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
634 src = dst;
635 }
636 if (c & 0xffff) {
637 tcg_out32(s, op_lo | SAI(src, dst, c));
638 src = dst;
639 }
640 }
641
/* OR of SRC with 32-bit immediate C into DST (ORI/ORIS pair). */
static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
}
646
/* XOR of SRC with 32-bit immediate C into DST (XORI/XORIS pair). */
static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
}
651
/* Emit an unconditional branch to TARGET.  MASK carries extra opcode
   bits (LK for a call).  Uses a direct I-form branch when the target is
   within the signed 26-bit range, otherwise loads the address into r0
   and branches through CTR. */
static void tcg_out_b (TCGContext *s, int mask, tcg_target_long target)
{
    tcg_target_long disp;

    disp = target - (tcg_target_long) s->code_ptr;
    if ((disp << 38) >> 38 == disp)
        tcg_out32 (s, B | (disp & 0x3fffffc) | mask);
    else {
        /* Out of direct range: r0 as scratch, branch via CTR. */
        tcg_out_movi (s, TCG_TYPE_I64, 0, (tcg_target_long) target);
        tcg_out32 (s, MTSPR | RS (0) | CTR);
        tcg_out32 (s, BCCTR | BO_ALWAYS | mask);
    }
}
665
666 static void tcg_out_call (TCGContext *s, tcg_target_long arg, int const_arg)
667 {
668 #ifdef __APPLE__
669 if (const_arg) {
670 tcg_out_b (s, LK, arg);
671 }
672 else {
673 tcg_out32 (s, MTSPR | RS (arg) | LR);
674 tcg_out32 (s, BCLR | BO_ALWAYS | LK);
675 }
676 #else
677 int reg;
678
679 if (const_arg) {
680 reg = 2;
681 tcg_out_movi (s, TCG_TYPE_I64, reg, arg);
682 }
683 else reg = arg;
684
685 tcg_out32 (s, LD | RT (0) | RA (reg));
686 tcg_out32 (s, MTSPR | RA (0) | CTR);
687 tcg_out32 (s, LD | RT (11) | RA (reg) | 16);
688 tcg_out32 (s, LD | RT (2) | RA (reg) | 8);
689 tcg_out32 (s, BCCTR | BO_ALWAYS | LK);
690 #endif
691 }
692
/* Load/store RET at ADDR+OFFSET using the D-form OP1 when the offset
   fits in signed 16 bits, else the X-form OP2 with the offset in r0. */
static void tcg_out_ldst(TCGContext *s, TCGReg ret, TCGReg addr,
                         int offset, int op1, int op2)
{
    if (offset == (int16_t) offset) {
        tcg_out32(s, op1 | TAI(ret, addr, offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, 0, offset);
        tcg_out32(s, op2 | TAB(ret, addr, 0));
    }
}
703
/* As tcg_out_ldst, but for DS-form instructions (LD/STD) whose
   displacement must be 4-byte aligned: the immediate form is only used
   when the offset both fits in 16 bits and has its low 2 bits clear. */
static void tcg_out_ldsta(TCGContext *s, TCGReg ret, TCGReg addr,
                          int offset, int op1, int op2)
{
    if (offset == (int16_t) (offset & ~3)) {
        tcg_out32(s, op1 | TAI(ret, addr, offset));
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, 0, offset);
        tcg_out32(s, op2 | TAB(ret, addr, 0));
    }
}
714
#if defined (CONFIG_SOFTMMU)

#include "exec/softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
/* Slow-path load helpers, indexed by log2 of the access size. */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
/* Slow-path store helpers, indexed by log2 of the access size. */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};

/* Emit the TLB lookup for ADDR_REG.  On exit: R0 points at the matched
   TLB entry comparator (loaded with update form, so R0 can later be used
   to reach the addend field), R1 holds the comparator value read from
   the entry at OFFSET, and R2 holds the page-aligned address bits to
   compare against (with the low S_BITS kept so unaligned accesses
   mis-compare and take the slow path). */
static void tcg_out_tlb_read(TCGContext *s, TCGReg r0, TCGReg r1, TCGReg r2,
                             TCGReg addr_reg, int s_bits, int offset)
{
#if TARGET_LONG_BITS == 32
    tcg_out_ext32u(s, addr_reg, addr_reg);

    /* Index into the TLB: extract the page index and scale it by the
       entry size in a single rotate-and-mask. */
    tcg_out_rlw(s, RLWINM, r0, addr_reg,
                32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS),
                31 - CPU_TLB_ENTRY_BITS);
    tcg_out32(s, ADD | TAB(r0, r0, TCG_AREG0));
    tcg_out32(s, LWZU | TAI(r1, r0, offset));
    tcg_out_rlw(s, RLWINM, r2, addr_reg, 0,
                (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS);
#else
    tcg_out_rld (s, RLDICL, r0, addr_reg,
                 64 - TARGET_PAGE_BITS,
                 64 - CPU_TLB_BITS);
    tcg_out_shli64(s, r0, r0, CPU_TLB_ENTRY_BITS);

    tcg_out32(s, ADD | TAB(r0, r0, TCG_AREG0));
    tcg_out32(s, LD_ADDR | TAI(r1, r0, offset));

    if (!s_bits) {
        /* Byte access: just clear the in-page bits. */
        tcg_out_rld (s, RLDICR, r2, addr_reg, 0, 63 - TARGET_PAGE_BITS);
    }
    else {
        /* Wider access: keep the low S_BITS so an unaligned address
           fails the comparison and falls back to the helper. */
        tcg_out_rld (s, RLDICL, r2, addr_reg,
                     64 - TARGET_PAGE_BITS,
                     TARGET_PAGE_BITS - s_bits);
        tcg_out_rld (s, RLDICL, r2, r2, TARGET_PAGE_BITS, 0);
    }
#endif
}
#endif
772
/* Indexed-load opcodes for qemu_ld, indexed by opc (bits 0-1: log2 size,
   bit 2: sign-extend).  On little-endian targets the byte-reversing
   forms are used; a 0 entry means no single instruction exists and a
   zero-extending load plus explicit sign-extension is emitted instead. */
static const uint32_t qemu_ldx_opc[8] = {
#ifdef TARGET_WORDS_BIGENDIAN
    LBZX, LHZX, LWZX, LDX,
    0,    LHAX, LWAX, LDX
#else
    LBZX, LHBRX, LWBRX, LDBRX,
    0,    0,     0,     LDBRX,
#endif
};

/* Indexed-store opcodes for qemu_st, indexed by log2 of the size. */
static const uint32_t qemu_stx_opc[4] = {
#ifdef TARGET_WORDS_BIGENDIAN
    STBX, STHX, STWX, STDX
#else
    STBX, STHBRX, STWBRX, STDBRX,
#endif
};

/* Sign-extension opcodes by log2 size (no EXTSD needed for 64-bit). */
static const uint32_t qemu_exts_opc[4] = {
    EXTSB, EXTSH, EXTSW, 0
};
794
/* Emit a guest memory load.  ARGS is { data_reg, addr_reg, mem_index };
   OPC encodes log2 of the size in bits 0-1 and sign-extension in bit 2.
   With softmmu, a TLB hit falls through to an inline indexed load and a
   miss calls the qemu_ld helper; without softmmu the access goes
   directly through GUEST_BASE. */
static void tcg_out_qemu_ld (TCGContext *s, const TCGArg *args, int opc)
{
    TCGReg addr_reg, data_reg, r0, r1, rbase;
    uint32_t insn, s_bits;
#ifdef CONFIG_SOFTMMU
    TCGReg r2, ir;
    int mem_index;
    void *label1_ptr, *label2_ptr;
#endif

    data_reg = *args++;
    addr_reg = *args++;
    s_bits = opc & 3;

#ifdef CONFIG_SOFTMMU
    mem_index = *args;

    r0 = 3;
    r1 = 4;
    r2 = 0;
    rbase = 0;

    tcg_out_tlb_read (s, r0, r1, r2, addr_reg, s_bits,
                      offsetof (CPUArchState, tlb_table[mem_index][0].addr_read));

    /* Compare the TLB comparator (r1) against the page bits (r2). */
    tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L);

    label1_ptr = s->code_ptr;
#ifdef FAST_PATH
    /* Branch to the fast path on a TLB hit; patched below. */
    tcg_out32 (s, BC | BI (7, CR_EQ) | BO_COND_TRUE);
#endif

    /* slow path */
    ir = 3;
    tcg_out_mov (s, TCG_TYPE_I64, ir++, TCG_AREG0);
    tcg_out_mov (s, TCG_TYPE_I64, ir++, addr_reg);
    tcg_out_movi (s, TCG_TYPE_I64, ir++, mem_index);

    tcg_out_call (s, (tcg_target_long) qemu_ld_helpers[s_bits], 1);

    /* Result comes back in r3; sign-extend or move into place. */
    if (opc & 4) {
        insn = qemu_exts_opc[s_bits];
        tcg_out32(s, insn | RA(data_reg) | RS(3));
    } else if (data_reg != 3) {
        tcg_out_mov(s, TCG_TYPE_I64, data_reg, 3);
    }
    label2_ptr = s->code_ptr;
    /* Skip the fast path; patched to branch past it below. */
    tcg_out32 (s, B);

    /* label1: fast path */
#ifdef FAST_PATH
    reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr);
#endif

    /* r0 now contains &env->tlb_table[mem_index][index].addr_read */
    tcg_out32(s, LD | TAI(r0, r0,
                          offsetof(CPUTLBEntry, addend)
                          - offsetof(CPUTLBEntry, addr_read)));
    /* r0 = env->tlb_table[mem_index][index].addend */
    tcg_out32(s, ADD | TAB(r0, r0, addr_reg));
    /* r0 = env->tlb_table[mem_index][index].addend + addr */

#else  /* !CONFIG_SOFTMMU */
#if TARGET_LONG_BITS == 32
    tcg_out_ext32u(s, addr_reg, addr_reg);
#endif
    r0 = addr_reg;
    r1 = 3;
    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
#endif

    insn = qemu_ldx_opc[opc];
    if (!HAVE_ISA_2_06 && insn == LDBRX) {
        /* No LDBRX before ISA 2.06: compose from two LWBRX halves. */
        tcg_out32(s, ADDI | TAI(r1, r0, 4));
        tcg_out32(s, LWBRX | TAB(data_reg, rbase, r0));
        tcg_out32(s, LWBRX | TAB(      r1, rbase, r1));
        tcg_out_rld(s, RLDIMI, data_reg, r1, 32, 0);
    } else if (insn) {
        tcg_out32(s, insn | TAB(data_reg, rbase, r0));
    } else {
        /* No direct sign-extending form: zero-extending load, then
           explicit sign extension. */
        insn = qemu_ldx_opc[s_bits];
        tcg_out32(s, insn | TAB(data_reg, rbase, r0));
        insn = qemu_exts_opc[s_bits];
        tcg_out32 (s, insn | RA(data_reg) | RS(data_reg));
    }

#ifdef CONFIG_SOFTMMU
    reloc_pc24 (label2_ptr, (tcg_target_long) s->code_ptr);
#endif
}
885
/* Emit a guest memory store.  ARGS is { data_reg, addr_reg, mem_index };
   OPC is log2 of the access size.  Mirrors tcg_out_qemu_ld: TLB hit
   falls through to an inline indexed store, miss calls the qemu_st
   helper. */
static void tcg_out_qemu_st (TCGContext *s, const TCGArg *args, int opc)
{
    TCGReg addr_reg, r0, r1, rbase, data_reg;
    uint32_t insn;
#ifdef CONFIG_SOFTMMU
    TCGReg r2, ir;
    int mem_index;
    void *label1_ptr, *label2_ptr;
#endif

    data_reg = *args++;
    addr_reg = *args++;

#ifdef CONFIG_SOFTMMU
    mem_index = *args;

    r0 = 3;
    r1 = 4;
    r2 = 0;
    rbase = 0;

    tcg_out_tlb_read (s, r0, r1, r2, addr_reg, opc,
                      offsetof (CPUArchState, tlb_table[mem_index][0].addr_write));

    tcg_out32 (s, CMP | BF (7) | RA (r2) | RB (r1) | CMP_L);

    label1_ptr = s->code_ptr;
#ifdef FAST_PATH
    /* Branch to the fast path on a TLB hit; patched below. */
    tcg_out32 (s, BC | BI (7, CR_EQ) | BO_COND_TRUE);
#endif

    /* slow path */
    ir = 3;
    tcg_out_mov (s, TCG_TYPE_I64, ir++, TCG_AREG0);
    tcg_out_mov (s, TCG_TYPE_I64, ir++, addr_reg);
    /* Zero-extend the data value to the access width for the helper. */
    tcg_out_rld (s, RLDICL, ir++, data_reg, 0, 64 - (1 << (3 + opc)));
    tcg_out_movi (s, TCG_TYPE_I64, ir++, mem_index);

    tcg_out_call (s, (tcg_target_long) qemu_st_helpers[opc], 1);

    label2_ptr = s->code_ptr;
    tcg_out32 (s, B);

    /* label1: fast path */
#ifdef FAST_PATH
    reloc_pc14 (label1_ptr, (tcg_target_long) s->code_ptr);
#endif

    /* r0 points at the matched TLB entry's addr_write field; load the
       addend relative to it. */
    tcg_out32 (s, (LD
                   | RT (r0)
                   | RA (r0)
                   | (offsetof (CPUTLBEntry, addend)
                      - offsetof (CPUTLBEntry, addr_write))
                   ));
    /* r0 = env->tlb_table[mem_index][index].addend */
    tcg_out32(s, ADD | TAB(r0, r0, addr_reg));
    /* r0 = env->tlb_table[mem_index][index].addend + addr */

#else  /* !CONFIG_SOFTMMU */
#if TARGET_LONG_BITS == 32
    tcg_out_ext32u(s, addr_reg, addr_reg);
#endif
    r1 = 3;
    r0 = addr_reg;
    rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
#endif

    insn = qemu_stx_opc[opc];
    if (!HAVE_ISA_2_06 && insn == STDBRX) {
        /* No STDBRX before ISA 2.06: two byte-reversed word stores,
           with r0 as scratch for the high half. */
        tcg_out32(s, STWBRX | SAB(data_reg, rbase, r0));
        tcg_out32(s, ADDI | TAI(r1, r0, 4));
        tcg_out_shri64(s, 0, data_reg, 32);
        tcg_out32(s, STWBRX | SAB(0, rbase, r1));
    } else {
        tcg_out32(s, insn | SAB(data_reg, rbase, r0));
    }

#ifdef CONFIG_SOFTMMU
    reloc_pc24 (label2_ptr, (tcg_target_long) s->code_ptr);
#endif
}
967
/* Emit the prologue/epilogue shared by all translation blocks: build a
   stack frame, save callee-saved registers and LR, set up TCG_AREG0 and
   the optional guest-base register, then jump to the TB whose address
   arrived as the second C argument.  The epilogue (entered at
   tb_ret_addr) restores everything and returns.  On ELF a function
   descriptor is emitted in front of the code. */
static void tcg_target_qemu_prologue (TCGContext *s)
{
    int i, frame_size;
#ifndef __APPLE__
    uint64_t addr;
#endif

    frame_size = 0
        + 8                     /* back chain */
        + 8                     /* CR */
        + 8                     /* LR */
        + 8                     /* compiler doubleword */
        + 8                     /* link editor doubleword */
        + 8                     /* TOC save area */
        + TCG_STATIC_CALL_ARGS_SIZE
        + ARRAY_SIZE (tcg_target_callee_save_regs) * 8
        + CPU_TEMP_BUF_NLONGS * sizeof(long)
        ;
    /* Keep the stack 16-byte aligned as required by the ABI. */
    frame_size = (frame_size + 15) & ~15;

    tcg_set_frame (s, TCG_REG_CALL_STACK, frame_size
                   - CPU_TEMP_BUF_NLONGS * sizeof (long),
                   CPU_TEMP_BUF_NLONGS * sizeof (long));

#ifndef __APPLE__
    /* First emit adhoc function descriptor */
    addr = (uint64_t) s->code_ptr + 24;
    tcg_out32 (s, addr >> 32); tcg_out32 (s, addr); /* entry point */
    s->code_ptr += 16;          /* skip TOC and environment pointer */
#endif

    /* Prologue */
    tcg_out32 (s, MFSPR | RT (0) | LR);
    tcg_out32 (s, STDU | RS (1) | RA (1) | (-frame_size & 0xffff));
    for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
        tcg_out32 (s, (STD
                       | RS (tcg_target_callee_save_regs[i])
                       | RA (1)
                       | (i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE)
                       )
            );
    /* Save LR in the caller's frame (ABI LR save slot). */
    tcg_out32 (s, STD | RS (0) | RA (1) | (frame_size + 16));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE) {
        tcg_out_movi (s, TCG_TYPE_I64, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg (s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* env in the first C argument, TB address in the second. */
    tcg_out_mov (s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out32 (s, MTSPR | RS (tcg_target_call_iarg_regs[1]) | CTR);
    tcg_out32 (s, BCCTR | BO_ALWAYS);

    /* Epilogue */
    tb_ret_addr = s->code_ptr;

    for (i = 0; i < ARRAY_SIZE (tcg_target_callee_save_regs); ++i)
        tcg_out32 (s, (LD
                       | RT (tcg_target_callee_save_regs[i])
                       | RA (1)
                       | (i * 8 + 48 + TCG_STATIC_CALL_ARGS_SIZE)
                       )
            );
    tcg_out32(s, LD | TAI(0, 1, frame_size + 16));
    tcg_out32(s, MTSPR | RS(0) | LR);
    tcg_out32(s, ADDI | TAI(1, 1, frame_size));
    tcg_out32(s, BCLR | BO_ALWAYS);
}
1037
/* TCG load callback: 32-bit uses D-form LWZ, 64-bit the DS-form LD
   (which requires the aligned-offset variant tcg_out_ldsta). */
static void tcg_out_ld (TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                        tcg_target_long arg2)
{
    if (type == TCG_TYPE_I32)
        tcg_out_ldst (s, ret, arg1, arg2, LWZ, LWZX);
    else
        tcg_out_ldsta (s, ret, arg1, arg2, LD, LDX);
}
1046
/* TCG store callback, mirroring tcg_out_ld (STW vs DS-form STD). */
static void tcg_out_st (TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                        tcg_target_long arg2)
{
    if (type == TCG_TYPE_I32)
        tcg_out_ldst (s, arg, arg1, arg2, STW, STWX);
    else
        tcg_out_ldsta (s, arg, arg1, arg2, STD, STDX);
}
1055
/* Emit a compare of ARG1 against ARG2 into CR field CR.  Picks the
   signed (CMPI/CMP) or unsigned (CMPLI/CMPL) form according to COND,
   using an immediate form when CONST_ARG2 is set and the value fits in
   16 bits; otherwise the constant is materialized in r0.  ARCH64
   selects the 64-bit (L=1) comparison. */
static void tcg_out_cmp (TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
                         int const_arg2, int cr, int arch64)
{
    int imm;
    uint32_t op;

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        /* Equality can use either signedness; try both immediate forms. */
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            }
            else if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    case TCG_COND_LT:
    case TCG_COND_GE:
    case TCG_COND_LE:
    case TCG_COND_GT:
        if (const_arg2) {
            if ((int16_t) arg2 == arg2) {
                op = CMPI;
                imm = 1;
                break;
            }
        }
        op = CMP;
        imm = 0;
        break;

    case TCG_COND_LTU:
    case TCG_COND_GEU:
    case TCG_COND_LEU:
    case TCG_COND_GTU:
        if (const_arg2) {
            if ((uint16_t) arg2 == arg2) {
                op = CMPLI;
                imm = 1;
                break;
            }
        }
        op = CMPL;
        imm = 0;
        break;

    default:
        tcg_abort ();
    }
    /* Select the target CR field and the 32/64-bit L bit. */
    op |= BF (cr) | (arch64 << 21);

    if (imm)
        tcg_out32 (s, op | RA (arg1) | (arg2 & 0xffff));
    else {
        if (const_arg2) {
            /* Constant too wide for an immediate: stage it in r0. */
            tcg_out_movi (s, TCG_TYPE_I64, 0, arg2);
            tcg_out32 (s, op | RA (arg1) | RB (0));
        }
        else
            tcg_out32 (s, op | RA (arg1) | RB (arg2));
    }

}
1128
/* Emit setcond: ARG0 = (ARG1 COND ARG2) as 0/1.  EQ/NE are computed
   branch-free via XOR plus count-leading-zeros (EQ) or the classic
   addic/subfe carry trick (NE); the ordered conditions go through a
   real compare into CR7 and extract the relevant CR bit with MFCR.
   r0 is used throughout as scratch. */
static void tcg_out_setcond (TCGContext *s, TCGType type, TCGCond cond,
                             TCGArg arg0, TCGArg arg1, TCGArg arg2,
                             int const_arg2)
{
    int crop, sh, arg;

    switch (cond) {
    case TCG_COND_EQ:
        /* Reduce to "arg == 0": XOR the operands unless arg2 is 0. */
        if (const_arg2) {
            if (!arg2) {
                arg = arg1;
            }
            else {
                arg = 0;
                if ((uint16_t) arg2 == arg2) {
                    tcg_out32(s, XORI | SAI(arg1, 0, arg2));
                }
                else {
                    tcg_out_movi (s, type, 0, arg2);
                    tcg_out32 (s, XOR | SAB (arg1, 0, 0));
                }
            }
        }
        else {
            arg = 0;
            tcg_out32 (s, XOR | SAB (arg1, 0, arg2));
        }

        /* cntlz gives word-size iff the value is zero; shift the top
           bit of that count down to produce 0/1. */
        if (type == TCG_TYPE_I64) {
            tcg_out32 (s, CNTLZD | RS (arg) | RA (0));
            tcg_out_rld (s, RLDICL, arg0, 0, 58, 6);
        }
        else {
            tcg_out32 (s, CNTLZW | RS (arg) | RA (0));
            tcg_out_rlw(s, RLWINM, arg0, 0, 27, 5, 31);
        }
        break;

    case TCG_COND_NE:
        /* Same reduction to "arg != 0" as the EQ case. */
        if (const_arg2) {
            if (!arg2) {
                arg = arg1;
            }
            else {
                arg = 0;
                if ((uint16_t) arg2 == arg2) {
                    tcg_out32(s, XORI | SAI(arg1, 0, arg2));
                } else {
                    tcg_out_movi (s, type, 0, arg2);
                    tcg_out32 (s, XOR | SAB (arg1, 0, 0));
                }
            }
        }
        else {
            arg = 0;
            tcg_out32 (s, XOR | SAB (arg1, 0, arg2));
        }

        /* Make sure and discard the high 32-bits of the input. */
        if (type == TCG_TYPE_I32) {
            tcg_out32(s, EXTSW | RA(TCG_REG_R0) | RS(arg));
            arg = TCG_REG_R0;
        }

        /* addic/subfe: carry is set iff arg == 0, so the subtract
           yields 1 for nonzero and 0 for zero.  The aliased-register
           variant keeps the scratch in r0. */
        if (arg == arg1 && arg1 == arg0) {
            tcg_out32(s, ADDIC | TAI(0, arg, -1));
            tcg_out32(s, SUBFE | TAB(arg0, 0, arg));
        }
        else {
            tcg_out32(s, ADDIC | TAI(arg0, arg, -1));
            tcg_out32(s, SUBFE | TAB(arg0, arg0, arg));
        }
        break;

    case TCG_COND_GT:
    case TCG_COND_GTU:
        sh = 30;                /* extract CR7[GT] */
        crop = 0;
        goto crtest;

    case TCG_COND_LT:
    case TCG_COND_LTU:
        sh = 29;                /* extract CR7[LT] */
        crop = 0;
        goto crtest;

    case TCG_COND_GE:
    case TCG_COND_GEU:
        sh = 31;
        /* GE == !LT: fold into CR7[EQ] via CRNOR first. */
        crop = CRNOR | BT (7, CR_EQ) | BA (7, CR_LT) | BB (7, CR_LT);
        goto crtest;

    case TCG_COND_LE:
    case TCG_COND_LEU:
        sh = 31;
        /* LE == !GT, same trick. */
        crop = CRNOR | BT (7, CR_EQ) | BA (7, CR_GT) | BB (7, CR_GT);
    crtest:
        tcg_out_cmp (s, cond, arg1, arg2, const_arg2, 7, type == TCG_TYPE_I64);
        if (crop) tcg_out32 (s, crop);
        tcg_out32 (s, MFCR | RT (0));
        /* Rotate the selected CR bit into the low bit of arg0. */
        tcg_out_rlw(s, RLWINM, arg0, 0, sh, 31, 31);
        break;

    default:
        tcg_abort ();
    }
}
1236
/* Emit conditional branch BC to LABEL_INDEX.  If the label is already
   bound the displacement is final; otherwise the previous contents of
   the displacement field are preserved (they chain forward references)
   and a R_PPC_REL14 relocation is recorded. */
static void tcg_out_bc (TCGContext *s, int bc, int label_index)
{
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value)
        tcg_out32 (s, bc | reloc_pc14_val (s->code_ptr, l->u.value));
    else {
        /* Keep whatever is in the displacement field of the not-yet-
           written instruction word. */
        uint16_t val = *(uint16_t *) &s->code_ptr[2];

        /* Thanks to Andrzej Zaborowski */
        tcg_out32 (s, bc | (val & 0xfffc));
        tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL14, label_index, 0);
    }
}
1251
/* Compare-and-branch: compare into CR7, then emit the matching
   conditional branch to LABEL_INDEX. */
static void tcg_out_brcond (TCGContext *s, TCGCond cond,
                            TCGArg arg1, TCGArg arg2, int const_arg2,
                            int label_index, int arch64)
{
    tcg_out_cmp (s, cond, arg1, arg2, const_arg2, 7, arch64);
    tcg_out_bc (s, tcg_to_bc[cond], label_index);
}
1259
1260 void ppc_tb_set_jmp_target (unsigned long jmp_addr, unsigned long addr)
1261 {
1262 TCGContext s;
1263 unsigned long patch_size;
1264
1265 s.code_ptr = (uint8_t *) jmp_addr;
1266 tcg_out_b (&s, 0, addr);
1267 patch_size = s.code_ptr - (uint8_t *) jmp_addr;
1268 flush_icache_range (jmp_addr, jmp_addr + patch_size);
1269 }
1270
1271 static void tcg_out_op (TCGContext *s, TCGOpcode opc, const TCGArg *args,
1272 const int *const_args)
1273 {
1274 TCGArg a0, a1, a2;
1275 int c;
1276
1277 switch (opc) {
1278 case INDEX_op_exit_tb:
1279 tcg_out_movi (s, TCG_TYPE_I64, TCG_REG_R3, args[0]);
1280 tcg_out_b (s, 0, (tcg_target_long) tb_ret_addr);
1281 break;
1282 case INDEX_op_goto_tb:
1283 if (s->tb_jmp_offset) {
1284 /* direct jump method */
1285
1286 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
1287 s->code_ptr += 28;
1288 }
1289 else {
1290 tcg_abort ();
1291 }
1292 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1293 break;
1294 case INDEX_op_br:
1295 {
1296 TCGLabel *l = &s->labels[args[0]];
1297
1298 if (l->has_value) {
1299 tcg_out_b (s, 0, l->u.value);
1300 }
1301 else {
1302 uint32_t val = *(uint32_t *) s->code_ptr;
1303
1304 /* Thanks to Andrzej Zaborowski */
1305 tcg_out32 (s, B | (val & 0x3fffffc));
1306 tcg_out_reloc (s, s->code_ptr - 4, R_PPC_REL24, args[0], 0);
1307 }
1308 }
1309 break;
1310 case INDEX_op_call:
1311 tcg_out_call (s, args[0], const_args[0]);
1312 break;
1313 case INDEX_op_movi_i32:
1314 tcg_out_movi (s, TCG_TYPE_I32, args[0], args[1]);
1315 break;
1316 case INDEX_op_movi_i64:
1317 tcg_out_movi (s, TCG_TYPE_I64, args[0], args[1]);
1318 break;
1319 case INDEX_op_ld8u_i32:
1320 case INDEX_op_ld8u_i64:
1321 tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX);
1322 break;
1323 case INDEX_op_ld8s_i32:
1324 case INDEX_op_ld8s_i64:
1325 tcg_out_ldst (s, args[0], args[1], args[2], LBZ, LBZX);
1326 tcg_out32 (s, EXTSB | RS (args[0]) | RA (args[0]));
1327 break;
1328 case INDEX_op_ld16u_i32:
1329 case INDEX_op_ld16u_i64:
1330 tcg_out_ldst (s, args[0], args[1], args[2], LHZ, LHZX);
1331 break;
1332 case INDEX_op_ld16s_i32:
1333 case INDEX_op_ld16s_i64:
1334 tcg_out_ldst (s, args[0], args[1], args[2], LHA, LHAX);
1335 break;
1336 case INDEX_op_ld_i32:
1337 case INDEX_op_ld32u_i64:
1338 tcg_out_ldst (s, args[0], args[1], args[2], LWZ, LWZX);
1339 break;
1340 case INDEX_op_ld32s_i64:
1341 tcg_out_ldsta (s, args[0], args[1], args[2], LWA, LWAX);
1342 break;
1343 case INDEX_op_ld_i64:
1344 tcg_out_ldsta (s, args[0], args[1], args[2], LD, LDX);
1345 break;
1346 case INDEX_op_st8_i32:
1347 case INDEX_op_st8_i64:
1348 tcg_out_ldst (s, args[0], args[1], args[2], STB, STBX);
1349 break;
1350 case INDEX_op_st16_i32:
1351 case INDEX_op_st16_i64:
1352 tcg_out_ldst (s, args[0], args[1], args[2], STH, STHX);
1353 break;
1354 case INDEX_op_st_i32:
1355 case INDEX_op_st32_i64:
1356 tcg_out_ldst (s, args[0], args[1], args[2], STW, STWX);
1357 break;
1358 case INDEX_op_st_i64:
1359 tcg_out_ldsta (s, args[0], args[1], args[2], STD, STDX);
1360 break;
1361
1362 case INDEX_op_add_i32:
1363 a0 = args[0], a1 = args[1], a2 = args[2];
1364 if (const_args[2]) {
1365 int32_t l, h;
1366 do_addi_32:
1367 l = (int16_t)a2;
1368 h = a2 - l;
1369 if (h) {
1370 tcg_out32(s, ADDIS | TAI(a0, a1, h >> 16));
1371 a1 = a0;
1372 }
1373 if (l || a0 != a1) {
1374 tcg_out32(s, ADDI | TAI(a0, a1, l));
1375 }
1376 } else {
1377 tcg_out32(s, ADD | TAB(a0, a1, a2));
1378 }
1379 break;
1380 case INDEX_op_sub_i32:
1381 a0 = args[0], a1 = args[1], a2 = args[2];
1382 if (const_args[1]) {
1383 if (const_args[2]) {
1384 tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
1385 } else {
1386 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
1387 }
1388 } else if (const_args[2]) {
1389 a2 = -a2;
1390 goto do_addi_32;
1391 } else {
1392 tcg_out32(s, SUBF | TAB(a0, a2, a1));
1393 }
1394 break;
1395
1396 case INDEX_op_and_i32:
1397 a0 = args[0], a1 = args[1], a2 = args[2];
1398 if (const_args[2]) {
1399 tcg_out_andi32(s, a0, a1, a2);
1400 } else {
1401 tcg_out32(s, AND | SAB(a1, a0, a2));
1402 }
1403 break;
1404 case INDEX_op_and_i64:
1405 a0 = args[0], a1 = args[1], a2 = args[2];
1406 if (const_args[2]) {
1407 tcg_out_andi64(s, a0, a1, a2);
1408 } else {
1409 tcg_out32(s, AND | SAB(a1, a0, a2));
1410 }
1411 break;
1412 case INDEX_op_or_i64:
1413 case INDEX_op_or_i32:
1414 a0 = args[0], a1 = args[1], a2 = args[2];
1415 if (const_args[2]) {
1416 tcg_out_ori32(s, a0, a1, a2);
1417 } else {
1418 tcg_out32(s, OR | SAB(a1, a0, a2));
1419 }
1420 break;
1421 case INDEX_op_xor_i64:
1422 case INDEX_op_xor_i32:
1423 a0 = args[0], a1 = args[1], a2 = args[2];
1424 if (const_args[2]) {
1425 tcg_out_xori32(s, a0, a1, a2);
1426 } else {
1427 tcg_out32(s, XOR | SAB(a1, a0, a2));
1428 }
1429 break;
1430 case INDEX_op_andc_i32:
1431 a0 = args[0], a1 = args[1], a2 = args[2];
1432 if (const_args[2]) {
1433 tcg_out_andi32(s, a0, a1, ~a2);
1434 } else {
1435 tcg_out32(s, ANDC | SAB(a1, a0, a2));
1436 }
1437 break;
1438 case INDEX_op_andc_i64:
1439 a0 = args[0], a1 = args[1], a2 = args[2];
1440 if (const_args[2]) {
1441 tcg_out_andi64(s, a0, a1, ~a2);
1442 } else {
1443 tcg_out32(s, ANDC | SAB(a1, a0, a2));
1444 }
1445 break;
1446 case INDEX_op_orc_i32:
1447 if (const_args[2]) {
1448 tcg_out_ori32(s, args[0], args[1], ~args[2]);
1449 break;
1450 }
1451 /* FALLTHRU */
1452 case INDEX_op_orc_i64:
1453 tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
1454 break;
1455 case INDEX_op_eqv_i32:
1456 if (const_args[2]) {
1457 tcg_out_xori32(s, args[0], args[1], ~args[2]);
1458 break;
1459 }
1460 /* FALLTHRU */
1461 case INDEX_op_eqv_i64:
1462 tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
1463 break;
1464 case INDEX_op_nand_i32:
1465 case INDEX_op_nand_i64:
1466 tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
1467 break;
1468 case INDEX_op_nor_i32:
1469 case INDEX_op_nor_i64:
1470 tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
1471 break;
1472
1473 case INDEX_op_mul_i32:
1474 a0 = args[0], a1 = args[1], a2 = args[2];
1475 if (const_args[2]) {
1476 tcg_out32(s, MULLI | TAI(a0, a1, a2));
1477 } else {
1478 tcg_out32(s, MULLW | TAB(a0, a1, a2));
1479 }
1480 break;
1481
1482 case INDEX_op_div_i32:
1483 tcg_out32 (s, DIVW | TAB (args[0], args[1], args[2]));
1484 break;
1485
1486 case INDEX_op_divu_i32:
1487 tcg_out32 (s, DIVWU | TAB (args[0], args[1], args[2]));
1488 break;
1489
1490 case INDEX_op_rem_i32:
1491 tcg_out32 (s, DIVW | TAB (0, args[1], args[2]));
1492 tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
1493 tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
1494 break;
1495
1496 case INDEX_op_remu_i32:
1497 tcg_out32 (s, DIVWU | TAB (0, args[1], args[2]));
1498 tcg_out32 (s, MULLW | TAB (0, 0, args[2]));
1499 tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
1500 break;
1501
1502 case INDEX_op_shl_i32:
1503 if (const_args[2]) {
1504 tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31 - args[2]);
1505 } else {
1506 tcg_out32 (s, SLW | SAB (args[1], args[0], args[2]));
1507 }
1508 break;
1509 case INDEX_op_shr_i32:
1510 if (const_args[2]) {
1511 tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], args[2], 31);
1512 } else {
1513 tcg_out32 (s, SRW | SAB (args[1], args[0], args[2]));
1514 }
1515 break;
1516 case INDEX_op_sar_i32:
1517 if (const_args[2])
1518 tcg_out32 (s, SRAWI | RS (args[1]) | RA (args[0]) | SH (args[2]));
1519 else
1520 tcg_out32 (s, SRAW | SAB (args[1], args[0], args[2]));
1521 break;
1522 case INDEX_op_rotl_i32:
1523 if (const_args[2]) {
1524 tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
1525 } else {
1526 tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
1527 | MB(0) | ME(31));
1528 }
1529 break;
1530 case INDEX_op_rotr_i32:
1531 if (const_args[2]) {
1532 tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
1533 } else {
1534 tcg_out32(s, SUBFIC | TAI(0, args[2], 32));
1535 tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
1536 | MB(0) | ME(31));
1537 }
1538 break;
1539
1540 case INDEX_op_brcond_i32:
1541 tcg_out_brcond (s, args[2], args[0], args[1], const_args[1], args[3], 0);
1542 break;
1543
1544 case INDEX_op_brcond_i64:
1545 tcg_out_brcond (s, args[2], args[0], args[1], const_args[1], args[3], 1);
1546 break;
1547
1548 case INDEX_op_neg_i32:
1549 case INDEX_op_neg_i64:
1550 tcg_out32 (s, NEG | RT (args[0]) | RA (args[1]));
1551 break;
1552
1553 case INDEX_op_not_i32:
1554 case INDEX_op_not_i64:
1555 tcg_out32 (s, NOR | SAB (args[1], args[0], args[1]));
1556 break;
1557
1558 case INDEX_op_add_i64:
1559 a0 = args[0], a1 = args[1], a2 = args[2];
1560 if (const_args[2]) {
1561 int32_t l0, h1, h2;
1562 do_addi_64:
1563 /* We can always split any 32-bit signed constant into 3 pieces.
1564 Note the positive 0x80000000 coming from the sub_i64 path,
1565 handled with the same code we need for eg 0x7fff8000. */
1566 assert(a2 == (int32_t)a2 || a2 == 0x80000000);
1567 l0 = (int16_t)a2;
1568 h1 = a2 - l0;
1569 h2 = 0;
1570 if (h1 < 0 && (int64_t)a2 > 0) {
1571 h2 = 0x40000000;
1572 h1 = a2 - h2 - l0;
1573 }
1574 assert((TCGArg)h2 + h1 + l0 == a2);
1575
1576 if (h2) {
1577 tcg_out32(s, ADDIS | TAI(a0, a1, h2 >> 16));
1578 a1 = a0;
1579 }
1580 if (h1) {
1581 tcg_out32(s, ADDIS | TAI(a0, a1, h1 >> 16));
1582 a1 = a0;
1583 }
1584 if (l0 || a0 != a1) {
1585 tcg_out32(s, ADDI | TAI(a0, a1, l0));
1586 }
1587 } else {
1588 tcg_out32(s, ADD | TAB(a0, a1, a2));
1589 }
1590 break;
1591 case INDEX_op_sub_i64:
1592 a0 = args[0], a1 = args[1], a2 = args[2];
1593 if (const_args[1]) {
1594 if (const_args[2]) {
1595 tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
1596 } else {
1597 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
1598 }
1599 } else if (const_args[2]) {
1600 a2 = -a2;
1601 goto do_addi_64;
1602 } else {
1603 tcg_out32(s, SUBF | TAB(a0, a2, a1));
1604 }
1605 break;
1606
1607 case INDEX_op_shl_i64:
1608 if (const_args[2])
1609 tcg_out_shli64(s, args[0], args[1], args[2]);
1610 else
1611 tcg_out32 (s, SLD | SAB (args[1], args[0], args[2]));
1612 break;
1613 case INDEX_op_shr_i64:
1614 if (const_args[2])
1615 tcg_out_shri64(s, args[0], args[1], args[2]);
1616 else
1617 tcg_out32 (s, SRD | SAB (args[1], args[0], args[2]));
1618 break;
1619 case INDEX_op_sar_i64:
1620 if (const_args[2]) {
1621 int sh = SH (args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
1622 tcg_out32 (s, SRADI | RA (args[0]) | RS (args[1]) | sh);
1623 }
1624 else
1625 tcg_out32 (s, SRAD | SAB (args[1], args[0], args[2]));
1626 break;
1627 case INDEX_op_rotl_i64:
1628 if (const_args[2]) {
1629 tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
1630 } else {
1631 tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
1632 }
1633 break;
1634 case INDEX_op_rotr_i64:
1635 if (const_args[2]) {
1636 tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
1637 } else {
1638 tcg_out32(s, SUBFIC | TAI(0, args[2], 64));
1639 tcg_out32(s, RLDCL | SAB(args[1], args[0], 0) | MB64(0));
1640 }
1641 break;
1642
1643 case INDEX_op_mul_i64:
1644 a0 = args[0], a1 = args[1], a2 = args[2];
1645 if (const_args[2]) {
1646 tcg_out32(s, MULLI | TAI(a0, a1, a2));
1647 } else {
1648 tcg_out32(s, MULLD | TAB(a0, a1, a2));
1649 }
1650 break;
1651 case INDEX_op_div_i64:
1652 tcg_out32 (s, DIVD | TAB (args[0], args[1], args[2]));
1653 break;
1654 case INDEX_op_divu_i64:
1655 tcg_out32 (s, DIVDU | TAB (args[0], args[1], args[2]));
1656 break;
1657 case INDEX_op_rem_i64:
1658 tcg_out32 (s, DIVD | TAB (0, args[1], args[2]));
1659 tcg_out32 (s, MULLD | TAB (0, 0, args[2]));
1660 tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
1661 break;
1662 case INDEX_op_remu_i64:
1663 tcg_out32 (s, DIVDU | TAB (0, args[1], args[2]));
1664 tcg_out32 (s, MULLD | TAB (0, 0, args[2]));
1665 tcg_out32 (s, SUBF | TAB (args[0], 0, args[1]));
1666 break;
1667
1668 case INDEX_op_qemu_ld8u:
1669 tcg_out_qemu_ld (s, args, 0);
1670 break;
1671 case INDEX_op_qemu_ld8s:
1672 tcg_out_qemu_ld (s, args, 0 | 4);
1673 break;
1674 case INDEX_op_qemu_ld16u:
1675 tcg_out_qemu_ld (s, args, 1);
1676 break;
1677 case INDEX_op_qemu_ld16s:
1678 tcg_out_qemu_ld (s, args, 1 | 4);
1679 break;
1680 case INDEX_op_qemu_ld32:
1681 case INDEX_op_qemu_ld32u:
1682 tcg_out_qemu_ld (s, args, 2);
1683 break;
1684 case INDEX_op_qemu_ld32s:
1685 tcg_out_qemu_ld (s, args, 2 | 4);
1686 break;
1687 case INDEX_op_qemu_ld64:
1688 tcg_out_qemu_ld (s, args, 3);
1689 break;
1690 case INDEX_op_qemu_st8:
1691 tcg_out_qemu_st (s, args, 0);
1692 break;
1693 case INDEX_op_qemu_st16:
1694 tcg_out_qemu_st (s, args, 1);
1695 break;
1696 case INDEX_op_qemu_st32:
1697 tcg_out_qemu_st (s, args, 2);
1698 break;
1699 case INDEX_op_qemu_st64:
1700 tcg_out_qemu_st (s, args, 3);
1701 break;
1702
1703 case INDEX_op_ext8s_i32:
1704 case INDEX_op_ext8s_i64:
1705 c = EXTSB;
1706 goto gen_ext;
1707 case INDEX_op_ext16s_i32:
1708 case INDEX_op_ext16s_i64:
1709 c = EXTSH;
1710 goto gen_ext;
1711 case INDEX_op_ext32s_i64:
1712 c = EXTSW;
1713 goto gen_ext;
1714 gen_ext:
1715 tcg_out32 (s, c | RS (args[1]) | RA (args[0]));
1716 break;
1717
1718 case INDEX_op_setcond_i32:
1719 tcg_out_setcond (s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
1720 const_args[2]);
1721 break;
1722 case INDEX_op_setcond_i64:
1723 tcg_out_setcond (s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
1724 const_args[2]);
1725 break;
1726
1727 case INDEX_op_bswap16_i32:
1728 case INDEX_op_bswap16_i64:
1729 a0 = args[0], a1 = args[1];
1730 /* a1 = abcd */
1731 if (a0 != a1) {
1732 /* a0 = (a1 r<< 24) & 0xff # 000c */
1733 tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
1734 /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */
1735 tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23);
1736 } else {
1737 /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */
1738 tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23);
1739 /* a0 = (a1 r<< 24) & 0xff # 000c */
1740 tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
1741 /* a0 = a0 | r0 # 00dc */
1742 tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0));
1743 }
1744 break;
1745
1746 case INDEX_op_bswap32_i32:
1747 case INDEX_op_bswap32_i64:
1748 /* Stolen from gcc's builtin_bswap32 */
1749 a1 = args[1];
1750 a0 = args[0] == a1 ? TCG_REG_R0 : args[0];
1751
1752 /* a1 = args[1] # abcd */
1753 /* a0 = rotate_left (a1, 8) # bcda */
1754 tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
1755 /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */
1756 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
1757 /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */
1758 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);
1759
1760 if (a0 == TCG_REG_R0) {
1761 tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
1762 }
1763 break;
1764
1765 case INDEX_op_bswap64_i64:
1766 a0 = args[0], a1 = args[1], a2 = 0;
1767 if (a0 == a1) {
1768 a0 = 0;
1769 a2 = a1;
1770 }
1771
1772 /* a1 = # abcd efgh */
1773 /* a0 = rl32(a1, 8) # 0000 fghe */
1774 tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
1775 /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
1776 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
1777 /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
1778 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);
1779
1780 /* a0 = rl64(a0, 32) # hgfe 0000 */
1781 /* a2 = rl64(a1, 32) # efgh abcd */
1782 tcg_out_rld(s, RLDICL, a0, a0, 32, 0);
1783 tcg_out_rld(s, RLDICL, a2, a1, 32, 0);
1784
1785 /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
1786 tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31);
1787 /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
1788 tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7);
1789 /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
1790 tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23);
1791
1792 if (a0 == 0) {
1793 tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
1794 /* Revert the source rotate that we performed above. */
1795 tcg_out_rld(s, RLDICL, a1, a1, 32, 0);
1796 }
1797 break;
1798
1799 case INDEX_op_deposit_i32:
1800 tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
1801 32 - args[3] - args[4], 31 - args[3]);
1802 break;
1803 case INDEX_op_deposit_i64:
1804 tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
1805 64 - args[3] - args[4]);
1806 break;
1807
1808 default:
1809 tcg_dump_ops (s);
1810 tcg_abort ();
1811 }
1812 }
1813
/* Operand constraints for every opcode tcg_out_op implements.
   Constraint letters: "r" any GPR, "i" any immediate, "I" signed 16-bit,
   "T" signed 32-bit, "U" unsigned 32-bit, "L"/"S" qemu_ld/st address
   register classes, "0" must match output operand.  */
static const TCGTargetOpDef ppc_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_mov_i64, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_movi_i64, { "r" } },

    /* 32-bit loads/stores (and i64 variants sharing the same insns).  */
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_ld_i64, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st8_i64, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st16_i64, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },
    { INDEX_op_st_i64, { "r", "r" } },
    { INDEX_op_st32_i64, { "r", "r" } },

    { INDEX_op_ld8u_i64, { "r", "r" } },
    { INDEX_op_ld8s_i64, { "r", "r" } },
    { INDEX_op_ld16u_i64, { "r", "r" } },
    { INDEX_op_ld16s_i64, { "r", "r" } },
    { INDEX_op_ld32u_i64, { "r", "r" } },
    { INDEX_op_ld32s_i64, { "r", "r" } },

    /* 32-bit arithmetic and logic.  */
    { INDEX_op_add_i32, { "r", "r", "ri" } },
    { INDEX_op_mul_i32, { "r", "r", "rI" } },
    { INDEX_op_div_i32, { "r", "r", "r" } },
    { INDEX_op_divu_i32, { "r", "r", "r" } },
    { INDEX_op_rem_i32, { "r", "r", "r" } },
    { INDEX_op_remu_i32, { "r", "r", "r" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "r", "ri" } },
    { INDEX_op_or_i32, { "r", "r", "ri" } },
    { INDEX_op_xor_i32, { "r", "r", "ri" } },
    { INDEX_op_andc_i32, { "r", "r", "ri" } },
    { INDEX_op_orc_i32, { "r", "r", "ri" } },
    { INDEX_op_eqv_i32, { "r", "r", "ri" } },
    { INDEX_op_nand_i32, { "r", "r", "r" } },
    { INDEX_op_nor_i32, { "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_brcond_i32, { "r", "ri" } },
    { INDEX_op_brcond_i64, { "r", "ri" } },

    { INDEX_op_neg_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    /* 64-bit arithmetic and logic.  */
    { INDEX_op_add_i64, { "r", "r", "rT" } },
    { INDEX_op_sub_i64, { "r", "rI", "rT" } },
    { INDEX_op_and_i64, { "r", "r", "ri" } },
    { INDEX_op_or_i64, { "r", "r", "rU" } },
    { INDEX_op_xor_i64, { "r", "r", "rU" } },
    { INDEX_op_andc_i64, { "r", "r", "ri" } },
    { INDEX_op_orc_i64, { "r", "r", "r" } },
    { INDEX_op_eqv_i64, { "r", "r", "r" } },
    { INDEX_op_nand_i64, { "r", "r", "r" } },
    { INDEX_op_nor_i64, { "r", "r", "r" } },

    { INDEX_op_shl_i64, { "r", "r", "ri" } },
    { INDEX_op_shr_i64, { "r", "r", "ri" } },
    { INDEX_op_sar_i64, { "r", "r", "ri" } },
    { INDEX_op_rotl_i64, { "r", "r", "ri" } },
    { INDEX_op_rotr_i64, { "r", "r", "ri" } },

    { INDEX_op_mul_i64, { "r", "r", "rI" } },
    { INDEX_op_div_i64, { "r", "r", "r" } },
    { INDEX_op_divu_i64, { "r", "r", "r" } },
    { INDEX_op_rem_i64, { "r", "r", "r" } },
    { INDEX_op_remu_i64, { "r", "r", "r" } },

    { INDEX_op_neg_i64, { "r", "r" } },
    { INDEX_op_not_i64, { "r", "r" } },

    /* Guest memory accesses.  */
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "L" } },

    { INDEX_op_qemu_st8, { "S", "S" } },
    { INDEX_op_qemu_st16, { "S", "S" } },
    { INDEX_op_qemu_st32, { "S", "S" } },
    { INDEX_op_qemu_st64, { "S", "S" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },
    { INDEX_op_ext8s_i64, { "r", "r" } },
    { INDEX_op_ext16s_i64, { "r", "r" } },
    { INDEX_op_ext32s_i64, { "r", "r" } },

    { INDEX_op_setcond_i32, { "r", "r", "ri" } },
    { INDEX_op_setcond_i64, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap16_i64, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_bswap32_i64, { "r", "r" } },
    { INDEX_op_bswap64_i64, { "r", "r" } },

    /* Deposit inserts into the first input in place ("0").  */
    { INDEX_op_deposit_i32, { "r", "0", "r" } },
    { INDEX_op_deposit_i64, { "r", "0", "r" } },

    { -1 },
};
1934
/* One-time backend initialization: declare which host registers are
   available, which are clobbered by calls per the ABI, and which are
   reserved and never allocated.  */
static void tcg_target_init (TCGContext *s)
{
    /* All 32 GPRs can hold either 32- or 64-bit TCG values.  */
    tcg_regset_set32 (tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
    tcg_regset_set32 (tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
    /* Call-clobbered set per the 64-bit ELF ABI (r0, r3-r12); Darwin
       additionally treats r2 as volatile since it has no TOC.  */
    tcg_regset_set32 (tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R0) |
#ifdef __APPLE__
                     (1 << TCG_REG_R2) |
#endif
                     (1 << TCG_REG_R3) |
                     (1 << TCG_REG_R4) |
                     (1 << TCG_REG_R5) |
                     (1 << TCG_REG_R6) |
                     (1 << TCG_REG_R7) |
                     (1 << TCG_REG_R8) |
                     (1 << TCG_REG_R9) |
                     (1 << TCG_REG_R10) |
                     (1 << TCG_REG_R11) |
                     (1 << TCG_REG_R12)
        );

    tcg_regset_clear (s->reserved_regs);
    /* r0: internal scratch; r1: stack pointer.  */
    tcg_regset_set_reg (s->reserved_regs, TCG_REG_R0);
    tcg_regset_set_reg (s->reserved_regs, TCG_REG_R1);
#ifndef __APPLE__
    /* r2: TOC pointer on ELF systems.  */
    tcg_regset_set_reg (s->reserved_regs, TCG_REG_R2);
#endif
    /* r13: thread pointer.  */
    tcg_regset_set_reg (s->reserved_regs, TCG_REG_R13);

    tcg_add_target_add_op_defs (ppc_op_defs);
}