/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#define TCG_CT_CONST_S16 0x100
#define TCG_CT_CONST_U16 0x200
#define TCG_CT_CONST_S32 0x400
#define TCG_CT_CONST_U32 0x800
#define TCG_CT_CONST_ZERO 0x1000
#define TCG_CT_CONST_MONE 0x2000

static uint8_t *tb_ret_addr;

#if TARGET_LONG_BITS == 32
#define LD_ADDR LWZ
#define CMP_L 0
#else
#define LD_ADDR LD
#define CMP_L (1<<21)
#endif

#ifndef GUEST_BASE
#define GUEST_BASE 0
#endif

#ifdef CONFIG_GETAUXVAL
#include <sys/auxv.h>
static bool have_isa_2_06;
#define HAVE_ISA_2_06 have_isa_2_06
#define HAVE_ISEL have_isa_2_06
#else
#define HAVE_ISA_2_06 0
#define HAVE_ISEL 0
#endif

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG 30
#else
#define TCG_GUEST_BASE_REG 0
#endif

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r0",
    "r1",
    "r2",
    "r3",
    "r4",
    "r5",
    "r6",
    "r7",
    "r8",
    "r9",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
    "r16",
    "r17",
    "r18",
    "r19",
    "r20",
    "r21",
    "r22",
    "r23",
    "r24",
    "r25",
    "r26",
    "r27",
    "r28",
    "r29",
    "r30",
    "r31"
};
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R14, /* call saved registers */
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27,
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31,
    TCG_REG_R12, /* call clobbered, non-arguments */
    TCG_REG_R11,
    TCG_REG_R10, /* call clobbered, arguments */
    TCG_REG_R9,
    TCG_REG_R8,
    TCG_REG_R7,
    TCG_REG_R6,
    TCG_REG_R5,
    TCG_REG_R4,
    TCG_REG_R3,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R3,
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R3
};

static const int tcg_target_callee_save_regs[] = {
#ifdef __APPLE__
    TCG_REG_R11,
#endif
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_R27, /* currently used for the global env */
    TCG_REG_R28,
    TCG_REG_R29,
    TCG_REG_R30,
    TCG_REG_R31
};

static inline bool in_range_b(tcg_target_long target)
{
    return target == sextract64(target, 0, 26);
}

static uint32_t reloc_pc24_val(void *pc, tcg_target_long target)
{
    tcg_target_long disp;

    disp = target - (tcg_target_long)pc;
    assert(in_range_b(disp));

    return disp & 0x3fffffc;
}

static void reloc_pc24(void *pc, tcg_target_long target)
{
    *(uint32_t *)pc = (*(uint32_t *)pc & ~0x3fffffc)
        | reloc_pc24_val(pc, target);
}

static uint16_t reloc_pc14_val(void *pc, tcg_target_long target)
{
    tcg_target_long disp;

    disp = target - (tcg_target_long)pc;
    if (disp != (int16_t) disp) {
        tcg_abort();
    }

    return disp & 0xfffc;
}

static void reloc_pc14(void *pc, tcg_target_long target)
{
    *(uint32_t *)pc = (*(uint32_t *)pc & ~0xfffc) | reloc_pc14_val(pc, target);
}

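/* Emit a branch whose destination is not yet known.  The displacement
   field is left holding whatever bits were already there, so that a
   retranslated TB keeps branching to the previous target until
   patch_reloc() fills in the real one. */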
static inline void tcg_out_b_noaddr(TCGContext *s, int insn)
{
    unsigned retrans = *(uint32_t *)s->code_ptr & 0x3fffffc;
    tcg_out32(s, insn | retrans);
}

static inline void tcg_out_bc_noaddr(TCGContext *s, int insn)
{
    unsigned retrans = *(uint32_t *)s->code_ptr & 0xfffc;
    tcg_out32(s, insn | retrans);
}

static void patch_reloc(uint8_t *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    value += addend;
    switch (type) {
    case R_PPC_REL14:
        reloc_pc14(code_ptr, value);
        break;
    case R_PPC_REL24:
        reloc_pc24(code_ptr, value);
        break;
    default:
        tcg_abort();
    }
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'A': case 'B': case 'C': case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, 3 + ct_str[0] - 'A');
        break;
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
#endif
        break;
    case 'S': /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R3);
#ifdef CONFIG_SOFTMMU
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R4);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R5);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R6);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S16;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_U16;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_MONE;
        break;
    case 'T':
        ct->ct |= TCG_CT_CONST_S32;
        break;
    case 'U':
        ct->ct |= TCG_CT_CONST_U32;
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S16) && val == (int16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U16) && val == (uint16_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_S32) && val == (int32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_U32) && val == (uint32_t)val) {
        return 1;
    } else if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    } else if ((ct & TCG_CT_CONST_MONE) && val == -1) {
        return 1;
    }
    return 0;
}

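/* The macros below assemble PowerPC instruction encodings: OPCD() places
   the 6-bit primary opcode in the top of the word, and the XO/MD/MDS
   variants add the extended-opcode field used by the corresponding
   instruction forms. */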
#define OPCD(opc) ((opc)<<26)
#define XO19(opc) (OPCD(19)|((opc)<<1))
#define MD30(opc) (OPCD(30)|((opc)<<2))
#define MDS30(opc) (OPCD(30)|((opc)<<1))
#define XO31(opc) (OPCD(31)|((opc)<<1))
#define XO58(opc) (OPCD(58)|(opc))
#define XO62(opc) (OPCD(62)|(opc))

#define B OPCD( 18)
#define BC OPCD( 16)
#define LBZ OPCD( 34)
#define LHZ OPCD( 40)
#define LHA OPCD( 42)
#define LWZ OPCD( 32)
#define STB OPCD( 38)
#define STH OPCD( 44)
#define STW OPCD( 36)

#define STD XO62( 0)
#define STDU XO62( 1)
#define STDX XO31(149)

#define LD XO58( 0)
#define LDX XO31( 21)
#define LDU XO58( 1)
#define LWA XO58( 2)
#define LWAX XO31(341)

#define ADDIC OPCD( 12)
#define ADDI OPCD( 14)
#define ADDIS OPCD( 15)
#define ORI OPCD( 24)
#define ORIS OPCD( 25)
#define XORI OPCD( 26)
#define XORIS OPCD( 27)
#define ANDI OPCD( 28)
#define ANDIS OPCD( 29)
#define MULLI OPCD( 7)
#define CMPLI OPCD( 10)
#define CMPI OPCD( 11)
#define SUBFIC OPCD( 8)

#define LWZU OPCD( 33)
#define STWU OPCD( 37)

#define RLWIMI OPCD( 20)
#define RLWINM OPCD( 21)
#define RLWNM OPCD( 23)

#define RLDICL MD30( 0)
#define RLDICR MD30( 1)
#define RLDIMI MD30( 3)
#define RLDCL MDS30( 8)

#define BCLR XO19( 16)
#define BCCTR XO19(528)
#define CRAND XO19(257)
#define CRANDC XO19(129)
#define CRNAND XO19(225)
#define CROR XO19(449)
#define CRNOR XO19( 33)

#define EXTSB XO31(954)
#define EXTSH XO31(922)
#define EXTSW XO31(986)
#define ADD XO31(266)
#define ADDE XO31(138)
#define ADDME XO31(234)
#define ADDZE XO31(202)
#define ADDC XO31( 10)
#define AND XO31( 28)
#define SUBF XO31( 40)
#define SUBFC XO31( 8)
#define SUBFE XO31(136)
#define SUBFME XO31(232)
#define SUBFZE XO31(200)
#define OR XO31(444)
#define XOR XO31(316)
#define MULLW XO31(235)
#define MULHWU XO31( 11)
#define DIVW XO31(491)
#define DIVWU XO31(459)
#define CMP XO31( 0)
#define CMPL XO31( 32)
#define LHBRX XO31(790)
#define LWBRX XO31(534)
#define LDBRX XO31(532)
#define STHBRX XO31(918)
#define STWBRX XO31(662)
#define STDBRX XO31(660)
#define MFSPR XO31(339)
#define MTSPR XO31(467)
#define SRAWI XO31(824)
#define NEG XO31(104)
#define MFCR XO31( 19)
#define MFOCRF (MFCR | (1u << 20))
#define NOR XO31(124)
#define CNTLZW XO31( 26)
#define CNTLZD XO31( 58)
#define ANDC XO31( 60)
#define ORC XO31(412)
#define EQV XO31(284)
#define NAND XO31(476)
#define ISEL XO31( 15)

#define MULLD XO31(233)
#define MULHD XO31( 73)
#define MULHDU XO31( 9)
#define DIVD XO31(489)
#define DIVDU XO31(457)

#define LBZX XO31( 87)
#define LHZX XO31(279)
#define LHAX XO31(343)
#define LWZX XO31( 23)
#define STBX XO31(215)
#define STHX XO31(407)
#define STWX XO31(151)

#define SPR(a, b) ((((a)<<5)|(b))<<11)
#define LR SPR(8, 0)
#define CTR SPR(9, 0)

#define SLW XO31( 24)
#define SRW XO31(536)
#define SRAW XO31(792)

#define SLD XO31( 27)
#define SRD XO31(539)
#define SRAD XO31(794)
#define SRADI XO31(413<<1)

#define TW XO31( 4)
#define TRAP (TW | TO(31))

#define RT(r) ((r)<<21)
#define RS(r) ((r)<<21)
#define RA(r) ((r)<<16)
#define RB(r) ((r)<<11)
#define TO(t) ((t)<<21)
#define SH(s) ((s)<<11)
#define MB(b) ((b)<<6)
#define ME(e) ((e)<<1)
#define BO(o) ((o)<<21)
#define MB64(b) ((b)<<5)
#define FXM(b) (1 << (19 - (b)))

#define LK 1

#define TAB(t, a, b) (RT(t) | RA(a) | RB(b))
#define SAB(s, a, b) (RS(s) | RA(a) | RB(b))
#define TAI(s, a, i) (RT(s) | RA(a) | ((i) & 0xffff))
#define SAI(s, a, i) (RS(s) | RA(a) | ((i) & 0xffff))

#define BF(n) ((n)<<23)
#define BI(n, c) (((c)+((n)*4))<<16)
#define BT(n, c) (((c)+((n)*4))<<21)
#define BA(n, c) (((c)+((n)*4))<<16)
#define BB(n, c) (((c)+((n)*4))<<11)
#define BC_(n, c) (((c)+((n)*4))<<6)

#define BO_COND_TRUE BO(12)
#define BO_COND_FALSE BO( 4)
#define BO_ALWAYS BO(20)

enum {
    CR_LT,
    CR_GT,
    CR_EQ,
    CR_SO
};

static const uint32_t tcg_to_bc[] = {
    [TCG_COND_EQ] = BC | BI(7, CR_EQ) | BO_COND_TRUE,
    [TCG_COND_NE] = BC | BI(7, CR_EQ) | BO_COND_FALSE,
    [TCG_COND_LT] = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GE] = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LE] = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GT] = BC | BI(7, CR_GT) | BO_COND_TRUE,
    [TCG_COND_LTU] = BC | BI(7, CR_LT) | BO_COND_TRUE,
    [TCG_COND_GEU] = BC | BI(7, CR_LT) | BO_COND_FALSE,
    [TCG_COND_LEU] = BC | BI(7, CR_GT) | BO_COND_FALSE,
    [TCG_COND_GTU] = BC | BI(7, CR_GT) | BO_COND_TRUE,
};

/* The low bit here is set if the RA and RB fields must be inverted. */
static const uint32_t tcg_to_isel[] = {
    [TCG_COND_EQ] = ISEL | BC_(7, CR_EQ),
    [TCG_COND_NE] = ISEL | BC_(7, CR_EQ) | 1,
    [TCG_COND_LT] = ISEL | BC_(7, CR_LT),
    [TCG_COND_GE] = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LE] = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GT] = ISEL | BC_(7, CR_GT),
    [TCG_COND_LTU] = ISEL | BC_(7, CR_LT),
    [TCG_COND_GEU] = ISEL | BC_(7, CR_LT) | 1,
    [TCG_COND_LEU] = ISEL | BC_(7, CR_GT) | 1,
    [TCG_COND_GTU] = ISEL | BC_(7, CR_GT),
};

static inline void tcg_out_mov(TCGContext *s, TCGType type,
                               TCGReg ret, TCGReg arg)
{
    if (ret != arg) {
        tcg_out32(s, OR | SAB(arg, ret, arg));
    }
}

static inline void tcg_out_rld(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb)
{
    sh = SH(sh & 0x1f) | (((sh >> 5) & 1) << 1);
    mb = MB64((mb >> 5) | ((mb << 1) & 0x3f));
    tcg_out32(s, op | RA(ra) | RS(rs) | sh | mb);
}

static inline void tcg_out_rlw(TCGContext *s, int op, TCGReg ra, TCGReg rs,
                               int sh, int mb, int me)
{
    tcg_out32(s, op | RA(ra) | RS(rs) | SH(sh) | MB(mb) | ME(me));
}

static inline void tcg_out_ext32u(TCGContext *s, TCGReg dst, TCGReg src)
{
    tcg_out_rld(s, RLDICL, dst, src, 0, 32);
}

static inline void tcg_out_shli64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICR, dst, src, c, 63 - c);
}

static inline void tcg_out_shri64(TCGContext *s, TCGReg dst, TCGReg src, int c)
{
    tcg_out_rld(s, RLDICL, dst, src, 64 - c, c);
}

static void tcg_out_movi32(TCGContext *s, TCGReg ret, int32_t arg)
{
    if (arg == (int16_t) arg) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
    } else {
        tcg_out32(s, ADDIS | TAI(ret, 0, arg >> 16));
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}

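/* Build an arbitrary 64-bit constant.  Values that fit in 32 bits take the
   movi32 path; otherwise the high 32 bits are materialized first, shifted
   left by 32, and the low halfwords OR'ed in.  For example,
   0x123456789abcdef0 becomes: movi32(0x12345678); sldi 32; oris 0x9abc;
   ori 0xdef0. */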
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg ret,
                         tcg_target_long arg)
{
    if (type == TCG_TYPE_I32 || arg == (int32_t)arg) {
        tcg_out_movi32(s, ret, arg);
    } else if (arg == (uint32_t)arg && !(arg & 0x8000)) {
        tcg_out32(s, ADDI | TAI(ret, 0, arg));
        tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
    } else {
        int32_t high = arg >> 32;
        tcg_out_movi32(s, ret, high);
        if (high) {
            tcg_out_shli64(s, ret, ret, 32);
        }
        if (arg & 0xffff0000) {
            tcg_out32(s, ORIS | SAI(ret, ret, arg >> 16));
        }
        if (arg & 0xffff) {
            tcg_out32(s, ORI | SAI(ret, ret, arg));
        }
    }
}

static bool mask_operand(uint32_t c, int *mb, int *me)
{
    uint32_t lsb, test;

    /* Accept a bit pattern like:
           0....01....1
           1....10....0
           0..01..10..0
       Keep track of the transitions.  */
    if (c == 0 || c == -1) {
        return false;
    }
    test = c;
    lsb = test & -test;
    test += lsb;
    if (test & (test - 1)) {
        return false;
    }

    *me = clz32(lsb);
    *mb = test ? clz32(test & -test) + 1 : 0;
    return true;
}

static bool mask64_operand(uint64_t c, int *mb, int *me)
{
    uint64_t lsb;

    if (c == 0) {
        return false;
    }

    lsb = c & -c;
    /* Accept 1..10..0.  */
    if (c == -lsb) {
        *mb = 0;
        *me = clz64(lsb);
        return true;
    }
    /* Accept 0..01..1.  */
    if (lsb == 1 && (c & (c + 1)) == 0) {
        *mb = clz64(c + 1) + 1;
        *me = 63;
        return true;
    }
    return false;
}

static void tcg_out_andi32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    int mb, me;

    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask_operand(c, &mb, &me)) {
        tcg_out_rlw(s, RLWINM, dst, src, 0, mb, me);
    } else {
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

static void tcg_out_andi64(TCGContext *s, TCGReg dst, TCGReg src, uint64_t c)
{
    int mb, me;

    if ((c & 0xffff) == c) {
        tcg_out32(s, ANDI | SAI(src, dst, c));
        return;
    } else if ((c & 0xffff0000) == c) {
        tcg_out32(s, ANDIS | SAI(src, dst, c >> 16));
        return;
    } else if (mask64_operand(c, &mb, &me)) {
        if (mb == 0) {
            tcg_out_rld(s, RLDICR, dst, src, 0, me);
        } else {
            tcg_out_rld(s, RLDICL, dst, src, 0, mb);
        }
    } else {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, c);
        tcg_out32(s, AND | SAB(src, dst, TCG_REG_R0));
    }
}

static void tcg_out_zori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c,
                           int op_lo, int op_hi)
{
    if (c >> 16) {
        tcg_out32(s, op_hi | SAI(src, dst, c >> 16));
        src = dst;
    }
    if (c & 0xffff) {
        tcg_out32(s, op_lo | SAI(src, dst, c));
        src = dst;
    }
}

static void tcg_out_ori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, ORI, ORIS);
}

static void tcg_out_xori32(TCGContext *s, TCGReg dst, TCGReg src, uint32_t c)
{
    tcg_out_zori32(s, dst, src, c, XORI, XORIS);
}

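/* Emit an unconditional branch.  Targets within the 26-bit displacement
   range use a direct B; otherwise the address is loaded into CTR and
   reached via BCCTR. */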
541dd4ce 700static void tcg_out_b(TCGContext *s, int mask, tcg_target_long target)
5d7ff5bb
AF
701{
702 tcg_target_long disp;
703
541dd4ce 704 disp = target - (tcg_target_long)s->code_ptr;
b0940da0 705 if (in_range_b(disp)) {
541dd4ce
RH
706 tcg_out32(s, B | (disp & 0x3fffffc) | mask);
707 } else {
8327a470
RH
708 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, (tcg_target_long)target);
709 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | CTR);
541dd4ce 710 tcg_out32(s, BCCTR | BO_ALWAYS | mask);
5d7ff5bb
AF
711 }
712}
713
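/* On non-Darwin (ELF) PPC64, a function pointer refers to a function
   descriptor: the first doubleword is the entry point and the second is
   the callee's TOC pointer, which must be loaded into R2 before the
   indirect call. */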
static void tcg_out_call(TCGContext *s, tcg_target_long arg, int const_arg)
{
#ifdef __APPLE__
    if (const_arg) {
        tcg_out_b(s, LK, arg);
    } else {
        tcg_out32(s, MTSPR | RS(arg) | LR);
        tcg_out32(s, BCLR | BO_ALWAYS | LK);
    }
#else
    TCGReg reg = arg;
    int ofs = 0;

    if (const_arg) {
        /* Look through the descriptor.  If the branch is in range and we
           don't have to spend too much effort on building the toc,
           call the target directly.  */
        intptr_t tgt = ((intptr_t *)arg)[0];
        intptr_t toc = ((intptr_t *)arg)[1];
        intptr_t diff = tgt - (intptr_t)s->code_ptr;

        if (in_range_b(diff) && toc == (uint32_t)toc) {
            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R2, toc);
            tcg_out_b(s, LK, tgt);
            return;
        }

        /* Fold the low bits of the constant into the addresses below.  */
        ofs = (int16_t)arg;
        if (ofs + 8 < 0x8000) {
            arg -= ofs;
        } else {
            ofs = 0;
        }
        reg = TCG_REG_R2;
        tcg_out_movi(s, TCG_TYPE_I64, reg, arg);
    }

    tcg_out32(s, LD | TAI(TCG_REG_R0, reg, ofs));
    tcg_out32(s, MTSPR | RA(TCG_REG_R0) | CTR);
    tcg_out32(s, LD | TAI(TCG_REG_R2, reg, ofs + 8));
    tcg_out32(s, BCCTR | BO_ALWAYS | LK);
#endif
}

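/* Emit a load or store with an arbitrary constant displacement.  opi is
   the D/DS-form (immediate) opcode and opx the X-form (indexed) opcode.
   Small, suitably aligned offsets use the immediate form, possibly after
   an ADDIS for the high 16 bits; DS-form-unaligned or 64-bit offsets fall
   back to materializing the offset in R2 and using the indexed form. */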
static void tcg_out_mem_long(TCGContext *s, int opi, int opx, TCGReg rt,
                             TCGReg base, tcg_target_long offset)
{
    tcg_target_long orig = offset, l0, l1, extra = 0, align = 0;
    TCGReg rs = TCG_REG_R2;

    assert(rt != TCG_REG_R2 && base != TCG_REG_R2);

    switch (opi) {
    case LD: case LWA:
        align = 3;
        /* FALLTHRU */
    default:
        if (rt != TCG_REG_R0) {
            rs = rt;
        }
        break;
    case STD:
        align = 3;
        break;
    case STB: case STH: case STW:
        break;
    }

    /* For unaligned, or very large offsets, use the indexed form.  */
    if (offset & align || offset != (int32_t)offset) {
        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R2, orig);
        tcg_out32(s, opx | TAB(rt, base, TCG_REG_R2));
        return;
    }

    l0 = (int16_t)offset;
    offset = (offset - l0) >> 16;
    l1 = (int16_t)offset;

    if (l1 < 0 && orig >= 0) {
        extra = 0x4000;
        l1 = (int16_t)(offset - 0x4000);
    }
    if (l1) {
        tcg_out32(s, ADDIS | TAI(rs, base, l1));
        base = rs;
    }
    if (extra) {
        tcg_out32(s, ADDIS | TAI(rs, base, extra));
        base = rs;
    }
    if (opi != ADDI || base != rt || l0 != 0) {
        tcg_out32(s, opi | TAI(rt, base, l0));
    }
}

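/* Indexed-form load/store opcodes used by qemu_ld/st.  The table index is
   the memory operation: bits 0-1 give the access size and, for loads,
   bit 2 selects the sign-extending variant.  On little-endian targets the
   byte-reversed forms are used; a zero entry means the combination needs
   extra instructions, handled in the callers. */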
static const uint32_t qemu_ldx_opc[8] = {
#ifdef TARGET_WORDS_BIGENDIAN
    LBZX, LHZX, LWZX, LDX,
    0, LHAX, LWAX, LDX
#else
    LBZX, LHBRX, LWBRX, LDBRX,
    0, 0, 0, LDBRX,
#endif
};

static const uint32_t qemu_stx_opc[4] = {
#ifdef TARGET_WORDS_BIGENDIAN
    STBX, STHX, STWX, STDX
#else
    STBX, STHBRX, STWBRX, STDBRX,
#endif
};

static const uint32_t qemu_exts_opc[4] = {
    EXTSB, EXTSH, EXTSW, 0
};

#if defined (CONFIG_SOFTMMU)
/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
 *                                 int mmu_idx, uintptr_t ra)
 */
static const void * const qemu_ld_helpers[4] = {
    helper_ret_ldub_mmu,
    helper_ret_lduw_mmu,
    helper_ret_ldul_mmu,
    helper_ret_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
 *                                 uintxx_t val, int mmu_idx, uintptr_t ra)
 */
static const void * const qemu_st_helpers[4] = {
    helper_ret_stb_mmu,
    helper_ret_stw_mmu,
    helper_ret_stl_mmu,
    helper_ret_stq_mmu,
};

/* Perform the TLB load and compare.  Places the result of the comparison
   in CR7, loads the addend of the TLB into R3, and returns the register
   containing the guest address (zero-extended into R4).  Clobbers R0 and R2. */

static TCGReg tcg_out_tlb_read(TCGContext *s, int s_bits, TCGReg addr_reg,
                               int mem_index, bool is_read)
{
    int cmp_off
        = (is_read
           ? offsetof(CPUArchState, tlb_table[mem_index][0].addr_read)
           : offsetof(CPUArchState, tlb_table[mem_index][0].addr_write));
    int add_off = offsetof(CPUArchState, tlb_table[mem_index][0].addend);
    TCGReg base = TCG_AREG0;

    /* Extract the page index, shifted into place for tlb index.  */
    if (TARGET_LONG_BITS == 32) {
        /* Zero-extend the address into a place helpful for further use.  */
        tcg_out_ext32u(s, TCG_REG_R4, addr_reg);
        addr_reg = TCG_REG_R4;
    } else {
        tcg_out_rld(s, RLDICL, TCG_REG_R3, addr_reg,
                    64 - TARGET_PAGE_BITS, 64 - CPU_TLB_BITS);
    }

    /* Compensate for very large offsets.  */
    if (add_off >= 0x8000) {
        /* Most target env are smaller than 32k; none are larger than 64k.
           Simplify the logic here merely to offset by 0x7ff0, giving us a
           range just shy of 64k.  Check this assumption.  */
        QEMU_BUILD_BUG_ON(offsetof(CPUArchState,
                                   tlb_table[NB_MMU_MODES - 1][1])
                          > 0x7ff0 + 0x7fff);
        tcg_out32(s, ADDI | TAI(TCG_REG_R2, base, 0x7ff0));
        base = TCG_REG_R2;
        cmp_off -= 0x7ff0;
        add_off -= 0x7ff0;
    }

    /* Extraction and shifting, part 2.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_rlw(s, RLWINM, TCG_REG_R3, addr_reg,
                    32 - (TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS),
                    32 - (CPU_TLB_BITS + CPU_TLB_ENTRY_BITS),
                    31 - CPU_TLB_ENTRY_BITS);
    } else {
        tcg_out_shli64(s, TCG_REG_R3, TCG_REG_R3, CPU_TLB_ENTRY_BITS);
    }

    tcg_out32(s, ADD | TAB(TCG_REG_R3, TCG_REG_R3, base));

    /* Load the tlb comparator.  */
    tcg_out32(s, LD_ADDR | TAI(TCG_REG_R2, TCG_REG_R3, cmp_off));

    /* Load the TLB addend for use on the fast path.  Do this asap
       to minimize any load use delay.  */
    tcg_out32(s, LD | TAI(TCG_REG_R3, TCG_REG_R3, add_off));

    /* Clear the non-page, non-alignment bits from the address.  */
    if (TARGET_LONG_BITS == 32) {
        tcg_out_rlw(s, RLWINM, TCG_REG_R0, addr_reg, 0,
                    (32 - s_bits) & 31, 31 - TARGET_PAGE_BITS);
    } else if (!s_bits) {
        tcg_out_rld(s, RLDICR, TCG_REG_R0, addr_reg, 0, 63 - TARGET_PAGE_BITS);
    } else {
        tcg_out_rld(s, RLDICL, TCG_REG_R0, addr_reg,
                    64 - TARGET_PAGE_BITS, TARGET_PAGE_BITS - s_bits);
        tcg_out_rld(s, RLDICL, TCG_REG_R0, TCG_REG_R0, TARGET_PAGE_BITS, 0);
    }

    tcg_out32(s, CMP | BF(7) | RA(TCG_REG_R0) | RB(TCG_REG_R2) | CMP_L);

    return addr_reg;
}

/* Record the context of a call to the out of line helper code for the slow
   path for a load or store, so that we can later generate the correct
   helper code.  */
static void add_qemu_ldst_label(TCGContext *s, bool is_ld, int opc,
                                int data_reg, int addr_reg, int mem_index,
                                uint8_t *raddr, uint8_t *label_ptr)
{
    int idx;
    TCGLabelQemuLdst *label;

    if (s->nb_qemu_ldst_labels >= TCG_MAX_QEMU_LDST) {
        tcg_abort();
    }

    idx = s->nb_qemu_ldst_labels++;
    label = (TCGLabelQemuLdst *)&s->qemu_ldst_labels[idx];
    label->is_ld = is_ld;
    label->opc = opc;
    label->datalo_reg = data_reg;
    label->addrlo_reg = addr_reg;
    label->mem_index = mem_index;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr;
}

static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    int opc = lb->opc;
    int s_bits = opc & 3;

    reloc_pc14(lb->label_ptr[0], (uintptr_t)s->code_ptr);

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_R3, TCG_AREG0);

    /* If the address needed to be zero-extended, we'll have already
       placed it in R4.  The only remaining case is 64-bit guest.  */
    tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, lb->addrlo_reg);

    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, lb->mem_index);
    tcg_out32(s, MFSPR | RT(TCG_REG_R6) | LR);

    tcg_out_call(s, (tcg_target_long)qemu_ld_helpers[s_bits], 1);

    if (opc & 4) {
        uint32_t insn = qemu_exts_opc[s_bits];
        tcg_out32(s, insn | RA(lb->datalo_reg) | RS(TCG_REG_R3));
    } else {
        tcg_out_mov(s, TCG_TYPE_I64, lb->datalo_reg, TCG_REG_R3);
    }

    tcg_out_b(s, 0, (uintptr_t)lb->raddr);
}

static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
{
    int opc = lb->opc;

    reloc_pc14(lb->label_ptr[0], (uintptr_t)s->code_ptr);

    tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R3, TCG_AREG0);

    /* If the address needed to be zero-extended, we'll have already
       placed it in R4.  The only remaining case is 64-bit guest.  */
    tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_R4, lb->addrlo_reg);

    tcg_out_rld(s, RLDICL, TCG_REG_R5, lb->datalo_reg,
                0, 64 - (1 << (3 + opc)));
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R6, lb->mem_index);
    tcg_out32(s, MFSPR | RT(TCG_REG_R7) | LR);

    tcg_out_call(s, (tcg_target_long)qemu_st_helpers[opc], 1);

    tcg_out_b(s, 0, (uintptr_t)lb->raddr);
}

void tcg_out_tb_finalize(TCGContext *s)
{
    int i, n = s->nb_qemu_ldst_labels;

    /* qemu_ld/st slow paths */
    for (i = 0; i < n; i++) {
        TCGLabelQemuLdst *label = &s->qemu_ldst_labels[i];
        if (label->is_ld) {
            tcg_out_qemu_ld_slow_path(s, label);
        } else {
            tcg_out_qemu_st_slow_path(s, label);
        }
    }
}
#endif /* SOFTMMU */

541dd4ce 1018static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
810260a8 1019{
fedee3e7 1020 TCGReg addr_reg, data_reg, rbase;
49d9870a 1021 uint32_t insn, s_bits;
810260a8 1022#ifdef CONFIG_SOFTMMU
49d9870a 1023 int mem_index;
7f12d649 1024 void *label_ptr;
810260a8 1025#endif
1026
1027 data_reg = *args++;
1028 addr_reg = *args++;
49d9870a 1029 s_bits = opc & 3;
9df3b45d
DG
1030
1031#ifdef CONFIG_SOFTMMU
810260a8 1032 mem_index = *args;
810260a8 1033
fedee3e7 1034 addr_reg = tcg_out_tlb_read(s, s_bits, addr_reg, mem_index, true);
810260a8 1035
7f12d649
RH
1036 /* Load a pointer into the current opcode w/conditional branch-link. */
1037 label_ptr = s->code_ptr;
1038 tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
810260a8 1039
fedee3e7 1040 rbase = TCG_REG_R3;
810260a8 1041#else /* !CONFIG_SOFTMMU */
f6548c0a 1042 rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
fedee3e7
RH
1043 if (TARGET_LONG_BITS == 32) {
1044 tcg_out_ext32u(s, TCG_REG_R2, addr_reg);
1045 addr_reg = TCG_REG_R2;
1046 }
810260a8 1047#endif
1048
49d9870a
RH
1049 insn = qemu_ldx_opc[opc];
1050 if (!HAVE_ISA_2_06 && insn == LDBRX) {
fedee3e7
RH
1051 tcg_out32(s, ADDI | TAI(TCG_REG_R0, addr_reg, 4));
1052 tcg_out32(s, LWBRX | TAB(data_reg, rbase, addr_reg));
1053 tcg_out32(s, LWBRX | TAB(TCG_REG_R0, rbase, TCG_REG_R0));
1054 tcg_out_rld(s, RLDIMI, data_reg, TCG_REG_R0, 32, 0);
49d9870a 1055 } else if (insn) {
fedee3e7 1056 tcg_out32(s, insn | TAB(data_reg, rbase, addr_reg));
49d9870a
RH
1057 } else {
1058 insn = qemu_ldx_opc[s_bits];
fedee3e7 1059 tcg_out32(s, insn | TAB(data_reg, rbase, addr_reg));
49d9870a 1060 insn = qemu_exts_opc[s_bits];
541dd4ce 1061 tcg_out32(s, insn | RA(data_reg) | RS(data_reg));
810260a8 1062 }
1063
1064#ifdef CONFIG_SOFTMMU
7f12d649
RH
1065 add_qemu_ldst_label(s, true, opc, data_reg, addr_reg, mem_index,
1066 s->code_ptr, label_ptr);
810260a8 1067#endif
1068}
1069
541dd4ce 1070static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
810260a8 1071{
fedee3e7 1072 TCGReg addr_reg, rbase, data_reg;
49d9870a 1073 uint32_t insn;
810260a8 1074#ifdef CONFIG_SOFTMMU
aceac8d6 1075 int mem_index;
7f12d649 1076 void *label_ptr;
810260a8 1077#endif
1078
1079 data_reg = *args++;
1080 addr_reg = *args++;
810260a8 1081
1082#ifdef CONFIG_SOFTMMU
9df3b45d
DG
1083 mem_index = *args;
1084
fedee3e7 1085 addr_reg = tcg_out_tlb_read(s, opc, addr_reg, mem_index, false);
810260a8 1086
7f12d649
RH
1087 /* Load a pointer into the current opcode w/conditional branch-link. */
1088 label_ptr = s->code_ptr;
1089 tcg_out_bc_noaddr(s, BC | BI(7, CR_EQ) | BO_COND_FALSE | LK);
810260a8 1090
fedee3e7 1091 rbase = TCG_REG_R3;
810260a8 1092#else /* !CONFIG_SOFTMMU */
f6548c0a 1093 rbase = GUEST_BASE ? TCG_GUEST_BASE_REG : 0;
fedee3e7
RH
1094 if (TARGET_LONG_BITS == 32) {
1095 tcg_out_ext32u(s, TCG_REG_R2, addr_reg);
1096 addr_reg = TCG_REG_R2;
1097 }
810260a8 1098#endif
1099
49d9870a
RH
1100 insn = qemu_stx_opc[opc];
1101 if (!HAVE_ISA_2_06 && insn == STDBRX) {
fedee3e7
RH
1102 tcg_out32(s, STWBRX | SAB(data_reg, rbase, addr_reg));
1103 tcg_out32(s, ADDI | TAI(TCG_REG_R2, addr_reg, 4));
8327a470 1104 tcg_out_shri64(s, TCG_REG_R0, data_reg, 32);
fedee3e7 1105 tcg_out32(s, STWBRX | SAB(TCG_REG_R0, rbase, TCG_REG_R2));
49d9870a 1106 } else {
fedee3e7 1107 tcg_out32(s, insn | SAB(data_reg, rbase, addr_reg));
810260a8 1108 }
1109
1110#ifdef CONFIG_SOFTMMU
7f12d649
RH
1111 add_qemu_ldst_label(s, false, opc, data_reg, addr_reg, mem_index,
1112 s->code_ptr, label_ptr);
810260a8 1113#endif
1114}
1115
fa94c3be
RH
1116#define FRAME_SIZE ((int) \
1117 ((8 /* back chain */ \
1118 + 8 /* CR */ \
1119 + 8 /* LR */ \
1120 + 8 /* compiler doubleword */ \
1121 + 8 /* link editor doubleword */ \
1122 + 8 /* TOC save area */ \
1123 + TCG_STATIC_CALL_ARGS_SIZE \
1124 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
1125 + ARRAY_SIZE(tcg_target_callee_save_regs) * 8 \
1126 + 15) & ~15))
1127
1128#define REG_SAVE_BOT (FRAME_SIZE - ARRAY_SIZE(tcg_target_callee_save_regs) * 8)
1129
541dd4ce 1130static void tcg_target_qemu_prologue(TCGContext *s)
810260a8 1131{
fa94c3be
RH
1132 int i;
1133
1134 tcg_set_frame(s, TCG_REG_CALL_STACK,
1135 REG_SAVE_BOT - CPU_TEMP_BUF_NLONGS * sizeof(long),
541dd4ce 1136 CPU_TEMP_BUF_NLONGS * sizeof(long));
136a0b5a 1137
5d7ff5bb 1138#ifndef __APPLE__
a69abbe0 1139 /* First emit adhoc function descriptor */
5e0f40cf 1140 tcg_out64(s, (uint64_t)s->code_ptr + 24); /* entry point */
a69abbe0 1141 s->code_ptr += 16; /* skip TOC and environment pointer */
5d7ff5bb 1142#endif
a69abbe0 1143
1144 /* Prologue */
8327a470 1145 tcg_out32(s, MFSPR | RT(TCG_REG_R0) | LR);
fa94c3be 1146 tcg_out32(s, STDU | SAI(TCG_REG_R1, TCG_REG_R1, -FRAME_SIZE));
29b69198
RH
1147 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
1148 tcg_out32(s, STD | SAI(tcg_target_callee_save_regs[i], 1,
fa94c3be 1149 REG_SAVE_BOT + i * 8));
29b69198 1150 }
fa94c3be 1151 tcg_out32(s, STD | SAI(TCG_REG_R0, TCG_REG_R1, FRAME_SIZE + 16));
810260a8 1152
f6548c0a 1153#ifdef CONFIG_USE_GUEST_BASE
b9e946c7 1154 if (GUEST_BASE) {
541dd4ce
RH
1155 tcg_out_movi(s, TCG_TYPE_I64, TCG_GUEST_BASE_REG, GUEST_BASE);
1156 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
b9e946c7 1157 }
f6548c0a 1158#endif
1159
541dd4ce
RH
1160 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
1161 tcg_out32(s, MTSPR | RS(tcg_target_call_iarg_regs[1]) | CTR);
1162 tcg_out32(s, BCCTR | BO_ALWAYS);
a69abbe0 1163
1164 /* Epilogue */
810260a8 1165 tb_ret_addr = s->code_ptr;
1166
29b69198 1167 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i) {
8327a470 1168 tcg_out32(s, LD | TAI(tcg_target_callee_save_regs[i], TCG_REG_R1,
fa94c3be 1169 REG_SAVE_BOT + i * 8));
29b69198 1170 }
fa94c3be 1171 tcg_out32(s, LD | TAI(TCG_REG_R0, TCG_REG_R1, FRAME_SIZE + 16));
8327a470 1172 tcg_out32(s, MTSPR | RS(TCG_REG_R0) | LR);
fa94c3be 1173 tcg_out32(s, ADDI | TAI(TCG_REG_R1, TCG_REG_R1, FRAME_SIZE));
2fd8eddc 1174 tcg_out32(s, BCLR | BO_ALWAYS);
810260a8 1175}
1176
b18d5d2b
RH
1177static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
1178 TCGReg arg1, intptr_t arg2)
810260a8 1179{
b18d5d2b
RH
1180 int opi, opx;
1181
541dd4ce 1182 if (type == TCG_TYPE_I32) {
b18d5d2b 1183 opi = LWZ, opx = LWZX;
541dd4ce 1184 } else {
b18d5d2b 1185 opi = LD, opx = LDX;
541dd4ce 1186 }
b18d5d2b 1187 tcg_out_mem_long(s, opi, opx, ret, arg1, arg2);
810260a8 1188}
1189
b18d5d2b
RH
1190static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
1191 TCGReg arg1, intptr_t arg2)
810260a8 1192{
b18d5d2b
RH
1193 int opi, opx;
1194
541dd4ce 1195 if (type == TCG_TYPE_I32) {
b18d5d2b 1196 opi = STW, opx = STWX;
541dd4ce 1197 } else {
b18d5d2b 1198 opi = STD, opx = STDX;
541dd4ce 1199 }
b18d5d2b 1200 tcg_out_mem_long(s, opi, opx, arg, arg1, arg2);
810260a8 1201}
1202
4c314da6
RH
1203static void tcg_out_cmp(TCGContext *s, int cond, TCGArg arg1, TCGArg arg2,
1204 int const_arg2, int cr, TCGType type)
810260a8 1205{
1206 int imm;
1207 uint32_t op;
1208
991041a4
RH
1209 /* Simplify the comparisons below wrt CMPI. */
1210 if (type == TCG_TYPE_I32) {
1211 arg2 = (int32_t)arg2;
1212 }
1213
810260a8 1214 switch (cond) {
1215 case TCG_COND_EQ:
1216 case TCG_COND_NE:
1217 if (const_arg2) {
1218 if ((int16_t) arg2 == arg2) {
1219 op = CMPI;
1220 imm = 1;
1221 break;
541dd4ce 1222 } else if ((uint16_t) arg2 == arg2) {
810260a8 1223 op = CMPLI;
1224 imm = 1;
1225 break;
1226 }
1227 }
1228 op = CMPL;
1229 imm = 0;
1230 break;
1231
1232 case TCG_COND_LT:
1233 case TCG_COND_GE:
1234 case TCG_COND_LE:
1235 case TCG_COND_GT:
1236 if (const_arg2) {
1237 if ((int16_t) arg2 == arg2) {
1238 op = CMPI;
1239 imm = 1;
1240 break;
1241 }
1242 }
1243 op = CMP;
1244 imm = 0;
1245 break;
1246
1247 case TCG_COND_LTU:
1248 case TCG_COND_GEU:
1249 case TCG_COND_LEU:
1250 case TCG_COND_GTU:
1251 if (const_arg2) {
1252 if ((uint16_t) arg2 == arg2) {
1253 op = CMPLI;
1254 imm = 1;
1255 break;
1256 }
1257 }
1258 op = CMPL;
1259 imm = 0;
1260 break;
1261
1262 default:
541dd4ce 1263 tcg_abort();
810260a8 1264 }
4c314da6 1265 op |= BF(cr) | ((type == TCG_TYPE_I64) << 21);
810260a8 1266
4c314da6
RH
1267 if (imm) {
1268 tcg_out32(s, op | RA(arg1) | (arg2 & 0xffff));
1269 } else {
810260a8 1270 if (const_arg2) {
8327a470
RH
1271 tcg_out_movi(s, type, TCG_REG_R0, arg2);
1272 arg2 = TCG_REG_R0;
810260a8 1273 }
4c314da6 1274 tcg_out32(s, op | RA(arg1) | RB(arg2));
810260a8 1275 }
810260a8 1276}
1277
70fac59a
RH
1278static void tcg_out_setcond_eq0(TCGContext *s, TCGType type,
1279 TCGReg dst, TCGReg src)
1cd62ae9 1280{
70fac59a
RH
1281 tcg_out32(s, (type == TCG_TYPE_I64 ? CNTLZD : CNTLZW) | RS(src) | RA(dst));
1282 tcg_out_shri64(s, dst, dst, type == TCG_TYPE_I64 ? 6 : 5);
1283}
1cd62ae9 1284
70fac59a
RH
1285static void tcg_out_setcond_ne0(TCGContext *s, TCGReg dst, TCGReg src)
1286{
1287 /* X != 0 implies X + -1 generates a carry. Extra addition
1288 trickery means: R = X-1 + ~X + C = X-1 + (-X+1) + C = C. */
1289 if (dst != src) {
1290 tcg_out32(s, ADDIC | TAI(dst, src, -1));
1291 tcg_out32(s, SUBFE | TAB(dst, dst, src));
1292 } else {
8327a470
RH
1293 tcg_out32(s, ADDIC | TAI(TCG_REG_R0, src, -1));
1294 tcg_out32(s, SUBFE | TAB(dst, TCG_REG_R0, src));
70fac59a
RH
1295 }
1296}
1cd62ae9 1297
70fac59a
RH
1298static TCGReg tcg_gen_setcond_xor(TCGContext *s, TCGReg arg1, TCGArg arg2,
1299 bool const_arg2)
1300{
1301 if (const_arg2) {
1302 if ((uint32_t)arg2 == arg2) {
1303 tcg_out_xori32(s, TCG_REG_R0, arg1, arg2);
1304 } else {
1305 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R0, arg2);
1306 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, TCG_REG_R0));
1cd62ae9 1307 }
70fac59a
RH
1308 } else {
1309 tcg_out32(s, XOR | SAB(arg1, TCG_REG_R0, arg2));
1310 }
1311 return TCG_REG_R0;
1312}
1cd62ae9 1313
70fac59a
RH
1314static void tcg_out_setcond(TCGContext *s, TCGType type, TCGCond cond,
1315 TCGArg arg0, TCGArg arg1, TCGArg arg2,
1316 int const_arg2)
1317{
1318 int crop, sh;
1319
1320 /* Ignore high bits of a potential constant arg2. */
1321 if (type == TCG_TYPE_I32) {
1322 arg2 = (uint32_t)arg2;
1323 }
1324
1325 /* Handle common and trivial cases before handling anything else. */
1326 if (arg2 == 0) {
1327 switch (cond) {
1328 case TCG_COND_EQ:
1329 tcg_out_setcond_eq0(s, type, arg0, arg1);
1330 return;
1331 case TCG_COND_NE:
1332 if (type == TCG_TYPE_I32) {
1333 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1334 arg1 = TCG_REG_R0;
1cd62ae9 1335 }
70fac59a
RH
1336 tcg_out_setcond_ne0(s, arg0, arg1);
1337 return;
1338 case TCG_COND_GE:
1339 tcg_out32(s, NOR | SAB(arg1, arg0, arg1));
1340 arg1 = arg0;
1341 /* FALLTHRU */
1342 case TCG_COND_LT:
1343 /* Extract the sign bit. */
1344 tcg_out_rld(s, RLDICL, arg0, arg1,
1345 type == TCG_TYPE_I64 ? 1 : 33, 63);
1346 return;
1347 default:
1348 break;
1cd62ae9 1349 }
70fac59a 1350 }
1cd62ae9 1351
70fac59a
RH
1352 /* If we have ISEL, we can implement everything with 3 or 4 insns.
1353 All other cases below are also at least 3 insns, so speed up the
1354 code generator by not considering them and always using ISEL. */
1355 if (HAVE_ISEL) {
1356 int isel, tab;
752c1fdb 1357
70fac59a
RH
1358 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1359
1360 isel = tcg_to_isel[cond];
1361
1362 tcg_out_movi(s, type, arg0, 1);
1363 if (isel & 1) {
1364 /* arg0 = (bc ? 0 : 1) */
1365 tab = TAB(arg0, 0, arg0);
1366 isel &= ~1;
1367 } else {
1368 /* arg0 = (bc ? 1 : 0) */
1369 tcg_out_movi(s, type, TCG_REG_R0, 0);
1370 tab = TAB(arg0, arg0, TCG_REG_R0);
1cd62ae9 1371 }
70fac59a
RH
1372 tcg_out32(s, isel | tab);
1373 return;
1374 }
1375
1376 switch (cond) {
1377 case TCG_COND_EQ:
1378 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1379 tcg_out_setcond_eq0(s, type, arg0, arg1);
1380 return;
1381
1382 case TCG_COND_NE:
1383 arg1 = tcg_gen_setcond_xor(s, arg1, arg2, const_arg2);
1384 /* Discard the high bits only once, rather than both inputs. */
1385 if (type == TCG_TYPE_I32) {
1386 tcg_out_ext32u(s, TCG_REG_R0, arg1);
1387 arg1 = TCG_REG_R0;
1cd62ae9 1388 }
70fac59a
RH
1389 tcg_out_setcond_ne0(s, arg0, arg1);
1390 return;
1cd62ae9 1391
1392 case TCG_COND_GT:
1393 case TCG_COND_GTU:
1394 sh = 30;
1395 crop = 0;
1396 goto crtest;
1397
1398 case TCG_COND_LT:
1399 case TCG_COND_LTU:
1400 sh = 29;
1401 crop = 0;
1402 goto crtest;
1403
1404 case TCG_COND_GE:
1405 case TCG_COND_GEU:
1406 sh = 31;
541dd4ce 1407 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_LT) | BB(7, CR_LT);
1cd62ae9 1408 goto crtest;
1409
1410 case TCG_COND_LE:
1411 case TCG_COND_LEU:
1412 sh = 31;
541dd4ce 1413 crop = CRNOR | BT(7, CR_EQ) | BA(7, CR_GT) | BB(7, CR_GT);
1cd62ae9 1414 crtest:
6995a4a0
RH
1415 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1416 if (crop) {
1417 tcg_out32(s, crop);
1418 }
1419 tcg_out32(s, MFOCRF | RT(TCG_REG_R0) | FXM(7));
1420 tcg_out_rlw(s, RLWINM, arg0, TCG_REG_R0, sh, 31, 31);
1cd62ae9 1421 break;
1422
1423 default:
541dd4ce 1424 tcg_abort();
1cd62ae9 1425 }
1426}
1427
541dd4ce 1428static void tcg_out_bc(TCGContext *s, int bc, int label_index)
810260a8 1429{
1430 TCGLabel *l = &s->labels[label_index];
1431
541dd4ce
RH
1432 if (l->has_value) {
1433 tcg_out32(s, bc | reloc_pc14_val(s->code_ptr, l->u.value));
1434 } else {
c7ca6a2b
RH
1435 tcg_out_reloc(s, s->code_ptr, R_PPC_REL14, label_index, 0);
1436 tcg_out_bc_noaddr(s, bc);
810260a8 1437 }
1438}
1439
4c314da6
RH
1440static void tcg_out_brcond(TCGContext *s, TCGCond cond,
1441 TCGArg arg1, TCGArg arg2, int const_arg2,
1442 int label_index, TCGType type)
810260a8 1443{
4c314da6
RH
1444 tcg_out_cmp(s, cond, arg1, arg2, const_arg2, 7, type);
1445 tcg_out_bc(s, tcg_to_bc[cond], label_index);
810260a8 1446}
1447
027ffea9
RH
1448static void tcg_out_movcond(TCGContext *s, TCGType type, TCGCond cond,
1449 TCGArg dest, TCGArg c1, TCGArg c2, TCGArg v1,
1450 TCGArg v2, bool const_c2)
1451{
1452 /* If for some reason both inputs are zero, don't produce bad code. */
1453 if (v1 == 0 && v2 == 0) {
1454 tcg_out_movi(s, type, dest, 0);
1455 return;
1456 }
1457
1458 tcg_out_cmp(s, cond, c1, c2, const_c2, 7, type);
1459
1460 if (HAVE_ISEL) {
1461 int isel = tcg_to_isel[cond];
1462
1463 /* Swap the V operands if the operation indicates inversion. */
1464 if (isel & 1) {
1465 int t = v1;
1466 v1 = v2;
1467 v2 = t;
1468 isel &= ~1;
1469 }
1470 /* V1 == 0 is handled by isel; V2 == 0 must be handled by hand. */
1471 if (v2 == 0) {
8327a470 1472 tcg_out_movi(s, type, TCG_REG_R0, 0);
027ffea9
RH
1473 }
1474 tcg_out32(s, isel | TAB(dest, v1, v2));
1475 } else {
1476 if (dest == v2) {
1477 cond = tcg_invert_cond(cond);
1478 v2 = v1;
1479 } else if (dest != v1) {
1480 if (v1 == 0) {
1481 tcg_out_movi(s, type, dest, 0);
1482 } else {
1483 tcg_out_mov(s, type, dest, v1);
1484 }
1485 }
1486 /* Branch forward over one insn */
1487 tcg_out32(s, tcg_to_bc[cond] | 8);
1488 if (v2 == 0) {
1489 tcg_out_movi(s, type, dest, 0);
1490 } else {
1491 tcg_out_mov(s, type, dest, v2);
1492 }
1493 }
1494}
1495
541dd4ce 1496void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr)
810260a8 1497{
1498 TCGContext s;
1499 unsigned long patch_size;
1500
1501 s.code_ptr = (uint8_t *) jmp_addr;
541dd4ce 1502 tcg_out_b(&s, 0, addr);
810260a8 1503 patch_size = s.code_ptr - (uint8_t *) jmp_addr;
541dd4ce 1504 flush_icache_range(jmp_addr, jmp_addr + patch_size);
810260a8 1505}
1506
541dd4ce
RH
1507static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
1508 const int *const_args)
810260a8 1509{
ee924fa6 1510 TCGArg a0, a1, a2;
e46b9681 1511 int c;
1512
810260a8 1513 switch (opc) {
1514 case INDEX_op_exit_tb:
541dd4ce
RH
1515 tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_R3, args[0]);
1516 tcg_out_b(s, 0, (tcg_target_long)tb_ret_addr);
810260a8 1517 break;
1518 case INDEX_op_goto_tb:
1519 if (s->tb_jmp_offset) {
541dd4ce 1520 /* Direct jump method. */
810260a8 1521 s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
5424fd10 1522 s->code_ptr += 28;
541dd4ce
RH
1523 } else {
1524 /* Indirect jump method. */
1525 tcg_abort();
810260a8 1526 }
1527 s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
1528 break;
1529 case INDEX_op_br:
1530 {
1531 TCGLabel *l = &s->labels[args[0]];
1532
1533 if (l->has_value) {
541dd4ce
RH
1534 tcg_out_b(s, 0, l->u.value);
1535 } else {
c7ca6a2b
RH
1536 tcg_out_reloc(s, s->code_ptr, R_PPC_REL24, args[0], 0);
1537 tcg_out_b_noaddr(s, B);
810260a8 1538 }
1539 }
1540 break;
1541 case INDEX_op_call:
541dd4ce 1542 tcg_out_call(s, args[0], const_args[0]);
810260a8 1543 break;
810260a8 1544 case INDEX_op_movi_i32:
541dd4ce 1545 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]);
810260a8 1546 break;
1547 case INDEX_op_movi_i64:
541dd4ce 1548 tcg_out_movi(s, TCG_TYPE_I64, args[0], args[1]);
810260a8 1549 break;
1550 case INDEX_op_ld8u_i32:
1551 case INDEX_op_ld8u_i64:
b18d5d2b 1552 tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
810260a8 1553 break;
1554 case INDEX_op_ld8s_i32:
1555 case INDEX_op_ld8s_i64:
b18d5d2b 1556 tcg_out_mem_long(s, LBZ, LBZX, args[0], args[1], args[2]);
541dd4ce 1557 tcg_out32(s, EXTSB | RS(args[0]) | RA(args[0]));
810260a8 1558 break;
1559 case INDEX_op_ld16u_i32:
1560 case INDEX_op_ld16u_i64:
b18d5d2b 1561 tcg_out_mem_long(s, LHZ, LHZX, args[0], args[1], args[2]);
810260a8 1562 break;
1563 case INDEX_op_ld16s_i32:
1564 case INDEX_op_ld16s_i64:
b18d5d2b 1565 tcg_out_mem_long(s, LHA, LHAX, args[0], args[1], args[2]);
810260a8 1566 break;
1567 case INDEX_op_ld_i32:
1568 case INDEX_op_ld32u_i64:
b18d5d2b 1569 tcg_out_mem_long(s, LWZ, LWZX, args[0], args[1], args[2]);
810260a8 1570 break;
1571 case INDEX_op_ld32s_i64:
b18d5d2b 1572 tcg_out_mem_long(s, LWA, LWAX, args[0], args[1], args[2]);
810260a8 1573 break;
1574 case INDEX_op_ld_i64:
b18d5d2b 1575 tcg_out_mem_long(s, LD, LDX, args[0], args[1], args[2]);
810260a8 1576 break;
1577 case INDEX_op_st8_i32:
1578 case INDEX_op_st8_i64:
b18d5d2b 1579 tcg_out_mem_long(s, STB, STBX, args[0], args[1], args[2]);
810260a8 1580 break;
1581 case INDEX_op_st16_i32:
1582 case INDEX_op_st16_i64:
b18d5d2b 1583 tcg_out_mem_long(s, STH, STHX, args[0], args[1], args[2]);
810260a8 1584 break;
1585 case INDEX_op_st_i32:
1586 case INDEX_op_st32_i64:
b18d5d2b 1587 tcg_out_mem_long(s, STW, STWX, args[0], args[1], args[2]);
810260a8 1588 break;
1589 case INDEX_op_st_i64:
b18d5d2b 1590 tcg_out_mem_long(s, STD, STDX, args[0], args[1], args[2]);
810260a8 1591 break;
1592
1593 case INDEX_op_add_i32:
ee924fa6
RH
1594 a0 = args[0], a1 = args[1], a2 = args[2];
1595 if (const_args[2]) {
ee924fa6 1596 do_addi_32:
b18d5d2b 1597 tcg_out_mem_long(s, ADDI, ADD, a0, a1, (int32_t)a2);
ee924fa6
RH
1598 } else {
1599 tcg_out32(s, ADD | TAB(a0, a1, a2));
1600 }
810260a8 1601 break;
1602 case INDEX_op_sub_i32:
ee924fa6 1603 a0 = args[0], a1 = args[1], a2 = args[2];
148bdd23
RH
1604 if (const_args[1]) {
1605 if (const_args[2]) {
1606 tcg_out_movi(s, TCG_TYPE_I32, a0, a1 - a2);
1607 } else {
1608 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
1609 }
1610 } else if (const_args[2]) {
ee924fa6
RH
1611 a2 = -a2;
1612 goto do_addi_32;
1613 } else {
1614 tcg_out32(s, SUBF | TAB(a0, a2, a1));
1615 }
810260a8 1616 break;
1617
1618 case INDEX_op_and_i32:
37251b98 1619 a0 = args[0], a1 = args[1], a2 = args[2];
a9249dff 1620 if (const_args[2]) {
37251b98 1621 tcg_out_andi32(s, a0, a1, a2);
a9249dff 1622 } else {
37251b98 1623 tcg_out32(s, AND | SAB(a1, a0, a2));
a9249dff
RH
1624 }
1625 break;
1626 case INDEX_op_and_i64:
37251b98 1627 a0 = args[0], a1 = args[1], a2 = args[2];
810260a8 1628 if (const_args[2]) {
37251b98 1629 tcg_out_andi64(s, a0, a1, a2);
637af30c 1630 } else {
37251b98 1631 tcg_out32(s, AND | SAB(a1, a0, a2));
810260a8 1632 }
810260a8 1633 break;
fe6f943f 1634 case INDEX_op_or_i64:
810260a8 1635 case INDEX_op_or_i32:
dce74c57 1636 a0 = args[0], a1 = args[1], a2 = args[2];
810260a8 1637 if (const_args[2]) {
dce74c57
RH
1638 tcg_out_ori32(s, a0, a1, a2);
1639 } else {
1640 tcg_out32(s, OR | SAB(a1, a0, a2));
810260a8 1641 }
810260a8 1642 break;
fe6f943f 1643 case INDEX_op_xor_i64:
810260a8 1644 case INDEX_op_xor_i32:
dce74c57 1645 a0 = args[0], a1 = args[1], a2 = args[2];
810260a8 1646 if (const_args[2]) {
dce74c57
RH
1647 tcg_out_xori32(s, a0, a1, a2);
1648 } else {
1649 tcg_out32(s, XOR | SAB(a1, a0, a2));
810260a8 1650 }
810260a8 1651 break;
ce1010d6 1652 case INDEX_op_andc_i32:
37251b98
RH
1653 a0 = args[0], a1 = args[1], a2 = args[2];
1654 if (const_args[2]) {
1655 tcg_out_andi32(s, a0, a1, ~a2);
1656 } else {
1657 tcg_out32(s, ANDC | SAB(a1, a0, a2));
1658 }
1659 break;
ce1010d6 1660 case INDEX_op_andc_i64:
37251b98
RH
1661 a0 = args[0], a1 = args[1], a2 = args[2];
1662 if (const_args[2]) {
1663 tcg_out_andi64(s, a0, a1, ~a2);
1664 } else {
1665 tcg_out32(s, ANDC | SAB(a1, a0, a2));
1666 }
ce1010d6
RH
1667 break;
1668 case INDEX_op_orc_i32:
37251b98
RH
1669 if (const_args[2]) {
1670 tcg_out_ori32(s, args[0], args[1], ~args[2]);
1671 break;
1672 }
1673 /* FALLTHRU */
ce1010d6
RH
1674 case INDEX_op_orc_i64:
1675 tcg_out32(s, ORC | SAB(args[1], args[0], args[2]));
1676 break;
1677 case INDEX_op_eqv_i32:
37251b98
RH
1678 if (const_args[2]) {
1679 tcg_out_xori32(s, args[0], args[1], ~args[2]);
1680 break;
1681 }
1682 /* FALLTHRU */
ce1010d6
RH
1683 case INDEX_op_eqv_i64:
1684 tcg_out32(s, EQV | SAB(args[1], args[0], args[2]));
1685 break;
1686 case INDEX_op_nand_i32:
1687 case INDEX_op_nand_i64:
1688 tcg_out32(s, NAND | SAB(args[1], args[0], args[2]));
1689 break;
1690 case INDEX_op_nor_i32:
1691 case INDEX_op_nor_i64:
1692 tcg_out32(s, NOR | SAB(args[1], args[0], args[2]));
1693 break;
810260a8 1694
1695 case INDEX_op_mul_i32:
ef809300 1696 a0 = args[0], a1 = args[1], a2 = args[2];
810260a8 1697 if (const_args[2]) {
ef809300
RH
1698 tcg_out32(s, MULLI | TAI(a0, a1, a2));
1699 } else {
1700 tcg_out32(s, MULLW | TAB(a0, a1, a2));
810260a8 1701 }
810260a8 1702 break;
1703
1704 case INDEX_op_div_i32:
541dd4ce 1705 tcg_out32(s, DIVW | TAB(args[0], args[1], args[2]));
810260a8 1706 break;
1707
1708 case INDEX_op_divu_i32:
541dd4ce 1709 tcg_out32(s, DIVWU | TAB(args[0], args[1], args[2]));
810260a8 1710 break;
1711
810260a8 1712 case INDEX_op_shl_i32:
1713 if (const_args[2]) {
9e555b73
RH
1714 tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31 - args[2]);
1715 } else {
541dd4ce 1716 tcg_out32(s, SLW | SAB(args[1], args[0], args[2]));
9e555b73 1717 }
810260a8 1718 break;
1719 case INDEX_op_shr_i32:
1720 if (const_args[2]) {
9e555b73
RH
1721 tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], args[2], 31);
1722 } else {
541dd4ce 1723 tcg_out32(s, SRW | SAB(args[1], args[0], args[2]));
9e555b73 1724 }
810260a8 1725 break;
1726 case INDEX_op_sar_i32:
541dd4ce
RH
1727 if (const_args[2]) {
1728 tcg_out32(s, SRAWI | RS(args[1]) | RA(args[0]) | SH(args[2]));
1729 } else {
1730 tcg_out32(s, SRAW | SAB(args[1], args[0], args[2]));
1731 }
810260a8 1732 break;
313d91c7
RH
1733 case INDEX_op_rotl_i32:
1734 if (const_args[2]) {
1735 tcg_out_rlw(s, RLWINM, args[0], args[1], args[2], 0, 31);
1736 } else {
1737 tcg_out32(s, RLWNM | SAB(args[1], args[0], args[2])
1738 | MB(0) | ME(31));
1739 }
1740 break;
1741 case INDEX_op_rotr_i32:
1742 if (const_args[2]) {
1743 tcg_out_rlw(s, RLWINM, args[0], args[1], 32 - args[2], 0, 31);
1744 } else {
8327a470
RH
1745 tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 32));
1746 tcg_out32(s, RLWNM | SAB(args[1], args[0], TCG_REG_R0)
313d91c7
RH
1747 | MB(0) | ME(31));
1748 }
1749 break;
810260a8 1750
1751 case INDEX_op_brcond_i32:
4c314da6
RH
1752 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
1753 args[3], TCG_TYPE_I32);
e924bbec 1754 break;
1755
810260a8 1756 case INDEX_op_brcond_i64:
4c314da6
RH
1757 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1],
1758 args[3], TCG_TYPE_I64);
810260a8 1759 break;
1760
1761 case INDEX_op_neg_i32:
810260a8 1762 case INDEX_op_neg_i64:
541dd4ce 1763 tcg_out32(s, NEG | RT(args[0]) | RA(args[1]));
810260a8 1764 break;
1765
157f2662 1766 case INDEX_op_not_i32:
1767 case INDEX_op_not_i64:
541dd4ce 1768 tcg_out32(s, NOR | SAB(args[1], args[0], args[1]));
157f2662 1769 break;
1770
810260a8 1771 case INDEX_op_add_i64:
ee924fa6
RH
1772 a0 = args[0], a1 = args[1], a2 = args[2];
1773 if (const_args[2]) {
ee924fa6 1774 do_addi_64:
b18d5d2b 1775 tcg_out_mem_long(s, ADDI, ADD, a0, a1, a2);
ee924fa6
RH
1776 } else {
1777 tcg_out32(s, ADD | TAB(a0, a1, a2));
1778 }
810260a8 1779 break;
1780 case INDEX_op_sub_i64:
ee924fa6 1781 a0 = args[0], a1 = args[1], a2 = args[2];
148bdd23
RH
1782 if (const_args[1]) {
1783 if (const_args[2]) {
1784 tcg_out_movi(s, TCG_TYPE_I64, a0, a1 - a2);
1785 } else {
1786 tcg_out32(s, SUBFIC | TAI(a0, a2, a1));
1787 }
1788 } else if (const_args[2]) {
ee924fa6
RH
1789 a2 = -a2;
1790 goto do_addi_64;
1791 } else {
1792 tcg_out32(s, SUBF | TAB(a0, a2, a1));
1793 }
810260a8 1794 break;
1795
1796 case INDEX_op_shl_i64:
541dd4ce 1797 if (const_args[2]) {
0a9564b9 1798 tcg_out_shli64(s, args[0], args[1], args[2]);
541dd4ce
RH
1799 } else {
1800 tcg_out32(s, SLD | SAB(args[1], args[0], args[2]));
1801 }
810260a8 1802 break;
1803 case INDEX_op_shr_i64:
541dd4ce 1804 if (const_args[2]) {
5e916c28 1805 tcg_out_shri64(s, args[0], args[1], args[2]);
541dd4ce
RH
1806 } else {
1807 tcg_out32(s, SRD | SAB(args[1], args[0], args[2]));
1808 }
810260a8 1809 break;
1810 case INDEX_op_sar_i64:
fe6f943f 1811 if (const_args[2]) {
541dd4ce
RH
1812 int sh = SH(args[2] & 0x1f) | (((args[2] >> 5) & 1) << 1);
1813 tcg_out32(s, SRADI | RA(args[0]) | RS(args[1]) | sh);
1814 } else {
1815 tcg_out32(s, SRAD | SAB(args[1], args[0], args[2]));
fe6f943f 1816 }
810260a8 1817 break;
313d91c7
RH
1818 case INDEX_op_rotl_i64:
1819 if (const_args[2]) {
1820 tcg_out_rld(s, RLDICL, args[0], args[1], args[2], 0);
1821 } else {
1822 tcg_out32(s, RLDCL | SAB(args[1], args[0], args[2]) | MB64(0));
1823 }
1824 break;
1825 case INDEX_op_rotr_i64:
1826 if (const_args[2]) {
1827 tcg_out_rld(s, RLDICL, args[0], args[1], 64 - args[2], 0);
1828 } else {
8327a470
RH
1829 tcg_out32(s, SUBFIC | TAI(TCG_REG_R0, args[2], 64));
1830 tcg_out32(s, RLDCL | SAB(args[1], args[0], TCG_REG_R0) | MB64(0));
313d91c7
RH
1831 }
1832 break;
810260a8 1833
1834 case INDEX_op_mul_i64:
ef809300
RH
1835 a0 = args[0], a1 = args[1], a2 = args[2];
1836 if (const_args[2]) {
1837 tcg_out32(s, MULLI | TAI(a0, a1, a2));
1838 } else {
1839 tcg_out32(s, MULLD | TAB(a0, a1, a2));
1840 }
810260a8 1841 break;
1842 case INDEX_op_div_i64:
541dd4ce 1843 tcg_out32(s, DIVD | TAB(args[0], args[1], args[2]));
810260a8 1844 break;
1845 case INDEX_op_divu_i64:
541dd4ce 1846 tcg_out32(s, DIVDU | TAB(args[0], args[1], args[2]));
810260a8 1847 break;
810260a8 1848
1849 case INDEX_op_qemu_ld8u:
541dd4ce 1850 tcg_out_qemu_ld(s, args, 0);
810260a8 1851 break;
1852 case INDEX_op_qemu_ld8s:
541dd4ce 1853 tcg_out_qemu_ld(s, args, 0 | 4);
810260a8 1854 break;
1855 case INDEX_op_qemu_ld16u:
541dd4ce 1856 tcg_out_qemu_ld(s, args, 1);
810260a8 1857 break;
1858 case INDEX_op_qemu_ld16s:
541dd4ce 1859 tcg_out_qemu_ld(s, args, 1 | 4);
810260a8 1860 break;
86feb1c8 1861 case INDEX_op_qemu_ld32:
810260a8 1862 case INDEX_op_qemu_ld32u:
541dd4ce 1863 tcg_out_qemu_ld(s, args, 2);
810260a8 1864 break;
1865 case INDEX_op_qemu_ld32s:
541dd4ce 1866 tcg_out_qemu_ld(s, args, 2 | 4);
810260a8 1867 break;
1868 case INDEX_op_qemu_ld64:
541dd4ce 1869 tcg_out_qemu_ld(s, args, 3);
810260a8 1870 break;
1871 case INDEX_op_qemu_st8:
541dd4ce 1872 tcg_out_qemu_st(s, args, 0);
810260a8 1873 break;
1874 case INDEX_op_qemu_st16:
541dd4ce 1875 tcg_out_qemu_st(s, args, 1);
810260a8 1876 break;
1877 case INDEX_op_qemu_st32:
541dd4ce 1878 tcg_out_qemu_st(s, args, 2);
810260a8 1879 break;
1880 case INDEX_op_qemu_st64:
541dd4ce 1881 tcg_out_qemu_st(s, args, 3);
810260a8 1882 break;
1883
e46b9681 1884 case INDEX_op_ext8s_i32:
1885 case INDEX_op_ext8s_i64:
1886 c = EXTSB;
1887 goto gen_ext;
1888 case INDEX_op_ext16s_i32:
1889 case INDEX_op_ext16s_i64:
1890 c = EXTSH;
1891 goto gen_ext;
1892 case INDEX_op_ext32s_i64:
1893 c = EXTSW;
1894 goto gen_ext;
1895 gen_ext:
541dd4ce 1896 tcg_out32(s, c | RS(args[1]) | RA(args[0]));
e46b9681 1897 break;
1898
1cd62ae9 1899 case INDEX_op_setcond_i32:
1900 tcg_out_setcond(s, TCG_TYPE_I32, args[3], args[0], args[1], args[2],
1901 const_args[2]);
1cd62ae9 1902 break;
1903 case INDEX_op_setcond_i64:
1904 tcg_out_setcond(s, TCG_TYPE_I64, args[3], args[0], args[1], args[2],
1905 const_args[2]);
1cd62ae9 1906 break;
1907
1908 case INDEX_op_bswap16_i32:
1909 case INDEX_op_bswap16_i64:
1910 a0 = args[0], a1 = args[1];
1911 /* a1 = abcd */
1912 if (a0 != a1) {
1913 /* a0 = (a1 r<< 24) & 0xff # 000c */
1914 tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
1915 /* a0 = (a0 & ~0xff00) | (a1 r<< 8) & 0xff00 # 00dc */
1916 tcg_out_rlw(s, RLWIMI, a0, a1, 8, 16, 23);
1917 } else {
1918 /* r0 = (a1 r<< 8) & 0xff00 # 00d0 */
1919 tcg_out_rlw(s, RLWINM, TCG_REG_R0, a1, 8, 16, 23);
1920 /* a0 = (a1 r<< 24) & 0xff # 000c */
1921 tcg_out_rlw(s, RLWINM, a0, a1, 24, 24, 31);
1922 /* a0 = a0 | r0 # 00dc */
1923 tcg_out32(s, OR | SAB(TCG_REG_R0, a0, a0));
1924 }
1925 break;
1926
1927 case INDEX_op_bswap32_i32:
1928 case INDEX_op_bswap32_i64:
1929 /* Stolen from gcc's builtin_bswap32 */
1930 a1 = args[1];
1931 a0 = args[0] == a1 ? TCG_REG_R0 : args[0];
1932
1933 /* a1 = args[1] # abcd */
1934 /* a0 = rotate_left (a1, 8) # bcda */
1935 tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
1936 /* a0 = (a0 & ~0xff000000) | ((a1 r<< 24) & 0xff000000) # dcda */
1937 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
1938 /* a0 = (a0 & ~0x0000ff00) | ((a1 r<< 24) & 0x0000ff00) # dcba */
1939 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);
1940
1941 if (a0 == TCG_REG_R0) {
1942 tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
1943 }
1944 break;
1945
68aebd45 1946 case INDEX_op_bswap64_i64:
8327a470 1947 a0 = args[0], a1 = args[1], a2 = TCG_REG_R0;
68aebd45 1948 if (a0 == a1) {
8327a470 1949 a0 = TCG_REG_R0;
1950 a2 = a1;
1951 }
1952
1953 /* a1 = # abcd efgh */
1954 /* a0 = rl32(a1, 8) # 0000 fghe */
1955 tcg_out_rlw(s, RLWINM, a0, a1, 8, 0, 31);
1956 /* a0 = dep(a0, rl32(a1, 24), 0xff000000) # 0000 hghe */
1957 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 0, 7);
1958 /* a0 = dep(a0, rl32(a1, 24), 0x0000ff00) # 0000 hgfe */
1959 tcg_out_rlw(s, RLWIMI, a0, a1, 24, 16, 23);
1960
1961 /* a0 = rl64(a0, 32) # hgfe 0000 */
1962 /* a2 = rl64(a1, 32) # efgh abcd */
1963 tcg_out_rld(s, RLDICL, a0, a0, 32, 0);
1964 tcg_out_rld(s, RLDICL, a2, a1, 32, 0);
1965
1966 /* a0 = dep(a0, rl32(a2, 8), 0xffffffff) # hgfe bcda */
1967 tcg_out_rlw(s, RLWIMI, a0, a2, 8, 0, 31);
1968 /* a0 = dep(a0, rl32(a2, 24), 0xff000000) # hgfe dcda */
1969 tcg_out_rlw(s, RLWIMI, a0, a2, 24, 0, 7);
1970 /* a0 = dep(a0, rl32(a2, 24), 0x0000ff00) # hgfe dcba */
1971 tcg_out_rlw(s, RLWIMI, a0, a2, 24, 16, 23);
1972
 1973 if (a0 == TCG_REG_R0) {
1974 tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
1975 }
1976 break;
1977
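        /* A constant deposit value can only be zero (the "rZ" constraint
           below), so the operation reduces to clearing the destination
           field with an AND of the inverted mask. */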
33de9ed2 1978 case INDEX_op_deposit_i32:
1979 if (const_args[2]) {
1980 uint32_t mask = ((2u << (args[4] - 1)) - 1) << args[3];
1981 tcg_out_andi32(s, args[0], args[0], ~mask);
1982 } else {
1983 tcg_out_rlw(s, RLWIMI, args[0], args[2], args[3],
1984 32 - args[3] - args[4], 31 - args[3]);
1985 }
1986 break;
1987 case INDEX_op_deposit_i64:
1988 if (const_args[2]) {
1989 uint64_t mask = ((2ull << (args[4] - 1)) - 1) << args[3];
1990 tcg_out_andi64(s, args[0], args[0], ~mask);
1991 } else {
1992 tcg_out_rld(s, RLDIMI, args[0], args[2], args[3],
1993 64 - args[3] - args[4]);
1994 }
1995 break;
1996
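        /* Conditional moves are expanded by tcg_out_movcond earlier in this
           file; on ISA 2.06 hosts (HAVE_ISEL) it can presumably use isel,
           otherwise a compare-and-branch sequence. */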
1997 case INDEX_op_movcond_i32:
1998 tcg_out_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1], args[2],
1999 args[3], args[4], const_args[2]);
2000 break;
2001 case INDEX_op_movcond_i64:
2002 tcg_out_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1], args[2],
2003 args[3], args[4], const_args[2]);
2004 break;
2005
2006 case INDEX_op_add2_i64:
2007 /* Note that the CA bit is defined based on the word size of the
2008 environment. So in 64-bit mode it's always carry-out of bit 63.
2009 The fallback code using deposit works just as well for 32-bit. */
2010 a0 = args[0], a1 = args[1];
84247357 2011 if (a0 == args[3] || (!const_args[5] && a0 == args[5])) {
2012 a0 = TCG_REG_R0;
2013 }
2014 if (const_args[4]) {
2015 tcg_out32(s, ADDIC | TAI(a0, args[2], args[4]));
6c858762 2016 } else {
84247357 2017 tcg_out32(s, ADDC | TAB(a0, args[2], args[4]));
2018 }
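        /* High part: the "rZM" constraint limits a constant high addend to
           0 or -1, which maps onto addze (add carry) or addme (add carry
           minus one); otherwise adde propagates the carry from the low
           part. */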
2019 if (const_args[5]) {
84247357 2020 tcg_out32(s, (args[5] ? ADDME : ADDZE) | RT(a1) | RA(args[3]));
6c858762 2021 } else {
84247357 2022 tcg_out32(s, ADDE | TAB(a1, args[3], args[5]));
2023 }
2024 if (a0 != args[0]) {
2025 tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
2026 }
2027 break;
2028
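        /* The carry-flavored subtracts compute ~RA + RB (+ CA), so CA acts
           as "no borrow"; subfme/subfze below handle a high-part operand
           that is constant 0 or -1, mirroring addme/addze above. */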
2029 case INDEX_op_sub2_i64:
2030 a0 = args[0], a1 = args[1];
2031 if (a0 == args[5] || (!const_args[4] && a0 == args[4])) {
2032 a0 = TCG_REG_R0;
2033 }
2034 if (const_args[2]) {
2035 tcg_out32(s, SUBFIC | TAI(a0, args[3], args[2]));
2036 } else {
2037 tcg_out32(s, SUBFC | TAB(a0, args[3], args[2]));
2038 }
2039 if (const_args[4]) {
2040 tcg_out32(s, (args[4] ? SUBFME : SUBFZE) | RT(a1) | RA(args[5]));
2041 } else {
2042 tcg_out32(s, SUBFE | TAB(a1, args[5], args[4]));
2043 }
2044 if (a0 != args[0]) {
2045 tcg_out_mov(s, TCG_TYPE_I64, args[0], a0);
2046 }
2047 break;
2048
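        /* mulhdu/mulhd return the high 64 bits of the unsigned/signed
           128-bit product. */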
2049 case INDEX_op_muluh_i64:
2050 tcg_out32(s, MULHDU | TAB(args[0], args[1], args[2]));
2051 break;
2052 case INDEX_op_mulsh_i64:
2053 tcg_out32(s, MULHD | TAB(args[0], args[1], args[2]));
2054 break;
2055
810260a8 2056 default:
2057 tcg_dump_ops(s);
2058 tcg_abort();
810260a8 2059 }
2060}
2061
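/* Operand constraints for each opcode. The letters are interpreted by this
   backend's constraint parser (not shown here); judging from the
   TCG_CT_CONST_* flags above, "I"/"T"/"U" accept signed 16-bit, signed
   32-bit and unsigned 32-bit immediates, "Z" constant zero, "M" constant -1,
   and "L"/"S" name the restricted register classes used for qemu_ld/qemu_st
   operands. */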
2062static const TCGTargetOpDef ppc_op_defs[] = {
2063 { INDEX_op_exit_tb, { } },
2064 { INDEX_op_goto_tb, { } },
2065 { INDEX_op_call, { "ri" } },
810260a8 2066 { INDEX_op_br, { } },
2067
2068 { INDEX_op_mov_i32, { "r", "r" } },
2069 { INDEX_op_mov_i64, { "r", "r" } },
2070 { INDEX_op_movi_i32, { "r" } },
2071 { INDEX_op_movi_i64, { "r" } },
2072
2073 { INDEX_op_ld8u_i32, { "r", "r" } },
2074 { INDEX_op_ld8s_i32, { "r", "r" } },
2075 { INDEX_op_ld16u_i32, { "r", "r" } },
2076 { INDEX_op_ld16s_i32, { "r", "r" } },
2077 { INDEX_op_ld_i32, { "r", "r" } },
2078 { INDEX_op_ld_i64, { "r", "r" } },
2079 { INDEX_op_st8_i32, { "r", "r" } },
2080 { INDEX_op_st8_i64, { "r", "r" } },
2081 { INDEX_op_st16_i32, { "r", "r" } },
2082 { INDEX_op_st16_i64, { "r", "r" } },
2083 { INDEX_op_st_i32, { "r", "r" } },
2084 { INDEX_op_st_i64, { "r", "r" } },
2085 { INDEX_op_st32_i64, { "r", "r" } },
2086
2087 { INDEX_op_ld8u_i64, { "r", "r" } },
2088 { INDEX_op_ld8s_i64, { "r", "r" } },
2089 { INDEX_op_ld16u_i64, { "r", "r" } },
2090 { INDEX_op_ld16s_i64, { "r", "r" } },
2091 { INDEX_op_ld32u_i64, { "r", "r" } },
2092 { INDEX_op_ld32s_i64, { "r", "r" } },
810260a8 2093
2094 { INDEX_op_add_i32, { "r", "r", "ri" } },
ef809300 2095 { INDEX_op_mul_i32, { "r", "r", "rI" } },
810260a8 2096 { INDEX_op_div_i32, { "r", "r", "r" } },
2097 { INDEX_op_divu_i32, { "r", "r", "r" } },
148bdd23 2098 { INDEX_op_sub_i32, { "r", "rI", "ri" } },
810260a8 2099 { INDEX_op_and_i32, { "r", "r", "ri" } },
2100 { INDEX_op_or_i32, { "r", "r", "ri" } },
2101 { INDEX_op_xor_i32, { "r", "r", "ri" } },
2102 { INDEX_op_andc_i32, { "r", "r", "ri" } },
2103 { INDEX_op_orc_i32, { "r", "r", "ri" } },
2104 { INDEX_op_eqv_i32, { "r", "r", "ri" } },
2105 { INDEX_op_nand_i32, { "r", "r", "r" } },
2106 { INDEX_op_nor_i32, { "r", "r", "r" } },
810260a8 2107
2108 { INDEX_op_shl_i32, { "r", "r", "ri" } },
2109 { INDEX_op_shr_i32, { "r", "r", "ri" } },
2110 { INDEX_op_sar_i32, { "r", "r", "ri" } },
2111 { INDEX_op_rotl_i32, { "r", "r", "ri" } },
2112 { INDEX_op_rotr_i32, { "r", "r", "ri" } },
810260a8 2113
2114 { INDEX_op_brcond_i32, { "r", "ri" } },
2115 { INDEX_op_brcond_i64, { "r", "ri" } },
2116
2117 { INDEX_op_neg_i32, { "r", "r" } },
157f2662 2118 { INDEX_op_not_i32, { "r", "r" } },
810260a8 2119
ee924fa6 2120 { INDEX_op_add_i64, { "r", "r", "rT" } },
148bdd23 2121 { INDEX_op_sub_i64, { "r", "rI", "rT" } },
37251b98 2122 { INDEX_op_and_i64, { "r", "r", "ri" } },
2123 { INDEX_op_or_i64, { "r", "r", "rU" } },
2124 { INDEX_op_xor_i64, { "r", "r", "rU" } },
37251b98 2125 { INDEX_op_andc_i64, { "r", "r", "ri" } },
2126 { INDEX_op_orc_i64, { "r", "r", "r" } },
2127 { INDEX_op_eqv_i64, { "r", "r", "r" } },
2128 { INDEX_op_nand_i64, { "r", "r", "r" } },
2129 { INDEX_op_nor_i64, { "r", "r", "r" } },
810260a8 2130
fe6f943f 2131 { INDEX_op_shl_i64, { "r", "r", "ri" } },
2132 { INDEX_op_shr_i64, { "r", "r", "ri" } },
2133 { INDEX_op_sar_i64, { "r", "r", "ri" } },
2134 { INDEX_op_rotl_i64, { "r", "r", "ri" } },
2135 { INDEX_op_rotr_i64, { "r", "r", "ri" } },
810260a8 2136
ef809300 2137 { INDEX_op_mul_i64, { "r", "r", "rI" } },
810260a8 2138 { INDEX_op_div_i64, { "r", "r", "r" } },
2139 { INDEX_op_divu_i64, { "r", "r", "r" } },
810260a8 2140
2141 { INDEX_op_neg_i64, { "r", "r" } },
157f2662 2142 { INDEX_op_not_i64, { "r", "r" } },
810260a8 2143
2144 { INDEX_op_qemu_ld8u, { "r", "L" } },
2145 { INDEX_op_qemu_ld8s, { "r", "L" } },
2146 { INDEX_op_qemu_ld16u, { "r", "L" } },
2147 { INDEX_op_qemu_ld16s, { "r", "L" } },
86feb1c8 2148 { INDEX_op_qemu_ld32, { "r", "L" } },
810260a8 2149 { INDEX_op_qemu_ld32u, { "r", "L" } },
2150 { INDEX_op_qemu_ld32s, { "r", "L" } },
b01d9fea 2151 { INDEX_op_qemu_ld64, { "r", "L" } },
810260a8 2152
c070355d 2153 { INDEX_op_qemu_st8, { "S", "S" } },
2154 { INDEX_op_qemu_st16, { "S", "S" } },
2155 { INDEX_op_qemu_st32, { "S", "S" } },
016b2b28 2156 { INDEX_op_qemu_st64, { "S", "S" } },
810260a8 2157
e46b9681 2158 { INDEX_op_ext8s_i32, { "r", "r" } },
2159 { INDEX_op_ext16s_i32, { "r", "r" } },
2160 { INDEX_op_ext8s_i64, { "r", "r" } },
2161 { INDEX_op_ext16s_i64, { "r", "r" } },
2162 { INDEX_op_ext32s_i64, { "r", "r" } },
2163
1cd62ae9 2164 { INDEX_op_setcond_i32, { "r", "r", "ri" } },
2165 { INDEX_op_setcond_i64, { "r", "r", "ri" } },
2166 { INDEX_op_movcond_i32, { "r", "r", "ri", "rZ", "rZ" } },
2167 { INDEX_op_movcond_i64, { "r", "r", "ri", "rZ", "rZ" } },
1cd62ae9 2168
2169 { INDEX_op_bswap16_i32, { "r", "r" } },
2170 { INDEX_op_bswap16_i64, { "r", "r" } },
2171 { INDEX_op_bswap32_i32, { "r", "r" } },
2172 { INDEX_op_bswap32_i64, { "r", "r" } },
68aebd45 2173 { INDEX_op_bswap64_i64, { "r", "r" } },
5d221582 2174
2175 { INDEX_op_deposit_i32, { "r", "0", "rZ" } },
2176 { INDEX_op_deposit_i64, { "r", "0", "rZ" } },
33de9ed2 2177
84247357 2178 { INDEX_op_add2_i64, { "r", "r", "r", "r", "rI", "rZM" } },
6c858762 2179 { INDEX_op_sub2_i64, { "r", "r", "rI", "r", "rZM", "r" } },
2180 { INDEX_op_mulsh_i64, { "r", "r", "r" } },
2181 { INDEX_op_muluh_i64, { "r", "r", "r" } },
6c858762 2182
810260a8 2183 { -1 },
2184};
2185
541dd4ce 2186static void tcg_target_init(TCGContext *s)
810260a8 2187{
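    /* Probe AT_HWCAP for ISA 2.06 (POWER7) so the backend can enable the
       instructions gated on HAVE_ISA_2_06 / HAVE_ISEL. */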
2188#ifdef CONFIG_GETAUXVAL
2189 unsigned long hwcap = getauxval(AT_HWCAP);
2190 if (hwcap & PPC_FEATURE_ARCH_2_06) {
2191 have_isa_2_06 = true;
2192 }
2193#endif
2194
2195 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
2196 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, 0xffffffff);
2197 tcg_regset_set32(tcg_target_call_clobber_regs, 0,
810260a8 2198 (1 << TCG_REG_R0) |
5d7ff5bb 2199 (1 << TCG_REG_R2) |
810260a8 2200 (1 << TCG_REG_R3) |
2201 (1 << TCG_REG_R4) |
2202 (1 << TCG_REG_R5) |
2203 (1 << TCG_REG_R6) |
2204 (1 << TCG_REG_R7) |
2205 (1 << TCG_REG_R8) |
2206 (1 << TCG_REG_R9) |
2207 (1 << TCG_REG_R10) |
2208 (1 << TCG_REG_R11) |
5e1702b0 2209 (1 << TCG_REG_R12));
810260a8 2210
541dd4ce 2211 tcg_regset_clear(s->reserved_regs);
2212 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* tcg temp */
2213 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* stack pointer */
b18d5d2b 2214 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R2); /* mem temp */
2215#ifdef __APPLE__
2216 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R11); /* ??? */
5d7ff5bb 2217#endif
5e1702b0 2218 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R13); /* thread pointer */
810260a8 2219
541dd4ce 2220 tcg_add_target_add_op_defs(ppc_op_defs);
810260a8 2221}
2222
2223typedef struct {
2224 DebugFrameCIE cie;
2225 DebugFrameFDEHeader fde;
2226 uint8_t fde_def_cfa[4];
2227 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2 + 3];
2228} DebugFrame;
2229
2230/* We're expecting a 2 byte uleb128 encoded value. */
2231QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
2232
2233#define ELF_HOST_MACHINE EM_PPC64
2234
2235static DebugFrame debug_frame = {
2236 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
2237 .cie.id = -1,
2238 .cie.version = 1,
2239 .cie.code_align = 1,
2240 .cie.data_align = 0x78, /* sleb128 -8 */
2241 .cie.return_column = 65,
2242
2243 /* Total FDE size does not include the "len" member. */
2244 .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
2245
2246 .fde_def_cfa = {
2247 12, 1, /* DW_CFA_def_cfa r1, ... */
2248 (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */
2249 (FRAME_SIZE >> 7)
2250 },
2251 .fde_reg_ofs = {
2252 0x11, 65, 0x7e, /* DW_CFA_offset_extended_sf, lr, 16 */
2253 }
2254};
2255
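/* Emit one DW_CFA_offset entry (opcode 0x80 | regno) per callee-saved
   register; the offsets are divided by 8 to match the CIE's data alignment
   factor of -8, and the FDE is then pointed at the generated code buffer. */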
2256void tcg_register_jit(void *buf, size_t buf_size)
2257{
2258 uint8_t *p = &debug_frame.fde_reg_ofs[3];
2259 int i;
2260
2261 for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); ++i, p += 2) {
2262 p[0] = 0x80 + tcg_target_callee_save_regs[i];
2263 p[1] = (FRAME_SIZE - (REG_SAVE_BOT + i * 8)) / 8;
2264 }
2265
2266 debug_frame.fde.func_start = (tcg_target_long) buf;
2267 debug_frame.fde.func_len = buf_size;
2268
2269 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
2270}