]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/hppa/tcg-target.c
tcg: Add TCG_COND_NEVER, TCG_COND_ALWAYS
[mirror_qemu.git] / tcg / hppa / tcg-target.c
CommitLineData
f54b3f92
AJ
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
d4a9eb1f 25#ifndef NDEBUG
f54b3f92 26static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
fd76e73a
RH
27 "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
28 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
29 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
30 "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
f54b3f92 31};
d4a9eb1f 32#endif
f54b3f92 33
/* This is an 8 byte temp slot in the stack frame.  */
#define STACK_TEMP_OFS -16

/* When guest-base support is compiled in, reserve R16 to hold it;
   otherwise alias it to R0 so uses of it become no-ops.  */
#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif

f54b3f92
AJ
43static const int tcg_target_reg_alloc_order[] = {
44 TCG_REG_R4,
45 TCG_REG_R5,
46 TCG_REG_R6,
47 TCG_REG_R7,
48 TCG_REG_R8,
49 TCG_REG_R9,
50 TCG_REG_R10,
51 TCG_REG_R11,
52 TCG_REG_R12,
53 TCG_REG_R13,
54
55 TCG_REG_R17,
56 TCG_REG_R14,
57 TCG_REG_R15,
58 TCG_REG_R16,
fd76e73a
RH
59
60 TCG_REG_R26,
61 TCG_REG_R25,
62 TCG_REG_R24,
63 TCG_REG_R23,
64
65 TCG_REG_RET0,
66 TCG_REG_RET1,
f54b3f92
AJ
67};
68
69static const int tcg_target_call_iarg_regs[4] = {
70 TCG_REG_R26,
71 TCG_REG_R25,
72 TCG_REG_R24,
73 TCG_REG_R23,
74};
75
76static const int tcg_target_call_oarg_regs[2] = {
77 TCG_REG_RET0,
78 TCG_REG_RET1,
79};
80
fd76e73a
RH
81/* True iff val fits a signed field of width BITS. */
82static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
83{
84 return (val << ((sizeof(tcg_target_long) * 8 - bits))
85 >> (sizeof(tcg_target_long) * 8 - bits)) == val;
86}
87
88/* True iff depi can be used to compute (reg | MASK).
89 Accept a bit pattern like:
90 0....01....1
91 1....10....0
92 0..01..10..0
93 Copied from gcc sources. */
94static inline int or_mask_p(tcg_target_ulong mask)
95{
0085bd51
RH
96 if (mask == 0 || mask == -1) {
97 return 0;
98 }
fd76e73a
RH
99 mask += mask & -mask;
100 return (mask & (mask - 1)) == 0;
101}
102
103/* True iff depi or extru can be used to compute (reg & mask).
104 Accept a bit pattern like these:
105 0....01....1
106 1....10....0
107 1..10..01..1
108 Copied from gcc sources. */
109static inline int and_mask_p(tcg_target_ulong mask)
110{
111 return or_mask_p(~mask);
112}
113
/* Encode VAL into the "low sign extension" immediate format: the sign
   bit is rotated down to bit 0 and the magnitude occupies bits LEN-1..1.
   Computed in unsigned arithmetic: the original shifted a (possibly
   negative) int left, which is undefined behavior in C.  */
static int low_sign_ext(int val, int len)
{
    unsigned uval = (unsigned)val;
    return (int)(((uval << 1) & ~(~0u << len)) | ((uval >> (len - 1)) & 1));
}

/* Scramble a 12-bit signed branch displacement into the PA-RISC
   instruction bit layout.  */
static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}

/* Likewise for a 17-bit branch displacement.  */
static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}

/* Likewise for the 21-bit immediate of LDIL/ADDIL.  */
static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}

143/* ??? Bizzarely, there is no PCREL12F relocation type. I guess all
144 such relocations are simply fully handled by the assembler. */
145#define R_PARISC_PCREL12F R_PARISC_NONE
146
f54b3f92
AJ
147static void patch_reloc(uint8_t *code_ptr, int type,
148 tcg_target_long value, tcg_target_long addend)
149{
fd76e73a
RH
150 uint32_t *insn_ptr = (uint32_t *)code_ptr;
151 uint32_t insn = *insn_ptr;
152 tcg_target_long pcrel;
153
154 value += addend;
155 pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;
156
f54b3f92 157 switch (type) {
fd76e73a
RH
158 case R_PARISC_PCREL12F:
159 assert(check_fit_tl(pcrel, 12));
160 /* ??? We assume all patches are forward. See tcg_out_brcond
161 re setting the NUL bit on the branch and eliding the nop. */
162 assert(pcrel >= 0);
163 insn &= ~0x1ffdu;
164 insn |= reassemble_12(pcrel);
165 break;
f54b3f92 166 case R_PARISC_PCREL17F:
fd76e73a
RH
167 assert(check_fit_tl(pcrel, 17));
168 insn &= ~0x1f1ffdu;
169 insn |= reassemble_17(pcrel);
f54b3f92
AJ
170 break;
171 default:
172 tcg_abort();
173 }
fd76e73a
RH
174
175 *insn_ptr = insn;
f54b3f92
AJ
176}
177
f54b3f92 178/* parse target specific constraints */
d4a9eb1f 179static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
f54b3f92
AJ
180{
181 const char *ct_str;
182
183 ct_str = *pct_str;
184 switch (ct_str[0]) {
185 case 'r':
186 ct->ct |= TCG_CT_REG;
187 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
188 break;
189 case 'L': /* qemu_ld/st constraint */
190 ct->ct |= TCG_CT_REG;
191 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
192 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
193 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
194 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
195 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
196 break;
fd76e73a
RH
197 case 'Z':
198 ct->ct |= TCG_CT_CONST_0;
199 break;
200 case 'I':
201 ct->ct |= TCG_CT_CONST_S11;
202 break;
203 case 'J':
204 ct->ct |= TCG_CT_CONST_S5;
205 break;
91493631
RH
206 case 'K':
207 ct->ct |= TCG_CT_CONST_MS11;
208 break;
0085bd51
RH
209 case 'M':
210 ct->ct |= TCG_CT_CONST_AND;
211 break;
212 case 'O':
213 ct->ct |= TCG_CT_CONST_OR;
214 break;
f54b3f92
AJ
215 default:
216 return -1;
217 }
218 ct_str++;
219 *pct_str = ct_str;
220 return 0;
221}
222
223/* test if a constant matches the constraint */
fd76e73a
RH
224static int tcg_target_const_match(tcg_target_long val,
225 const TCGArgConstraint *arg_ct)
f54b3f92 226{
fd76e73a
RH
227 int ct = arg_ct->ct;
228 if (ct & TCG_CT_CONST) {
229 return 1;
230 } else if (ct & TCG_CT_CONST_0) {
231 return val == 0;
232 } else if (ct & TCG_CT_CONST_S5) {
233 return check_fit_tl(val, 5);
234 } else if (ct & TCG_CT_CONST_S11) {
235 return check_fit_tl(val, 11);
91493631
RH
236 } else if (ct & TCG_CT_CONST_MS11) {
237 return check_fit_tl(-val, 11);
0085bd51
RH
238 } else if (ct & TCG_CT_CONST_AND) {
239 return and_mask_p(val);
240 } else if (ct & TCG_CT_CONST_OR) {
241 return or_mask_p(val);
fd76e73a 242 }
f54b3f92
AJ
243 return 0;
244}
245
/* Instruction field encoders.  Field positions follow the PA-RISC 1.1
   architecture manual's instruction formats.  */
#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)

/* Compare condition codes; COND_FALSE is the "negate" modifier bit.  */
#define COND_NEVER   0
#define COND_EQ      1
#define COND_LT      2
#define COND_LE      3
#define COND_LTU     4
#define COND_LEU     5
#define COND_SV      6
#define COND_OD      7
#define COND_FALSE   8

/* Arithmetic and logical opcodes.  */
#define INSN_ADD	(INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC	(INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI	(INSN_OP(0x2d))
#define INSN_ADDIL	(INSN_OP(0x0a))
#define INSN_ADDL	(INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND	(INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM	(INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR	(INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR	(INSN_OP(0x24))
#define INSN_DEP	(INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI	(INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS	(INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU	(INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL	(INSN_OP(0x08))
#define INSN_LDO	(INSN_OP(0x0d))
#define INSN_MTCTL	(INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR		(INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD	(INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB	(INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB	(INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI	(INSN_OP(0x25))
#define INSN_VEXTRS	(INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU	(INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD	(INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR	(INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP	(INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP	(INSN_OP(0x35) | INSN_EXT3SH(0))

/* Branch opcodes; the "| 2" variants set the nullify bit.  */
#define INSN_BL         (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N       (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR        (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV         (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N       (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4    (INSN_OP(0x39) | (1 << 13))

/* Load opcodes.  */
#define INSN_LDB        (INSN_OP(0x10))
#define INSN_LDH        (INSN_OP(0x11))
#define INSN_LDW        (INSN_OP(0x12))
#define INSN_LDWM       (INSN_OP(0x13))
#define INSN_FLDDS      (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

/* Indexed load opcodes.  */
#define INSN_LDBX	(INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX	(INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX	(INSN_OP(0x03) | INSN_EXT4(2))

/* Store opcodes.  */
#define INSN_STB        (INSN_OP(0x18))
#define INSN_STH        (INSN_OP(0x19))
#define INSN_STW        (INSN_OP(0x1a))
#define INSN_STWM       (INSN_OP(0x1b))
#define INSN_FSTDS      (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

/* Compare-and-branch opcodes.  */
#define INSN_COMBT      (INSN_OP(0x20))
#define INSN_COMBF      (INSN_OP(0x22))
#define INSN_COMIBT     (INSN_OP(0x21))
#define INSN_COMIBF     (INSN_OP(0x23))

/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(const void *);

2a534aff 335static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
fd76e73a
RH
336{
337 /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
338 but hppa-dis.c is unaware of this definition */
339 if (ret != arg) {
340 tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
341 | INSN_R2(TCG_REG_R0));
342 }
343}
f54b3f92 344
fd76e73a 345static void tcg_out_movi(TCGContext *s, TCGType type,
2a534aff 346 TCGReg ret, tcg_target_long arg)
fd76e73a
RH
347{
348 if (check_fit_tl(arg, 14)) {
349 tcg_out32(s, INSN_LDO | INSN_R1(ret)
350 | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
351 } else {
352 uint32_t hi, lo;
353 hi = arg >> 11;
354 lo = arg & 0x7ff;
355
356 tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
357 if (lo) {
358 tcg_out32(s, INSN_LDO | INSN_R1(ret)
359 | INSN_R2(ret) | INSN_IM14(lo));
360 }
361 }
362}
f54b3f92 363
fd76e73a
RH
364static void tcg_out_ldst(TCGContext *s, int ret, int addr,
365 tcg_target_long offset, int op)
366{
367 if (!check_fit_tl(offset, 14)) {
368 uint32_t hi, lo, op;
f54b3f92 369
fd76e73a
RH
370 hi = offset >> 11;
371 lo = offset & 0x7ff;
f54b3f92 372
fd76e73a
RH
373 if (addr == TCG_REG_R0) {
374 op = INSN_LDIL | INSN_R2(TCG_REG_R1);
375 } else {
376 op = INSN_ADDIL | INSN_R2(addr);
377 }
378 tcg_out32(s, op | reassemble_21(hi));
f54b3f92 379
fd76e73a
RH
380 addr = TCG_REG_R1;
381 offset = lo;
382 }
f54b3f92 383
fd76e73a
RH
384 if (ret != addr || offset != 0 || op != INSN_LDO) {
385 tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
386 }
387}
f54b3f92 388
fd76e73a 389/* This function is required by tcg.c. */
2a534aff
RH
390static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
391 TCGReg arg1, tcg_target_long arg2)
fd76e73a
RH
392{
393 tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
394}
395
396/* This function is required by tcg.c. */
2a534aff
RH
397static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg ret,
398 TCGReg arg1, tcg_target_long arg2)
fd76e73a
RH
399{
400 tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
401}
402
403static void tcg_out_ldst_index(TCGContext *s, int data,
404 int base, int index, int op)
405{
406 tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
407}
408
409static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
410 tcg_target_long val)
411{
412 tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
413}
f54b3f92 414
fd76e73a
RH
415/* This function is required by tcg.c. */
416static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
417{
418 tcg_out_addi2(s, reg, reg, val);
419}
f54b3f92 420
fd76e73a
RH
421static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
422{
423 tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
424}
f54b3f92 425
fd76e73a
RH
426static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
427 tcg_target_long val, int op)
f54b3f92 428{
fd76e73a
RH
429 assert(check_fit_tl(val, 11));
430 tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
f54b3f92
AJ
431}
432
fd76e73a 433static inline void tcg_out_nop(TCGContext *s)
f54b3f92 434{
fd76e73a
RH
435 tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
436}
f54b3f92 437
fd76e73a
RH
438static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
439{
440 tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
441}
442
443/* Extract LEN bits at position OFS from ARG and place in RET.
444 Note that here the bit ordering is reversed from the PA-RISC
445 standard, such that the right-most bit is 0. */
446static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
447 unsigned ofs, unsigned len, int sign)
448{
449 assert(ofs < 32 && len <= 32 - ofs);
450 tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
451 | INSN_R1(ret) | INSN_R2(arg)
452 | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
f54b3f92
AJ
453}
454
fd76e73a
RH
455/* Likewise with OFS interpreted little-endian. */
456static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
457 unsigned ofs, unsigned len)
f54b3f92 458{
fd76e73a
RH
459 assert(ofs < 32 && len <= 32 - ofs);
460 tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
461 | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
462}
463
ec188429
RH
464static inline void tcg_out_depi(TCGContext *s, int ret, int arg,
465 unsigned ofs, unsigned len)
466{
467 assert(ofs < 32 && len <= 32 - ofs);
468 tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(arg)
469 | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
470}
471
fd76e73a
RH
472static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
473 unsigned count)
474{
475 assert(count < 32);
476 tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
477 | INSN_SHDEP_CP(count));
478}
479
480static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
481{
482 tcg_out_mtctl_sar(s, creg);
483 tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
484}
485
486static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
487{
0085bd51
RH
488 int bs0, bs1;
489
490 /* Note that the argument is constrained to match or_mask_p. */
491 for (bs0 = 0; bs0 < 32; bs0++) {
492 if ((m & (1u << bs0)) != 0) {
493 break;
fd76e73a 494 }
0085bd51
RH
495 }
496 for (bs1 = bs0; bs1 < 32; bs1++) {
497 if ((m & (1u << bs1)) == 0) {
498 break;
fd76e73a 499 }
fd76e73a 500 }
0085bd51
RH
501 assert(bs1 == 32 || (1ul << bs1) > m);
502
3b6dac34 503 tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
ec188429 504 tcg_out_depi(s, ret, -1, bs0, bs1 - bs0);
fd76e73a
RH
505}
506
507static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
508{
0085bd51 509 int ls0, ls1, ms0;
fd76e73a 510
0085bd51
RH
511 /* Note that the argument is constrained to match and_mask_p. */
512 for (ls0 = 0; ls0 < 32; ls0++) {
513 if ((m & (1u << ls0)) == 0) {
514 break;
fd76e73a 515 }
0085bd51
RH
516 }
517 for (ls1 = ls0; ls1 < 32; ls1++) {
518 if ((m & (1u << ls1)) != 0) {
519 break;
fd76e73a 520 }
0085bd51
RH
521 }
522 for (ms0 = ls1; ms0 < 32; ms0++) {
523 if ((m & (1u << ms0)) == 0) {
524 break;
fd76e73a 525 }
0085bd51
RH
526 }
527 assert (ms0 == 32);
fd76e73a 528
0085bd51
RH
529 if (ls1 == 32) {
530 tcg_out_extr(s, ret, arg, 0, ls0, 0);
f54b3f92 531 } else {
3b6dac34 532 tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
ec188429 533 tcg_out_depi(s, ret, 0, ls0, ls1 - ls0);
f54b3f92
AJ
534 }
535}
536
fd76e73a 537static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
f54b3f92 538{
fd76e73a 539 tcg_out_extr(s, ret, arg, 0, 8, 1);
f54b3f92
AJ
540}
541
fd76e73a 542static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
f54b3f92 543{
fd76e73a 544 tcg_out_extr(s, ret, arg, 0, 16, 1);
f54b3f92
AJ
545}
546
fd76e73a 547static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
f54b3f92 548{
fd76e73a
RH
549 count &= 31;
550 tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
551 | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
f54b3f92
AJ
552}
553
fd76e73a 554static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
f54b3f92 555{
fd76e73a
RH
556 tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
557 tcg_out_mtctl_sar(s, TCG_REG_R20);
558 tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
f54b3f92
AJ
559}
560
fd76e73a 561static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
f54b3f92 562{
fd76e73a
RH
563 count &= 31;
564 tcg_out_extr(s, ret, arg, count, 32 - count, 0);
f54b3f92
AJ
565}
566
fd76e73a 567static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
f54b3f92 568{
fd76e73a 569 tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
f54b3f92
AJ
570}
571
fd76e73a 572static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
f54b3f92 573{
fd76e73a
RH
574 count &= 31;
575 tcg_out_extr(s, ret, arg, count, 32 - count, 1);
f54b3f92
AJ
576}
577
fd76e73a 578static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
f54b3f92 579{
fd76e73a
RH
580 tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
581 tcg_out_mtctl_sar(s, TCG_REG_R20);
582 tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
f54b3f92
AJ
583}
584
fd76e73a 585static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
f54b3f92 586{
fd76e73a
RH
587 count &= 31;
588 tcg_out_shd(s, ret, arg, arg, 32 - count);
f54b3f92
AJ
589}
590
fd76e73a
RH
591static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
592{
593 tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
594 tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
f54b3f92
AJ
595}
596
fd76e73a
RH
597static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
598{
599 count &= 31;
600 tcg_out_shd(s, ret, arg, arg, count);
f54b3f92
AJ
601}
602
fd76e73a
RH
603static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
604{
605 tcg_out_vshd(s, ret, arg, arg, creg);
f54b3f92
AJ
606}
607
fd76e73a
RH
608static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
609{
610 if (ret != arg) {
3b6dac34 611 tcg_out_mov(s, TCG_TYPE_I32, ret, arg); /* arg = xxAB */
fd76e73a
RH
612 }
613 tcg_out_dep(s, ret, ret, 16, 8); /* ret = xBAB */
614 tcg_out_extr(s, ret, ret, 8, 16, sign); /* ret = ..BA */
f54b3f92
AJ
615}
616
fd76e73a 617static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
f54b3f92 618{
fd76e73a
RH
619 /* arg = ABCD */
620 tcg_out_rotri(s, temp, arg, 16); /* temp = CDAB */
621 tcg_out_dep(s, temp, temp, 16, 8); /* temp = CBAB */
622 tcg_out_shd(s, ret, arg, temp, 8); /* ret = DCBA */
f54b3f92
AJ
623}
624
e7bd6300 625static void tcg_out_call(TCGContext *s, const void *func)
fd76e73a
RH
626{
627 tcg_target_long val, hi, lo, disp;
628
629 val = (uint32_t)__canonicalize_funcptr_for_compare(func);
630 disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;
631
632 if (check_fit_tl(disp, 17)) {
633 tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
634 } else {
635 hi = val >> 11;
636 lo = val & 0x7ff;
637
638 tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
639 tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
640 | reassemble_17(lo >> 2));
3b6dac34 641 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_RP, TCG_REG_R31);
fd76e73a
RH
642 }
643}
79383c9c 644
fd76e73a
RH
645static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
646 int arg1, int arg2)
647{
648 /* Store both words into the stack for copy to the FPU. */
a42bceec
BS
649 tcg_out_ldst(s, arg1, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_STW);
650 tcg_out_ldst(s, arg2, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4, INSN_STW);
fd76e73a
RH
651
652 /* Load both words into the FPU at the same time. We get away
653 with this because we can address the left and right half of the
654 FPU registers individually once loaded. */
655 /* fldds stack_temp(sp),fr22 */
a42bceec 656 tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_CALL_STACK)
fd76e73a
RH
657 | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
658
659 /* xmpyu fr22r,fr22,fr22 */
660 tcg_out32(s, 0x3ad64796);
661
662 /* Store the 64-bit result back into the stack. */
663 /* fstds stack_temp(sp),fr22 */
a42bceec 664 tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_CALL_STACK)
fd76e73a
RH
665 | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
666
667 /* Load the pieces of the result that the caller requested. */
668 if (reth) {
a42bceec 669 tcg_out_ldst(s, reth, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_LDW);
fd76e73a
RH
670 }
671 if (retl) {
a42bceec
BS
672 tcg_out_ldst(s, retl, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4,
673 INSN_LDW);
fd76e73a
RH
674 }
675}
676
91493631
RH
677static void tcg_out_add2(TCGContext *s, int destl, int desth,
678 int al, int ah, int bl, int bh, int blconst)
679{
680 int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
681
682 if (blconst) {
683 tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
684 } else {
685 tcg_out_arith(s, tmp, al, bl, INSN_ADD);
686 }
687 tcg_out_arith(s, desth, ah, bh, INSN_ADDC);
688
3b6dac34 689 tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
91493631
RH
690}
691
692static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
693 int bl, int bh, int alconst, int blconst)
694{
695 int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
696
697 if (alconst) {
698 if (blconst) {
699 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
700 bl = TCG_REG_R20;
701 }
702 tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
703 } else if (blconst) {
704 tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
705 } else {
706 tcg_out_arith(s, tmp, al, bl, INSN_SUB);
707 }
708 tcg_out_arith(s, desth, ah, bh, INSN_SUBB);
709
3b6dac34 710 tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
91493631
RH
711}
712
fd76e73a
RH
713static void tcg_out_branch(TCGContext *s, int label_index, int nul)
714{
715 TCGLabel *l = &s->labels[label_index];
716 uint32_t op = nul ? INSN_BL_N : INSN_BL;
717
718 if (l->has_value) {
719 tcg_target_long val = l->u.value;
720
721 val -= (tcg_target_long)s->code_ptr + 8;
722 val >>= 2;
723 assert(check_fit_tl(val, 17));
724
725 tcg_out32(s, op | reassemble_17(val));
726 } else {
2d097a83
RH
727 /* We need to keep the offset unchanged for retranslation. */
728 uint32_t old_insn = *(uint32_t *)s->code_ptr;
729
fd76e73a 730 tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
2d097a83 731 tcg_out32(s, op | (old_insn & 0x1f1ffdu));
fd76e73a
RH
732 }
733}
734
0aed257f 735static const uint8_t tcg_cond_to_cmp_cond[] =
fd76e73a
RH
736{
737 [TCG_COND_EQ] = COND_EQ,
738 [TCG_COND_NE] = COND_EQ | COND_FALSE,
739 [TCG_COND_LT] = COND_LT,
740 [TCG_COND_GE] = COND_LT | COND_FALSE,
741 [TCG_COND_LE] = COND_LE,
742 [TCG_COND_GT] = COND_LE | COND_FALSE,
743 [TCG_COND_LTU] = COND_LTU,
744 [TCG_COND_GEU] = COND_LTU | COND_FALSE,
745 [TCG_COND_LEU] = COND_LEU,
746 [TCG_COND_GTU] = COND_LEU | COND_FALSE,
747};
748
749static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
750 TCGArg c2, int c2const, int label_index)
751{
752 TCGLabel *l = &s->labels[label_index];
753 int op, pacond;
754
755 /* Note that COMIB operates as if the immediate is the first
756 operand. We model brcond with the immediate in the second
757 to better match what targets are likely to give us. For
758 consistency, model COMB with reversed operands as well. */
759 pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
760
761 if (c2const) {
762 op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
763 op |= INSN_IM5(c2);
764 } else {
765 op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
766 op |= INSN_R1(c2);
767 }
768 op |= INSN_R2(c1);
769 op |= INSN_COND(pacond & 7);
770
771 if (l->has_value) {
772 tcg_target_long val = l->u.value;
773
774 val -= (tcg_target_long)s->code_ptr + 8;
775 val >>= 2;
776 assert(check_fit_tl(val, 12));
777
778 /* ??? Assume that all branches to defined labels are backward.
779 Which means that if the nul bit is set, the delay slot is
780 executed if the branch is taken, and not executed in fallthru. */
781 tcg_out32(s, op | reassemble_12(val));
782 tcg_out_nop(s);
783 } else {
2d097a83
RH
784 /* We need to keep the offset unchanged for retranslation. */
785 uint32_t old_insn = *(uint32_t *)s->code_ptr;
786
fd76e73a
RH
787 tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
788 /* ??? Assume that all branches to undefined labels are forward.
789 Which means that if the nul bit is set, the delay slot is
790 not executed if the branch is taken, which is what we want. */
2d097a83 791 tcg_out32(s, op | 2 | (old_insn & 0x1ffdu));
fd76e73a
RH
792 }
793}
794
795static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
796 TCGArg c1, TCGArg c2, int c2const)
797{
798 int op, pacond;
799
800 /* Note that COMICLR operates as if the immediate is the first
801 operand. We model setcond with the immediate in the second
802 to better match what targets are likely to give us. For
803 consistency, model COMCLR with reversed operands as well. */
804 pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
805
806 if (c2const) {
807 op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
808 } else {
809 op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
810 }
811 op |= INSN_COND(pacond & 7);
812 op |= pacond & COND_FALSE ? 1 << 12 : 0;
813
814 tcg_out32(s, op);
815}
816
c08d9ee3
RH
817static TCGCond const tcg_high_cond[] = {
818 [TCG_COND_EQ] = TCG_COND_EQ,
819 [TCG_COND_NE] = TCG_COND_NE,
820 [TCG_COND_LT] = TCG_COND_LT,
821 [TCG_COND_LE] = TCG_COND_LT,
822 [TCG_COND_GT] = TCG_COND_GT,
823 [TCG_COND_GE] = TCG_COND_GT,
824 [TCG_COND_LTU] = TCG_COND_LTU,
825 [TCG_COND_LEU] = TCG_COND_LTU,
826 [TCG_COND_GTU] = TCG_COND_GTU,
827 [TCG_COND_GEU] = TCG_COND_GTU
828};
829
fd76e73a
RH
830static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
831 TCGArg bl, int blconst, TCGArg bh, int bhconst,
832 int label_index)
833{
834 switch (cond) {
835 case TCG_COND_EQ:
c08d9ee3
RH
836 tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, al, bl, blconst);
837 tcg_out_brcond(s, TCG_COND_EQ, ah, bh, bhconst, label_index);
838 break;
fd76e73a 839 case TCG_COND_NE:
c08d9ee3
RH
840 tcg_out_brcond(s, TCG_COND_NE, al, bl, bhconst, label_index);
841 tcg_out_brcond(s, TCG_COND_NE, ah, bh, bhconst, label_index);
fd76e73a 842 break;
fd76e73a 843 default:
c08d9ee3 844 tcg_out_brcond(s, tcg_high_cond[cond], ah, bh, bhconst, label_index);
fd76e73a
RH
845 tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
846 tcg_out_brcond(s, tcg_unsigned_cond(cond),
847 al, bl, blconst, label_index);
848 break;
849 }
850}
851
852static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
853 TCGArg c1, TCGArg c2, int c2const)
854{
855 tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
856 tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
857}
858
859static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
860 TCGArg al, TCGArg ah, TCGArg bl, int blconst,
861 TCGArg bh, int bhconst)
862{
863 int scratch = TCG_REG_R20;
864
c08d9ee3
RH
865 /* Note that the low parts are fully consumed before scratch is set. */
866 if (ret != ah && (bhconst || ret != bh)) {
fd76e73a
RH
867 scratch = ret;
868 }
869
870 switch (cond) {
871 case TCG_COND_EQ:
872 case TCG_COND_NE:
873 tcg_out_setcond(s, cond, scratch, al, bl, blconst);
874 tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
875 tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
876 break;
877
c08d9ee3
RH
878 case TCG_COND_GE:
879 case TCG_COND_GEU:
880 case TCG_COND_LT:
881 case TCG_COND_LTU:
882 /* Optimize compares with low part zero. */
883 if (bl == 0) {
884 tcg_out_setcond(s, cond, ret, ah, bh, bhconst);
885 return;
886 }
887 /* FALLTHRU */
888
889 case TCG_COND_LE:
890 case TCG_COND_LEU:
891 case TCG_COND_GT:
892 case TCG_COND_GTU:
893 /* <= : ah < bh | (ah == bh && al <= bl) */
fd76e73a
RH
894 tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
895 tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
896 tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
c08d9ee3
RH
897 tcg_out_comclr(s, tcg_invert_cond(tcg_high_cond[cond]),
898 TCG_REG_R0, ah, bh, bhconst);
fd76e73a
RH
899 tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
900 break;
c08d9ee3
RH
901
902 default:
903 tcg_abort();
fd76e73a
RH
904 }
905
3b6dac34 906 tcg_out_mov(s, TCG_TYPE_I32, ret, scratch);
fd76e73a
RH
907}
908
f0da3757
RH
909static void tcg_out_movcond(TCGContext *s, int cond, TCGArg ret,
910 TCGArg c1, TCGArg c2, int c2const,
911 TCGArg v1, int v1const)
912{
913 tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, c1, c2, c2const);
914 if (v1const) {
915 tcg_out_movi(s, TCG_TYPE_I32, ret, v1);
916 } else {
917 tcg_out_mov(s, TCG_TYPE_I32, ret, v1);
918 }
919}
920
fd76e73a 921#if defined(CONFIG_SOFTMMU)
79383c9c 922#include "../../softmmu_defs.h"
f54b3f92 923
e141ab52
BS
924/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
925 int mmu_idx) */
926static const void * const qemu_ld_helpers[4] = {
927 helper_ldb_mmu,
928 helper_ldw_mmu,
929 helper_ldl_mmu,
930 helper_ldq_mmu,
931};
932
933/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
934 uintxx_t val, int mmu_idx) */
935static const void * const qemu_st_helpers[4] = {
936 helper_stb_mmu,
937 helper_stw_mmu,
938 helper_stl_mmu,
939 helper_stq_mmu,
940};
fd76e73a
RH
941
/* Load and compare a TLB entry, and branch if TLB miss.  OFFSET is set to
   the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
   TLB for the memory index.  The return value is the offset from ENV
   contained in R1 afterward (to be used when loading ADDEND); if the
   return value is 0, R1 is not used.  */

static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
                            int addrhi, int s_bits, int lab_miss, int offset)
{
    int ret;

    /* Extracting the index into the TLB.  The "normal C operation" is
       r1 = addr_reg >> TARGET_PAGE_BITS;
       r1 &= CPU_TLB_SIZE - 1;
       r1 <<= CPU_TLB_ENTRY_BITS;
       What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
       and place them at CPU_TLB_ENTRY_BITS.  We can combine the first two
       operations with an EXTRU.  Unfortunately, the current value of
       CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
       add that follows.  */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);

    /* Make sure that both the addr_{read,write} and addend can be
       read with a 14-bit offset from the same base register.
       If not, bias the base into R1 by a multiple of 0x800 and
       return that bias to the caller.  */
    if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
        ret = 0;
    } else {
        ret = (offset + 0x400) & ~0x7ff;
        offset = ret - offset;
        tcg_out_addi2(s, TCG_REG_R1, r1, ret);
        r1 = TCG_REG_R1;
    }

    /* Load the entry from the computed slot.  For a 64-bit guest the
       comparator occupies two words: high half in R23, low half in R20.  */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    }

    /* Compute the value that ought to appear in the TLB for a hit, namely,
       the page of the address.  We include the low N bits of the address
       to catch unaligned accesses and force them onto the slow path.  Do
       this computation after having issued the load from the TLB slot to
       give the load time to complete.  */
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* If not equal, jump to lab_miss.  */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
                        r0, 0, addrhi, 0, lab_miss);
    } else {
        tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
    }

    return ret;
}
e55f523d
RH
1002
1003static int tcg_out_arg_reg32(TCGContext *s, int argno, TCGArg v, bool vconst)
1004{
1005 if (argno < 4) {
1006 if (vconst) {
1007 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
1008 } else {
1009 tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
1010 }
1011 } else {
1012 if (vconst && v != 0) {
1013 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, v);
1014 v = TCG_REG_R20;
1015 }
1016 tcg_out_st(s, TCG_TYPE_I32, v, TCG_REG_CALL_STACK,
1017 TCG_TARGET_CALL_STACK_OFFSET - ((argno - 3) * 4));
1018 }
1019 return argno + 1;
1020}
1021
1022static int tcg_out_arg_reg64(TCGContext *s, int argno, TCGArg vl, TCGArg vh)
1023{
1024 /* 64-bit arguments must go in even reg pairs and stack slots. */
1025 if (argno & 1) {
1026 argno++;
1027 }
1028 argno = tcg_out_arg_reg32(s, argno, vl, false);
1029 argno = tcg_out_arg_reg32(s, argno, vh, false);
1030 return argno;
1031}
f54b3f92
AJ
1032#endif
1033
f061b40e
RH
/* Emit a guest memory load of size/signedness OPC (bits 0-1 = log2 size,
   bit 2 = sign-extend) from ADDR_REG + ADDEND_REG into DATALO_REG
   (and DATAHI_REG for 64-bit loads).  The guest is byte-swapped relative
   to the big-endian host unless TARGET_WORDS_BIGENDIAN.  */
static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg, int datahi_reg,
                                   int addr_reg, int addend_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        break;
    case 0 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        tcg_out_ext8s(s, datalo_reg, datalo_reg);
        break;
    case 1:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
        }
        break;
    case 1 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            /* The final argument requests a sign-extending bswap.  */
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
        } else {
            tcg_out_ext16s(s, datalo_reg, datalo_reg);
        }
        break;
    case 2:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
        }
        break;
    case 3:
        /* For a byte-swapped 64-bit load, the two words swap roles;
           exchange the destination registers up front.  */
        if (bswap) {
            int t = datahi_reg;
            datahi_reg = datalo_reg;
            datalo_reg = t;
        }
        /* We can't access the low-part with a reg+reg addressing mode,
           so perform the addition now and use reg_ofs addressing mode.  */
        if (addend_reg != TCG_REG_R0) {
            tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
            addr_reg = TCG_REG_R20;
        }
        /* Make sure not to clobber the base register.  */
        if (datahi_reg == addr_reg) {
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
        } else {
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
        }
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
        }
        break;
    default:
        tcg_abort();
    }
}
1100
/* Emit a complete qemu_ld opcode: TLB lookup fast path with fall-through
   to a helper-call slow path under CONFIG_SOFTMMU, or a direct
   (optionally GUEST_BASE-offset) access otherwise.  ARGS holds the TCG
   operand list; OPC encodes size (bits 0-1) and signedness (bit 2).  */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit loads.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argno, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
                              addrhi_reg, opc & 3, lab1, offset);

    /* TLB Hit.  If tcg_out_tlb_read biased the base into R1, reuse it;
       otherwise address the addend relative to R25.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
               (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           TCG_REG_R20, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, s->code_ptr);

    /* Marshal (env, addr, mem_index) for the slow-path helper.  */
    argno = 0;
    argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
    if (TARGET_LONG_BITS == 64) {
        argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
    } else {
        argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
    }
    argno = tcg_out_arg_reg32(s, argno, mem_index, true);

    tcg_out_call(s, qemu_ld_helpers[opc & 3]);

    /* Zero- or sign-extend the helper's return value into the
       destination register(s).  */
    switch (opc) {
    case 0:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
        break;
    case 0 | 4:
        tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 1:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 2:
    case 2 | 4:
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_out_mov(s, TCG_TYPE_I32, datahi_reg, TCG_REG_RET0);
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET1);
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    tcg_out_label(s, lab2, s->code_ptr);
#else
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
#endif
}
1176
e55f523d
RH
/* Emit a guest memory store of size OPC (log2 of the byte count) from
   DATALO_REG (and DATAHI_REG for 64-bit stores) to ADDR_REG.  Values
   are byte-swapped through the scratch registers R20/R23 when the guest
   endianness differs from the host.  */
static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg,
                                   int datahi_reg, int addr_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
        break;
    case 3:
        if (bswap) {
            /* Swapping the bytes also swaps which half is high/low.  */
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
            datahi_reg = TCG_REG_R20;
            datalo_reg = TCG_REG_R23;
        }
        tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
        tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
        break;
    default:
        tcg_abort();
    }

}
1219
/* Emit a complete qemu_st opcode: TLB lookup fast path with fall-through
   to a helper-call slow path under CONFIG_SOFTMMU, or a direct
   (optionally GUEST_BASE-offset) access otherwise.  ARGS holds the TCG
   operand list; OPC is the log2 of the access size.  */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit loads.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argno, next, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
                              addrhi_reg, opc, lab1, offset);

    /* TLB Hit.  If tcg_out_tlb_read biased the base into R1, reuse it;
       otherwise address the addend relative to R25.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
               (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);

    /* There are no indexed stores, so we must do this addition explicitly.
       Careful to avoid R20, which is used for the bswaps to follow.  */
    tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  */
    /* label1: */
    tcg_out_label(s, lab1, s->code_ptr);

    /* Marshal (env, addr, val, mem_index) for the slow-path helper.  */
    argno = 0;
    argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
    if (TARGET_LONG_BITS == 64) {
        argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
    } else {
        argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
    }

    /* Scratch register into which the (possibly narrowed) data value is
       placed: the next argument register if one is free, else R20.  */
    next = (argno < 4 ? tcg_target_call_iarg_regs[argno] : TCG_REG_R20);
    switch(opc) {
    case 0:
        tcg_out_andi(s, next, datalo_reg, 0xff);
        argno = tcg_out_arg_reg32(s, argno, next, false);
        break;
    case 1:
        tcg_out_andi(s, next, datalo_reg, 0xffff);
        argno = tcg_out_arg_reg32(s, argno, next, false);
        break;
    case 2:
        argno = tcg_out_arg_reg32(s, argno, datalo_reg, false);
        break;
    case 3:
        argno = tcg_out_arg_reg64(s, argno, datalo_reg, datahi_reg);
        break;
    default:
        tcg_abort();
    }
    argno = tcg_out_arg_reg32(s, argno, mem_index, true);

    tcg_out_call(s, qemu_st_helpers[opc]);

    /* label2: */
    tcg_out_label(s, lab2, s->code_ptr);
#else
    /* There are no indexed stores, so if GUEST_BASE is set we must do
       the add explicitly.  Careful to avoid R20, which is used for the
       bswaps to follow.  */
    if (GUEST_BASE != 0) {
        tcg_out_arith(s, TCG_REG_R31, addrlo_reg,
                      TCG_GUEST_BASE_REG, INSN_ADDL);
        addrlo_reg = TCG_REG_R31;
    }
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
#endif
}
1300
fd76e73a
RH
/* Emit the exit_tb epilogue: place ARG in RET0 and branch back through
   R18 to the epilogue.  NOTE(review): the instruction emitted after each
   BV appears intended to execute in the branch's delay slot (PA-RISC
   delayed branching) — confirm against the BV encoding used here.  */
static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
{
    if (!check_fit_tl(arg, 14)) {
        /* ARG does not fit a 14-bit immediate; split it so the low part
           can be added in the delay slot after loading the high part.  */
        uint32_t hi, lo;
        hi = arg & ~0x7ff;
        lo = arg & 0x7ff;
        if (lo) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
            tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
            tcg_out_addi(s, TCG_REG_RET0, lo);
            return;
        }
        arg = hi;
    }
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
}
1318
/* Emit the goto_tb chaining stub for TB slot ARG.  Only the indirect
   jump method is implemented; the direct-jump path aborts.  */
static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
    if (s->tb_jmp_offset) {
        /* direct jump method */
        fprintf(stderr, "goto_tb direct\n");
        tcg_abort();
    } else {
        /* indirect jump method: load the next-TB pointer from the
           tb_next array and branch through it.  */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
                   (tcg_target_long)(s->tb_next + arg));
        tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
    }
    /* Record where this TB's chaining point ends, for later patching.  */
    s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
}
1333
a9751609 1334static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
f54b3f92
AJ
1335 const int *const_args)
1336{
f54b3f92
AJ
1337 switch (opc) {
1338 case INDEX_op_exit_tb:
fd76e73a 1339 tcg_out_exit_tb(s, args[0]);
f54b3f92
AJ
1340 break;
1341 case INDEX_op_goto_tb:
fd76e73a 1342 tcg_out_goto_tb(s, args[0]);
f54b3f92 1343 break;
fd76e73a 1344
f54b3f92 1345 case INDEX_op_call:
fd76e73a
RH
1346 if (const_args[0]) {
1347 tcg_out_call(s, (void *)args[0]);
1348 } else {
3e1f46ea
RH
1349 /* ??? FIXME: the value in the register in args[0] is almost
1350 certainly a procedure descriptor, not a code address. We
1351 probably need to use the millicode $$dyncall routine. */
1352 tcg_abort();
fd76e73a 1353 }
f54b3f92 1354 break;
fd76e73a 1355
f54b3f92 1356 case INDEX_op_br:
fd76e73a 1357 tcg_out_branch(s, args[0], 1);
f54b3f92 1358 break;
fd76e73a 1359
f54b3f92
AJ
1360 case INDEX_op_movi_i32:
1361 tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
1362 break;
1363
1364 case INDEX_op_ld8u_i32:
fd76e73a 1365 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
f54b3f92
AJ
1366 break;
1367 case INDEX_op_ld8s_i32:
fd76e73a 1368 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
f54b3f92
AJ
1369 tcg_out_ext8s(s, args[0], args[0]);
1370 break;
1371 case INDEX_op_ld16u_i32:
fd76e73a 1372 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
f54b3f92
AJ
1373 break;
1374 case INDEX_op_ld16s_i32:
fd76e73a 1375 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
f54b3f92
AJ
1376 tcg_out_ext16s(s, args[0], args[0]);
1377 break;
1378 case INDEX_op_ld_i32:
fd76e73a 1379 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
f54b3f92
AJ
1380 break;
1381
1382 case INDEX_op_st8_i32:
fd76e73a 1383 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
f54b3f92
AJ
1384 break;
1385 case INDEX_op_st16_i32:
fd76e73a 1386 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
f54b3f92
AJ
1387 break;
1388 case INDEX_op_st_i32:
fd76e73a
RH
1389 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
1390 break;
1391
1392 case INDEX_op_add_i32:
1393 if (const_args[2]) {
1394 tcg_out_addi2(s, args[0], args[1], args[2]);
1395 } else {
1396 tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
1397 }
f54b3f92
AJ
1398 break;
1399
1400 case INDEX_op_sub_i32:
fd76e73a
RH
1401 if (const_args[1]) {
1402 if (const_args[2]) {
1403 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
1404 } else {
1405 /* Recall that SUBI is a reversed subtract. */
1406 tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
1407 }
1408 } else if (const_args[2]) {
1409 tcg_out_addi2(s, args[0], args[1], -args[2]);
1410 } else {
1411 tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
1412 }
1413 break;
1414
f54b3f92 1415 case INDEX_op_and_i32:
fd76e73a
RH
1416 if (const_args[2]) {
1417 tcg_out_andi(s, args[0], args[1], args[2]);
1418 } else {
1419 tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
1420 }
1421 break;
1422
f54b3f92 1423 case INDEX_op_or_i32:
fd76e73a
RH
1424 if (const_args[2]) {
1425 tcg_out_ori(s, args[0], args[1], args[2]);
1426 } else {
1427 tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
1428 }
1429 break;
1430
f54b3f92 1431 case INDEX_op_xor_i32:
fd76e73a
RH
1432 tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
1433 break;
1434
1435 case INDEX_op_andc_i32:
1436 if (const_args[2]) {
1437 tcg_out_andi(s, args[0], args[1], ~args[2]);
1438 } else {
1439 tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
1440 }
1441 break;
f54b3f92
AJ
1442
1443 case INDEX_op_shl_i32:
fd76e73a
RH
1444 if (const_args[2]) {
1445 tcg_out_shli(s, args[0], args[1], args[2]);
1446 } else {
1447 tcg_out_shl(s, args[0], args[1], args[2]);
1448 }
f54b3f92 1449 break;
fd76e73a 1450
f54b3f92 1451 case INDEX_op_shr_i32:
fd76e73a
RH
1452 if (const_args[2]) {
1453 tcg_out_shri(s, args[0], args[1], args[2]);
1454 } else {
1455 tcg_out_shr(s, args[0], args[1], args[2]);
1456 }
f54b3f92 1457 break;
fd76e73a 1458
f54b3f92 1459 case INDEX_op_sar_i32:
fd76e73a
RH
1460 if (const_args[2]) {
1461 tcg_out_sari(s, args[0], args[1], args[2]);
1462 } else {
1463 tcg_out_sar(s, args[0], args[1], args[2]);
1464 }
1465 break;
1466
1467 case INDEX_op_rotl_i32:
1468 if (const_args[2]) {
1469 tcg_out_rotli(s, args[0], args[1], args[2]);
1470 } else {
1471 tcg_out_rotl(s, args[0], args[1], args[2]);
1472 }
1473 break;
1474
1475 case INDEX_op_rotr_i32:
1476 if (const_args[2]) {
1477 tcg_out_rotri(s, args[0], args[1], args[2]);
1478 } else {
1479 tcg_out_rotr(s, args[0], args[1], args[2]);
1480 }
f54b3f92
AJ
1481 break;
1482
1483 case INDEX_op_mul_i32:
fd76e73a 1484 tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
f54b3f92
AJ
1485 break;
1486 case INDEX_op_mulu2_i32:
fd76e73a 1487 tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
f54b3f92 1488 break;
fd76e73a
RH
1489
1490 case INDEX_op_bswap16_i32:
1491 tcg_out_bswap16(s, args[0], args[1], 0);
f54b3f92 1492 break;
fd76e73a
RH
1493 case INDEX_op_bswap32_i32:
1494 tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
1495 break;
1496
1497 case INDEX_op_not_i32:
1498 tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
1499 break;
1500 case INDEX_op_ext8s_i32:
1501 tcg_out_ext8s(s, args[0], args[1]);
1502 break;
1503 case INDEX_op_ext16s_i32:
1504 tcg_out_ext16s(s, args[0], args[1]);
1505 break;
1506
f54b3f92 1507 case INDEX_op_brcond_i32:
fd76e73a
RH
1508 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
1509 break;
1510 case INDEX_op_brcond2_i32:
1511 tcg_out_brcond2(s, args[4], args[0], args[1],
1512 args[2], const_args[2],
1513 args[3], const_args[3], args[5]);
1514 break;
1515
1516 case INDEX_op_setcond_i32:
1517 tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
1518 break;
1519 case INDEX_op_setcond2_i32:
1520 tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
1521 args[3], const_args[3], args[4], const_args[4]);
1522 break;
1523
f0da3757
RH
1524 case INDEX_op_movcond_i32:
1525 tcg_out_movcond(s, args[5], args[0], args[1], args[2], const_args[2],
1526 args[3], const_args[3]);
1527 break;
1528
fd76e73a 1529 case INDEX_op_add2_i32:
91493631
RH
1530 tcg_out_add2(s, args[0], args[1], args[2], args[3],
1531 args[4], args[5], const_args[4]);
fd76e73a
RH
1532 break;
1533
1534 case INDEX_op_sub2_i32:
91493631
RH
1535 tcg_out_sub2(s, args[0], args[1], args[2], args[3],
1536 args[4], args[5], const_args[2], const_args[4]);
f54b3f92
AJ
1537 break;
1538
ec188429
RH
1539 case INDEX_op_deposit_i32:
1540 if (const_args[2]) {
1541 tcg_out_depi(s, args[0], args[2], args[3], args[4]);
1542 } else {
1543 tcg_out_dep(s, args[0], args[2], args[3], args[4]);
1544 }
1545 break;
1546
f54b3f92
AJ
1547 case INDEX_op_qemu_ld8u:
1548 tcg_out_qemu_ld(s, args, 0);
1549 break;
1550 case INDEX_op_qemu_ld8s:
1551 tcg_out_qemu_ld(s, args, 0 | 4);
1552 break;
1553 case INDEX_op_qemu_ld16u:
1554 tcg_out_qemu_ld(s, args, 1);
1555 break;
1556 case INDEX_op_qemu_ld16s:
1557 tcg_out_qemu_ld(s, args, 1 | 4);
1558 break;
86feb1c8 1559 case INDEX_op_qemu_ld32:
f54b3f92
AJ
1560 tcg_out_qemu_ld(s, args, 2);
1561 break;
fd76e73a
RH
1562 case INDEX_op_qemu_ld64:
1563 tcg_out_qemu_ld(s, args, 3);
1564 break;
f54b3f92
AJ
1565
1566 case INDEX_op_qemu_st8:
1567 tcg_out_qemu_st(s, args, 0);
1568 break;
1569 case INDEX_op_qemu_st16:
1570 tcg_out_qemu_st(s, args, 1);
1571 break;
1572 case INDEX_op_qemu_st32:
1573 tcg_out_qemu_st(s, args, 2);
1574 break;
fd76e73a
RH
1575 case INDEX_op_qemu_st64:
1576 tcg_out_qemu_st(s, args, 3);
1577 break;
f54b3f92
AJ
1578
1579 default:
1580 fprintf(stderr, "unknown opcode 0x%x\n", opc);
1581 tcg_abort();
1582 }
f54b3f92
AJ
1583}
1584
/* Operand constraint table: for each supported opcode, the register
   or constant constraints of its operands (outputs first).  */
static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "ri" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "rZ", "rM" } },
    { INDEX_op_or_i32, { "r", "rZ", "rO" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
    /* Note that the second argument will be inverted, which means
       we want a constant whose inversion matches M, and that O = ~M.
       See the implementation of and_mask_p.  */
    { INDEX_op_andc_i32, { "r", "rZ", "rO" } },

    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },

    { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    /* ??? We can actually support a signed 14-bit arg3, but we
       only have existing constraints for a signed 11-bit.  */
    { INDEX_op_movcond_i32, { "r", "rZ", "rI", "rI", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },

    { INDEX_op_deposit_i32, { "r", "0", "rJ" } },

/* qemu_ld/st operand counts depend on whether the guest address is
   one register (32-bit guest) or two (64-bit guest).  */
#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
#endif
    { -1 },
};
1672
fd76e73a
RH
/* Registers the prologue must save/restore around generated code.
   The ordering here must match .fde.reg_ofs in debug_frame below.  */
static int tcg_target_callee_save_regs[] = {
    /* R2, the return address register, is saved specially
       in the caller's frame.  */
    /* R3, the frame pointer, is not currently modified.  */
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17, /* R17 is the global env.  */
    TCG_REG_R18
};
1693
e7bd6300
RH
/* Total prologue stack frame: outgoing call args, callee-save area and
   TCG temp buffer, rounded up to the stack alignment.  */
#define FRAME_SIZE ((-TCG_TARGET_CALL_STACK_OFFSET \
                     + TCG_TARGET_STATIC_CALL_ARGS_SIZE \
                     + ARRAY_SIZE(tcg_target_callee_save_regs) * 4 \
                     + CPU_TEMP_BUF_NLONGS * sizeof(long) \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
1700
e4d58b41 1701static void tcg_target_qemu_prologue(TCGContext *s)
fd76e73a
RH
1702{
1703 int frame_size, i;
1704
e7bd6300 1705 frame_size = FRAME_SIZE;
fd76e73a
RH
1706
1707 /* The return address is stored in the caller's frame. */
a42bceec 1708 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, -20);
fd76e73a
RH
1709
1710 /* Allocate stack frame, saving the first register at the same time. */
1711 tcg_out_ldst(s, tcg_target_callee_save_regs[0],
a42bceec 1712 TCG_REG_CALL_STACK, frame_size, INSN_STWM);
fd76e73a
RH
1713
1714 /* Save all callee saved registers. */
1715 for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1716 tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
a42bceec 1717 TCG_REG_CALL_STACK, -frame_size + i * 4);
fd76e73a
RH
1718 }
1719
2a6a665f
BS
1720 /* Record the location of the TCG temps. */
1721 tcg_set_frame(s, TCG_REG_CALL_STACK, -frame_size + i * 4,
6e6a9924 1722 CPU_TEMP_BUF_NLONGS * sizeof(long));
2a6a665f 1723
884d348b 1724#ifdef CONFIG_USE_GUEST_BASE
4b31713d
RH
1725 if (GUEST_BASE != 0) {
1726 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
1727 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
1728 }
884d348b 1729#endif
fd76e73a 1730
cea5f9a2
BS
1731 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
1732
fd76e73a 1733 /* Jump to TB, and adjust R18 to be the return address. */
cea5f9a2 1734 tcg_out32(s, INSN_BLE_SR4 | INSN_R2(tcg_target_call_iarg_regs[1]));
3b6dac34 1735 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31);
fd76e73a
RH
1736
1737 /* Restore callee saved registers. */
a42bceec
BS
1738 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK,
1739 -frame_size - 20);
fd76e73a
RH
1740 for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1741 tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
a42bceec 1742 TCG_REG_CALL_STACK, -frame_size + i * 4);
fd76e73a
RH
1743 }
1744
1745 /* Deallocate stack frame and return. */
1746 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
1747 tcg_out_ldst(s, tcg_target_callee_save_regs[0],
a42bceec 1748 TCG_REG_CALL_STACK, -frame_size, INSN_LDWM);
fd76e73a
RH
1749}
1750
e4d58b41 1751static void tcg_target_init(TCGContext *s)
f54b3f92
AJ
1752{
1753 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
fd76e73a
RH
1754
1755 tcg_regset_clear(tcg_target_call_clobber_regs);
1756 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
1757 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
1758 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
1759 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
1760 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
1761 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
1762 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
1763 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
1764 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);
f54b3f92
AJ
1765
1766 tcg_regset_clear(s->reserved_regs);
1767 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* hardwired to zero */
1768 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* addil target */
1769 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP); /* link register */
1770 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* frame pointer */
1771 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
1772 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
1773 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
1774 tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP); /* data pointer */
a42bceec 1775 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); /* stack pointer */
f54b3f92
AJ
1776 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */
1777
1778 tcg_add_target_add_op_defs(hppa_op_defs);
1779}
e7bd6300
RH
1780
/* Minimal DWARF .debug_frame structures describing the prologue's
   frame layout, handed to GDB via tcg_register_jit_int.  */

/* Common Information Entry.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

/* Frame Description Entry covering the generated-code buffer.  */
typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    tcg_target_long func_start __attribute__((packed));
    tcg_target_long func_len __attribute__((packed));
    uint8_t def_cfa[4];
    uint8_t ret_ofs[3];
    uint8_t reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrameFDE;

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDE fde;
} DebugFrame;
1805
#define ELF_HOST_MACHINE  EM_PARISC
#define ELF_HOST_FLAGS    EFA_PARISC_1_1

/* ??? BFD (and thus GDB) wants very much to distinguish between HPUX
   and other extensions.  We don't really care, but if we don't set this
   to *something* then the object file won't be properly matched.  */
#define ELF_OSABI         ELFOSABI_LINUX

/* Pre-built unwind info; func_start/func_len are patched in
   tcg_register_jit.  CFA/offset bytes are hand-encoded DWARF CFIs.  */
static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = 1,
    .cie.return_column = 2,

    .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
    .fde.def_cfa = {
        0x12, 30,                       /* DW_CFA_def_cfa_sf sp, ... */
        (-FRAME_SIZE & 0x7f) | 0x80,    /* ... sleb128 -FRAME_SIZE */
        (-FRAME_SIZE >> 7) & 0x7f
    },
    .fde.ret_ofs = {
        0x11, 2, (-20 / 4) & 0x7f       /* DW_CFA_offset_extended_sf r2, 20 */
    },
    .fde.reg_ofs = {
        /* This must match the ordering in tcg_target_callee_save_regs.  */
        0x80 + 4, 0,                    /* DW_CFA_offset r4, 0 */
        0x80 + 5, 4,                    /* DW_CFA_offset r5, 4 */
        0x80 + 6, 8,                    /* DW_CFA_offset r6, 8 */
        0x80 + 7, 12,                   /* ... */
        0x80 + 8, 16,
        0x80 + 9, 20,
        0x80 + 10, 24,
        0x80 + 11, 28,
        0x80 + 12, 32,
        0x80 + 13, 36,
        0x80 + 14, 40,
        0x80 + 15, 44,
        0x80 + 16, 48,
        0x80 + 17, 52,
        0x80 + 18, 56,
    }
};
1850
1851void tcg_register_jit(void *buf, size_t buf_size)
1852{
1853 debug_frame.fde.func_start = (tcg_target_long) buf;
1854 debug_frame.fde.func_len = buf_size;
1855
1856 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1857}