git.proxmox.com Git - qemu.git/blame - tcg/hppa/tcg-target.c
tcg: Allow TCG_TARGET_REG_BITS to be specified independently
f54b3f92
AJ
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
78cd7b83
RH
25#if TCG_TARGET_REG_BITS != 32
26#error unsupported
27#endif
28
d4a9eb1f 29#ifndef NDEBUG
f54b3f92 30static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
fd76e73a
RH
31 "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
32 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
33 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
34 "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
f54b3f92 35};
d4a9eb1f 36#endif
f54b3f92 37
fd76e73a
RH
38/* This is an 8 byte temp slot in the stack frame. */
39#define STACK_TEMP_OFS -16
40
fd76e73a
RH
41#ifdef CONFIG_USE_GUEST_BASE
42#define TCG_GUEST_BASE_REG TCG_REG_R16
43#else
44#define TCG_GUEST_BASE_REG TCG_REG_R0
45#endif
46
f54b3f92
AJ
47static const int tcg_target_reg_alloc_order[] = {
48 TCG_REG_R4,
49 TCG_REG_R5,
50 TCG_REG_R6,
51 TCG_REG_R7,
52 TCG_REG_R8,
53 TCG_REG_R9,
54 TCG_REG_R10,
55 TCG_REG_R11,
56 TCG_REG_R12,
57 TCG_REG_R13,
58
59 TCG_REG_R17,
60 TCG_REG_R14,
61 TCG_REG_R15,
62 TCG_REG_R16,
fd76e73a
RH
63
64 TCG_REG_R26,
65 TCG_REG_R25,
66 TCG_REG_R24,
67 TCG_REG_R23,
68
69 TCG_REG_RET0,
70 TCG_REG_RET1,
f54b3f92
AJ
71};
72
73static const int tcg_target_call_iarg_regs[4] = {
74 TCG_REG_R26,
75 TCG_REG_R25,
76 TCG_REG_R24,
77 TCG_REG_R23,
78};
79
80static const int tcg_target_call_oarg_regs[2] = {
81 TCG_REG_RET0,
82 TCG_REG_RET1,
83};
84
fd76e73a
RH
85/* True iff val fits a signed field of width BITS. */
86static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
87{
88 return (val << ((sizeof(tcg_target_long) * 8 - bits))
89 >> (sizeof(tcg_target_long) * 8 - bits)) == val;
90}
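
/* Illustrative sketch, not part of the original file: a few worked cases
   of check_fit_tl for an 11-bit field (signed range -1024..1023).  The
   shift left followed by the arithmetic shift right sign-extends VAL
   from bit (BITS-1); the value fits exactly when that round trip is
   lossless.

       check_fit_tl(1023, 11)  -> 1
       check_fit_tl(1024, 11)  -> 0
       check_fit_tl(-1024, 11) -> 1
       check_fit_tl(-1025, 11) -> 0
*/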
91
92/* True iff depi can be used to compute (reg | MASK).
93 Accept a bit pattern like:
94 0....01....1
95 1....10....0
96 0..01..10..0
97 Copied from gcc sources. */
98static inline int or_mask_p(tcg_target_ulong mask)
99{
0085bd51
RH
100 if (mask == 0 || mask == -1) {
101 return 0;
102 }
fd76e73a
RH
103 mask += mask & -mask;
104 return (mask & (mask - 1)) == 0;
105}
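
/* Illustrative sketch, not part of the original file: or_mask_p accepts
   exactly the masks that form a single contiguous run of ones (other
   than 0 and -1), i.e. those that one DEPI of all-one bits can produce.
   "mask & -mask" isolates the lowest set bit; adding it either carries
   the whole run away (leaving zero or a power of two) or exposes a
   second, disjoint run.  A brute-force cross-check might look like the
   disabled sketch below (or_mask_p_slow is hypothetical). */
#if 0
static int or_mask_p_slow(tcg_target_ulong mask)
{
    int lo, hi;

    if (mask == 0 || mask == (tcg_target_ulong)-1) {
        return 0;
    }
    /* Find the lowest and highest set bits...  */
    for (lo = 0; !(mask & ((tcg_target_ulong)1 << lo)); lo++) {
        continue;
    }
    for (hi = 8 * sizeof(mask) - 1; !(mask & ((tcg_target_ulong)1 << hi)); hi--) {
        continue;
    }
    /* ...and accept iff every bit in between is also set.  */
    return mask == (((tcg_target_ulong)2 << hi) - ((tcg_target_ulong)1 << lo));
}
#endif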
106
107/* True iff depi or extru can be used to compute (reg & mask).
108 Accept a bit pattern like these:
109 0....01....1
110 1....10....0
111 1..10..01..1
112 Copied from gcc sources. */
113static inline int and_mask_p(tcg_target_ulong mask)
114{
115 return or_mask_p(~mask);
116}
117
118static int low_sign_ext(int val, int len)
119{
120 return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
121}
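
/* Illustrative sketch, not part of the original file: PA-RISC "low sign
   extension" fields keep the sign bit in the least significant bit of
   the field, with the magnitude bits shifted up by one.  For the 14-bit
   immediates of LDO/LDW, for example:

       low_sign_ext(8, 14)  -> 0x0010   (positive: value << 1)
       low_sign_ext(-8, 14) -> 0x3ff1   (negative: magnitude bits | 1)
*/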
122
123static int reassemble_12(int as12)
124{
125 return (((as12 & 0x800) >> 11) |
126 ((as12 & 0x400) >> 8) |
127 ((as12 & 0x3ff) << 3));
128}
129
130static int reassemble_17(int as17)
131{
132 return (((as17 & 0x10000) >> 16) |
133 ((as17 & 0x0f800) << 5) |
134 ((as17 & 0x00400) >> 8) |
135 ((as17 & 0x003ff) << 3));
136}
137
138static int reassemble_21(int as21)
139{
140 return (((as21 & 0x100000) >> 20) |
141 ((as21 & 0x0ffe00) >> 8) |
142 ((as21 & 0x000180) << 7) |
143 ((as21 & 0x00007c) << 14) |
144 ((as21 & 0x000003) << 12));
145}
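
/* Illustrative note, not part of the original file: the reassemble_N
   helpers scatter an N-bit immediate into the non-contiguous fields
   used by PA-RISC branches (12/17-bit displacements) and LDIL/ADDIL
   (21-bit immediates).  For the 12-bit case the sign bit lands in
   instruction bit 0, the next bit in bit 2, and the low ten bits at
   bits 3..12, so:

       reassemble_12(0x001) -> 0x0008
       reassemble_12(0x800) -> 0x0001   (the sign bit alone)
*/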
146
147/* ??? Bizarrely, there is no PCREL12F relocation type. I guess all
148 such relocations are simply fully handled by the assembler. */
149#define R_PARISC_PCREL12F R_PARISC_NONE
150
f54b3f92
AJ
151static void patch_reloc(uint8_t *code_ptr, int type,
152 tcg_target_long value, tcg_target_long addend)
153{
fd76e73a
RH
154 uint32_t *insn_ptr = (uint32_t *)code_ptr;
155 uint32_t insn = *insn_ptr;
156 tcg_target_long pcrel;
157
158 value += addend;
159 pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;
160
f54b3f92 161 switch (type) {
fd76e73a
RH
162 case R_PARISC_PCREL12F:
163 assert(check_fit_tl(pcrel, 12));
164 /* ??? We assume all patches are forward. See tcg_out_brcond
 165 regarding setting the nul bit on the branch and eliding the nop. */
166 assert(pcrel >= 0);
167 insn &= ~0x1ffdu;
168 insn |= reassemble_12(pcrel);
169 break;
f54b3f92 170 case R_PARISC_PCREL17F:
fd76e73a
RH
171 assert(check_fit_tl(pcrel, 17));
172 insn &= ~0x1f1ffdu;
173 insn |= reassemble_17(pcrel);
f54b3f92
AJ
174 break;
175 default:
176 tcg_abort();
177 }
fd76e73a
RH
178
179 *insn_ptr = insn;
f54b3f92
AJ
180}
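
/* Illustrative sketch, not part of the original file: PA-RISC branch
   displacements are counted in words (hence the >> 2) from the address
   of the branch plus 8, i.e. past the delay slot.  For a branch being
   patched at code_ptr == 0x1000 that must reach value == 0x1020:

       pcrel = (0x1020 - (0x1000 + 8)) >> 2 = 6

   and reassemble_17(6) scatters those bits into the offset field. */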
181
f54b3f92 182/* parse target specific constraints */
d4a9eb1f 183static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
f54b3f92
AJ
184{
185 const char *ct_str;
186
187 ct_str = *pct_str;
188 switch (ct_str[0]) {
189 case 'r':
190 ct->ct |= TCG_CT_REG;
191 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
192 break;
193 case 'L': /* qemu_ld/st constraint */
194 ct->ct |= TCG_CT_REG;
195 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
196 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
197 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
198 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
199 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
200 break;
fd76e73a
RH
201 case 'Z':
202 ct->ct |= TCG_CT_CONST_0;
203 break;
204 case 'I':
205 ct->ct |= TCG_CT_CONST_S11;
206 break;
207 case 'J':
208 ct->ct |= TCG_CT_CONST_S5;
209 break;
91493631
RH
210 case 'K':
211 ct->ct |= TCG_CT_CONST_MS11;
212 break;
0085bd51
RH
213 case 'M':
214 ct->ct |= TCG_CT_CONST_AND;
215 break;
216 case 'O':
217 ct->ct |= TCG_CT_CONST_OR;
218 break;
f54b3f92
AJ
219 default:
220 return -1;
221 }
222 ct_str++;
223 *pct_str = ct_str;
224 return 0;
225}
226
227/* test if a constant matches the constraint */
fd76e73a
RH
228static int tcg_target_const_match(tcg_target_long val,
229 const TCGArgConstraint *arg_ct)
f54b3f92 230{
fd76e73a
RH
231 int ct = arg_ct->ct;
232 if (ct & TCG_CT_CONST) {
233 return 1;
234 } else if (ct & TCG_CT_CONST_0) {
235 return val == 0;
236 } else if (ct & TCG_CT_CONST_S5) {
237 return check_fit_tl(val, 5);
238 } else if (ct & TCG_CT_CONST_S11) {
239 return check_fit_tl(val, 11);
91493631
RH
240 } else if (ct & TCG_CT_CONST_MS11) {
241 return check_fit_tl(-val, 11);
0085bd51
RH
242 } else if (ct & TCG_CT_CONST_AND) {
243 return and_mask_p(val);
244 } else if (ct & TCG_CT_CONST_OR) {
245 return or_mask_p(val);
fd76e73a 246 }
f54b3f92
AJ
247 return 0;
248}
249
250#define INSN_OP(x) ((x) << 26)
251#define INSN_EXT3BR(x) ((x) << 13)
252#define INSN_EXT3SH(x) ((x) << 10)
253#define INSN_EXT4(x) ((x) << 6)
254#define INSN_EXT5(x) (x)
255#define INSN_EXT6(x) ((x) << 6)
256#define INSN_EXT7(x) ((x) << 6)
257#define INSN_EXT8A(x) ((x) << 6)
258#define INSN_EXT8B(x) ((x) << 5)
259#define INSN_T(x) (x)
260#define INSN_R1(x) ((x) << 16)
261#define INSN_R2(x) ((x) << 21)
262#define INSN_DEP_LEN(x) (32 - (x))
263#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
264#define INSN_SHDEP_P(x) ((x) << 5)
265#define INSN_COND(x) ((x) << 13)
fd76e73a
RH
266#define INSN_IM11(x) low_sign_ext(x, 11)
267#define INSN_IM14(x) low_sign_ext(x, 14)
268#define INSN_IM5(x) (low_sign_ext(x, 5) << 16)
269
270#define COND_NEVER 0
271#define COND_EQ 1
272#define COND_LT 2
273#define COND_LE 3
274#define COND_LTU 4
275#define COND_LEU 5
276#define COND_SV 6
277#define COND_OD 7
278#define COND_FALSE 8
279
280#define INSN_ADD (INSN_OP(0x02) | INSN_EXT6(0x18))
281#define INSN_ADDC (INSN_OP(0x02) | INSN_EXT6(0x1c))
282#define INSN_ADDI (INSN_OP(0x2d))
283#define INSN_ADDIL (INSN_OP(0x0a))
284#define INSN_ADDL (INSN_OP(0x02) | INSN_EXT6(0x28))
285#define INSN_AND (INSN_OP(0x02) | INSN_EXT6(0x08))
286#define INSN_ANDCM (INSN_OP(0x02) | INSN_EXT6(0x00))
287#define INSN_COMCLR (INSN_OP(0x02) | INSN_EXT6(0x22))
288#define INSN_COMICLR (INSN_OP(0x24))
289#define INSN_DEP (INSN_OP(0x35) | INSN_EXT3SH(3))
290#define INSN_DEPI (INSN_OP(0x35) | INSN_EXT3SH(7))
291#define INSN_EXTRS (INSN_OP(0x34) | INSN_EXT3SH(7))
292#define INSN_EXTRU (INSN_OP(0x34) | INSN_EXT3SH(6))
293#define INSN_LDIL (INSN_OP(0x08))
294#define INSN_LDO (INSN_OP(0x0d))
295#define INSN_MTCTL (INSN_OP(0x00) | INSN_EXT8B(0xc2))
296#define INSN_OR (INSN_OP(0x02) | INSN_EXT6(0x09))
297#define INSN_SHD (INSN_OP(0x34) | INSN_EXT3SH(2))
298#define INSN_SUB (INSN_OP(0x02) | INSN_EXT6(0x10))
299#define INSN_SUBB (INSN_OP(0x02) | INSN_EXT6(0x14))
300#define INSN_SUBI (INSN_OP(0x25))
301#define INSN_VEXTRS (INSN_OP(0x34) | INSN_EXT3SH(5))
302#define INSN_VEXTRU (INSN_OP(0x34) | INSN_EXT3SH(4))
303#define INSN_VSHD (INSN_OP(0x34) | INSN_EXT3SH(0))
304#define INSN_XOR (INSN_OP(0x02) | INSN_EXT6(0x0a))
305#define INSN_ZDEP (INSN_OP(0x35) | INSN_EXT3SH(2))
306#define INSN_ZVDEP (INSN_OP(0x35) | INSN_EXT3SH(0))
307
308#define INSN_BL (INSN_OP(0x3a) | INSN_EXT3BR(0))
309#define INSN_BL_N (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
310#define INSN_BLR (INSN_OP(0x3a) | INSN_EXT3BR(2))
311#define INSN_BV (INSN_OP(0x3a) | INSN_EXT3BR(6))
312#define INSN_BV_N (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
313#define INSN_BLE_SR4 (INSN_OP(0x39) | (1 << 13))
314
315#define INSN_LDB (INSN_OP(0x10))
316#define INSN_LDH (INSN_OP(0x11))
317#define INSN_LDW (INSN_OP(0x12))
318#define INSN_LDWM (INSN_OP(0x13))
319#define INSN_FLDDS (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))
320
321#define INSN_LDBX (INSN_OP(0x03) | INSN_EXT4(0))
322#define INSN_LDHX (INSN_OP(0x03) | INSN_EXT4(1))
323#define INSN_LDWX (INSN_OP(0x03) | INSN_EXT4(2))
324
325#define INSN_STB (INSN_OP(0x18))
326#define INSN_STH (INSN_OP(0x19))
327#define INSN_STW (INSN_OP(0x1a))
328#define INSN_STWM (INSN_OP(0x1b))
329#define INSN_FSTDS (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))
330
331#define INSN_COMBT (INSN_OP(0x20))
332#define INSN_COMBF (INSN_OP(0x22))
333#define INSN_COMIBT (INSN_OP(0x21))
334#define INSN_COMIBF (INSN_OP(0x23))
335
336/* supplied by libgcc */
e7bd6300 337extern void *__canonicalize_funcptr_for_compare(const void *);
fd76e73a 338
2a534aff 339static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
fd76e73a
RH
340{
341 /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
342 but hppa-dis.c is unaware of this definition */
343 if (ret != arg) {
344 tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
345 | INSN_R2(TCG_REG_R0));
346 }
347}
f54b3f92 348
fd76e73a 349static void tcg_out_movi(TCGContext *s, TCGType type,
2a534aff 350 TCGReg ret, tcg_target_long arg)
fd76e73a
RH
351{
352 if (check_fit_tl(arg, 14)) {
353 tcg_out32(s, INSN_LDO | INSN_R1(ret)
354 | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
355 } else {
356 uint32_t hi, lo;
357 hi = arg >> 11;
358 lo = arg & 0x7ff;
359
360 tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
361 if (lo) {
362 tcg_out32(s, INSN_LDO | INSN_R1(ret)
363 | INSN_R2(ret) | INSN_IM14(lo));
364 }
365 }
366}
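
/* Illustrative sketch, not part of the original file: constants that do
   not fit the 14-bit LDO immediate are split at bit 11 to match LDIL,
   which loads a 21-bit immediate into the top of the register.  For
   arg == 0x12345678:

       hi = 0x12345678 >> 11   = 0x2468a
       lo = 0x12345678 & 0x7ff = 0x678

       LDIL:  ret = hi << 11   = 0x12345000
       LDO:   ret = ret + lo   = 0x12345678
*/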
f54b3f92 367
fd76e73a
RH
368static void tcg_out_ldst(TCGContext *s, int ret, int addr,
369 tcg_target_long offset, int op)
370{
371 if (!check_fit_tl(offset, 14)) {
372 uint32_t hi, lo, op;
f54b3f92 373
fd76e73a
RH
374 hi = offset >> 11;
375 lo = offset & 0x7ff;
f54b3f92 376
fd76e73a
RH
377 if (addr == TCG_REG_R0) {
378 op = INSN_LDIL | INSN_R2(TCG_REG_R1);
379 } else {
380 op = INSN_ADDIL | INSN_R2(addr);
381 }
382 tcg_out32(s, op | reassemble_21(hi));
f54b3f92 383
fd76e73a
RH
384 addr = TCG_REG_R1;
385 offset = lo;
386 }
f54b3f92 387
fd76e73a
RH
388 if (ret != addr || offset != 0 || op != INSN_LDO) {
389 tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
390 }
391}
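
/* Illustrative sketch, not part of the original file: offsets that do
   not fit the 14-bit displacement are handled by folding the upper part
   into %r1 first (ADDIL's target is always %r1).  For a load at
   base + 0x12345:

       hi = 0x12345 >> 11   = 0x24
       lo = 0x12345 & 0x7ff = 0x345

       ADDIL:  %r1 = base + (0x24 << 11) = base + 0x12000
       LDW:    load from 0x345(%r1)      = base + 0x12345
*/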
f54b3f92 392
fd76e73a 393/* This function is required by tcg.c. */
2a534aff
RH
394static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
395 TCGReg arg1, tcg_target_long arg2)
fd76e73a
RH
396{
397 tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
398}
399
400/* This function is required by tcg.c. */
2a534aff
RH
401static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg ret,
402 TCGReg arg1, tcg_target_long arg2)
fd76e73a
RH
403{
404 tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
405}
406
407static void tcg_out_ldst_index(TCGContext *s, int data,
408 int base, int index, int op)
409{
410 tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
411}
412
413static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
414 tcg_target_long val)
415{
416 tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
417}
f54b3f92 418
fd76e73a
RH
419/* This function is required by tcg.c. */
420static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
421{
422 tcg_out_addi2(s, reg, reg, val);
423}
f54b3f92 424
fd76e73a
RH
425static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
426{
427 tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
428}
f54b3f92 429
fd76e73a
RH
430static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
431 tcg_target_long val, int op)
f54b3f92 432{
fd76e73a
RH
433 assert(check_fit_tl(val, 11));
434 tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
f54b3f92
AJ
435}
436
fd76e73a 437static inline void tcg_out_nop(TCGContext *s)
f54b3f92 438{
fd76e73a
RH
439 tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
440}
f54b3f92 441
fd76e73a
RH
442static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
443{
444 tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
445}
446
447/* Extract LEN bits at position OFS from ARG and place in RET.
448 Note that here the bit ordering is reversed from the PA-RISC
449 standard, such that the right-most bit is 0. */
450static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
451 unsigned ofs, unsigned len, int sign)
452{
453 assert(ofs < 32 && len <= 32 - ofs);
454 tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
455 | INSN_R1(ret) | INSN_R2(arg)
456 | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
f54b3f92
AJ
457}
458
fd76e73a
RH
459/* Likewise with OFS interpreted little-endian. */
460static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
461 unsigned ofs, unsigned len)
f54b3f92 462{
fd76e73a
RH
463 assert(ofs < 32 && len <= 32 - ofs);
464 tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
465 | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
466}
467
ec188429
RH
468static inline void tcg_out_depi(TCGContext *s, int ret, int arg,
469 unsigned ofs, unsigned len)
470{
471 assert(ofs < 32 && len <= 32 - ofs);
472 tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(arg)
473 | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
474}
475
fd76e73a
RH
476static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
477 unsigned count)
478{
479 assert(count < 32);
480 tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
481 | INSN_SHDEP_CP(count));
482}
483
484static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
485{
486 tcg_out_mtctl_sar(s, creg);
487 tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
488}
489
490static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
491{
0085bd51
RH
492 int bs0, bs1;
493
494 /* Note that the argument is constrained to match or_mask_p. */
495 for (bs0 = 0; bs0 < 32; bs0++) {
496 if ((m & (1u << bs0)) != 0) {
497 break;
fd76e73a 498 }
0085bd51
RH
499 }
500 for (bs1 = bs0; bs1 < 32; bs1++) {
501 if ((m & (1u << bs1)) == 0) {
502 break;
fd76e73a 503 }
fd76e73a 504 }
0085bd51
RH
505 assert(bs1 == 32 || (1ul << bs1) > m);
506
3b6dac34 507 tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
ec188429 508 tcg_out_depi(s, ret, -1, bs0, bs1 - bs0);
fd76e73a
RH
509}
510
511static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
512{
0085bd51 513 int ls0, ls1, ms0;
fd76e73a 514
0085bd51
RH
515 /* Note that the argument is constrained to match and_mask_p. */
516 for (ls0 = 0; ls0 < 32; ls0++) {
517 if ((m & (1u << ls0)) == 0) {
518 break;
fd76e73a 519 }
0085bd51
RH
520 }
521 for (ls1 = ls0; ls1 < 32; ls1++) {
522 if ((m & (1u << ls1)) != 0) {
523 break;
fd76e73a 524 }
0085bd51
RH
525 }
526 for (ms0 = ls1; ms0 < 32; ms0++) {
527 if ((m & (1u << ms0)) == 0) {
528 break;
fd76e73a 529 }
0085bd51
RH
530 }
531 assert (ms0 == 32);
fd76e73a 532
0085bd51
RH
533 if (ls1 == 32) {
534 tcg_out_extr(s, ret, arg, 0, ls0, 0);
f54b3f92 535 } else {
3b6dac34 536 tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
ec188429 537 tcg_out_depi(s, ret, 0, ls0, ls1 - ls0);
f54b3f92
AJ
538 }
539}
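
/* Illustrative sketch, not part of the original file: two ways the
   routine above resolves an and_mask_p mask:

       m = 0x000000ff:  ls0 = 8, ls1 = 32 -> extract the low 8 bits
                        with EXTRU (keep bits 0..7, zero the rest)
       m = 0xffff00ff:  ls0 = 8, ls1 = 16 -> copy, then DEPI a zero
                        field at bit 8 for 8 bits (clear bits 8..15)
*/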
540
fd76e73a 541static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
f54b3f92 542{
fd76e73a 543 tcg_out_extr(s, ret, arg, 0, 8, 1);
f54b3f92
AJ
544}
545
fd76e73a 546static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
f54b3f92 547{
fd76e73a 548 tcg_out_extr(s, ret, arg, 0, 16, 1);
f54b3f92
AJ
549}
550
fd76e73a 551static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
f54b3f92 552{
fd76e73a
RH
553 count &= 31;
554 tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
555 | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
f54b3f92
AJ
556}
557
fd76e73a 558static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
f54b3f92 559{
fd76e73a
RH
560 tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
561 tcg_out_mtctl_sar(s, TCG_REG_R20);
562 tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
f54b3f92
AJ
563}
564
fd76e73a 565static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
f54b3f92 566{
fd76e73a
RH
567 count &= 31;
568 tcg_out_extr(s, ret, arg, count, 32 - count, 0);
f54b3f92
AJ
569}
570
fd76e73a 571static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
f54b3f92 572{
fd76e73a 573 tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
f54b3f92
AJ
574}
575
fd76e73a 576static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
f54b3f92 577{
fd76e73a
RH
578 count &= 31;
579 tcg_out_extr(s, ret, arg, count, 32 - count, 1);
f54b3f92
AJ
580}
581
fd76e73a 582static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
f54b3f92 583{
fd76e73a
RH
584 tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
585 tcg_out_mtctl_sar(s, TCG_REG_R20);
586 tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
f54b3f92
AJ
587}
588
fd76e73a 589static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
f54b3f92 590{
fd76e73a
RH
591 count &= 31;
592 tcg_out_shd(s, ret, arg, arg, 32 - count);
f54b3f92
AJ
593}
594
fd76e73a
RH
595static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
596{
597 tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
598 tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
f54b3f92
AJ
599}
600
fd76e73a
RH
601static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
602{
603 count &= 31;
604 tcg_out_shd(s, ret, arg, arg, count);
f54b3f92
AJ
605}
606
fd76e73a
RH
607static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
608{
609 tcg_out_vshd(s, ret, arg, arg, creg);
f54b3f92
AJ
610}
611
fd76e73a
RH
612static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
613{
614 if (ret != arg) {
3b6dac34 615 tcg_out_mov(s, TCG_TYPE_I32, ret, arg); /* arg = xxAB */
fd76e73a
RH
616 }
617 tcg_out_dep(s, ret, ret, 16, 8); /* ret = xBAB */
618 tcg_out_extr(s, ret, ret, 8, 16, sign); /* ret = ..BA */
f54b3f92
AJ
619}
620
fd76e73a 621static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
f54b3f92 622{
fd76e73a
RH
623 /* arg = ABCD */
624 tcg_out_rotri(s, temp, arg, 16); /* temp = CDAB */
625 tcg_out_dep(s, temp, temp, 16, 8); /* temp = CBAB */
626 tcg_out_shd(s, ret, arg, temp, 8); /* ret = DCBA */
f54b3f92
AJ
627}
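
/* Illustrative sketch, not part of the original file: the three-step
   bswap32 with concrete bytes, arg = 0x11223344 (A=11 B=22 C=33 D=44):

       rotri temp,arg,16     temp = 0x33441122   (CDAB)
       dep   temp,16,8       temp = 0x33221122   (CBAB)
       shd   ret,arg,temp,8  ret  = 0x44332211   (DCBA)
*/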
628
e7bd6300 629static void tcg_out_call(TCGContext *s, const void *func)
fd76e73a
RH
630{
631 tcg_target_long val, hi, lo, disp;
632
633 val = (uint32_t)__canonicalize_funcptr_for_compare(func);
634 disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;
635
636 if (check_fit_tl(disp, 17)) {
637 tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
638 } else {
639 hi = val >> 11;
640 lo = val & 0x7ff;
641
642 tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
643 tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
644 | reassemble_17(lo >> 2));
3b6dac34 645 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_RP, TCG_REG_R31);
fd76e73a
RH
646 }
647}
79383c9c 648
fd76e73a
RH
649static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
650 int arg1, int arg2)
651{
652 /* Store both words into the stack for copy to the FPU. */
a42bceec
BS
653 tcg_out_ldst(s, arg1, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_STW);
654 tcg_out_ldst(s, arg2, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4, INSN_STW);
fd76e73a
RH
655
656 /* Load both words into the FPU at the same time. We get away
657 with this because we can address the left and right half of the
658 FPU registers individually once loaded. */
659 /* fldds stack_temp(sp),fr22 */
a42bceec 660 tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_CALL_STACK)
fd76e73a
RH
661 | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
662
663 /* xmpyu fr22r,fr22,fr22 */
664 tcg_out32(s, 0x3ad64796);
665
666 /* Store the 64-bit result back into the stack. */
667 /* fstds stack_temp(sp),fr22 */
a42bceec 668 tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_CALL_STACK)
fd76e73a
RH
669 | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
670
671 /* Load the pieces of the result that the caller requested. */
672 if (reth) {
a42bceec 673 tcg_out_ldst(s, reth, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_LDW);
fd76e73a
RH
674 }
675 if (retl) {
a42bceec
BS
676 tcg_out_ldst(s, retl, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4,
677 INSN_LDW);
fd76e73a
RH
678 }
679}
680
91493631
RH
681static void tcg_out_add2(TCGContext *s, int destl, int desth,
682 int al, int ah, int bl, int bh, int blconst)
683{
684 int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
685
686 if (blconst) {
687 tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
688 } else {
689 tcg_out_arith(s, tmp, al, bl, INSN_ADD);
690 }
691 tcg_out_arith(s, desth, ah, bh, INSN_ADDC);
692
3b6dac34 693 tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
91493631
RH
694}
695
696static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
697 int bl, int bh, int alconst, int blconst)
698{
699 int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
700
701 if (alconst) {
702 if (blconst) {
703 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
704 bl = TCG_REG_R20;
705 }
706 tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
707 } else if (blconst) {
708 tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
709 } else {
710 tcg_out_arith(s, tmp, al, bl, INSN_SUB);
711 }
712 tcg_out_arith(s, desth, ah, bh, INSN_SUBB);
713
3b6dac34 714 tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
91493631
RH
715}
716
fd76e73a
RH
717static void tcg_out_branch(TCGContext *s, int label_index, int nul)
718{
719 TCGLabel *l = &s->labels[label_index];
720 uint32_t op = nul ? INSN_BL_N : INSN_BL;
721
722 if (l->has_value) {
723 tcg_target_long val = l->u.value;
724
725 val -= (tcg_target_long)s->code_ptr + 8;
726 val >>= 2;
727 assert(check_fit_tl(val, 17));
728
729 tcg_out32(s, op | reassemble_17(val));
730 } else {
2d097a83
RH
731 /* We need to keep the offset unchanged for retranslation. */
732 uint32_t old_insn = *(uint32_t *)s->code_ptr;
733
fd76e73a 734 tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
2d097a83 735 tcg_out32(s, op | (old_insn & 0x1f1ffdu));
fd76e73a
RH
736 }
737}
738
0aed257f 739static const uint8_t tcg_cond_to_cmp_cond[] =
fd76e73a
RH
740{
741 [TCG_COND_EQ] = COND_EQ,
742 [TCG_COND_NE] = COND_EQ | COND_FALSE,
743 [TCG_COND_LT] = COND_LT,
744 [TCG_COND_GE] = COND_LT | COND_FALSE,
745 [TCG_COND_LE] = COND_LE,
746 [TCG_COND_GT] = COND_LE | COND_FALSE,
747 [TCG_COND_LTU] = COND_LTU,
748 [TCG_COND_GEU] = COND_LTU | COND_FALSE,
749 [TCG_COND_LEU] = COND_LEU,
750 [TCG_COND_GTU] = COND_LEU | COND_FALSE,
751};
752
753static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
754 TCGArg c2, int c2const, int label_index)
755{
756 TCGLabel *l = &s->labels[label_index];
757 int op, pacond;
758
759 /* Note that COMIB operates as if the immediate is the first
760 operand. We model brcond with the immediate in the second
761 to better match what targets are likely to give us. For
762 consistency, model COMB with reversed operands as well. */
763 pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
764
765 if (c2const) {
766 op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
767 op |= INSN_IM5(c2);
768 } else {
769 op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
770 op |= INSN_R1(c2);
771 }
772 op |= INSN_R2(c1);
773 op |= INSN_COND(pacond & 7);
774
775 if (l->has_value) {
776 tcg_target_long val = l->u.value;
777
778 val -= (tcg_target_long)s->code_ptr + 8;
779 val >>= 2;
780 assert(check_fit_tl(val, 12));
781
782 /* ??? Assume that all branches to defined labels are backward.
783 Which means that if the nul bit is set, the delay slot is
784 executed if the branch is taken, and not executed in fallthru. */
785 tcg_out32(s, op | reassemble_12(val));
786 tcg_out_nop(s);
787 } else {
2d097a83
RH
788 /* We need to keep the offset unchanged for retranslation. */
789 uint32_t old_insn = *(uint32_t *)s->code_ptr;
790
fd76e73a
RH
791 tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
792 /* ??? Assume that all branches to undefined labels are forward.
793 Which means that if the nul bit is set, the delay slot is
794 not executed if the branch is taken, which is what we want. */
2d097a83 795 tcg_out32(s, op | 2 | (old_insn & 0x1ffdu));
fd76e73a
RH
796 }
797}
798
799static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
800 TCGArg c1, TCGArg c2, int c2const)
801{
802 int op, pacond;
803
804 /* Note that COMICLR operates as if the immediate is the first
805 operand. We model setcond with the immediate in the second
806 to better match what targets are likely to give us. For
807 consistency, model COMCLR with reversed operands as well. */
808 pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
809
810 if (c2const) {
811 op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
812 } else {
813 op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
814 }
815 op |= INSN_COND(pacond & 7);
816 op |= pacond & COND_FALSE ? 1 << 12 : 0;
817
818 tcg_out32(s, op);
819}
820
821static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
822 TCGArg bl, int blconst, TCGArg bh, int bhconst,
823 int label_index)
824{
825 switch (cond) {
826 case TCG_COND_EQ:
c08d9ee3
RH
827 tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, al, bl, blconst);
828 tcg_out_brcond(s, TCG_COND_EQ, ah, bh, bhconst, label_index);
829 break;
fd76e73a 830 case TCG_COND_NE:
753d99d3 831 tcg_out_brcond(s, TCG_COND_NE, al, bl, blconst, label_index);
c08d9ee3 832 tcg_out_brcond(s, TCG_COND_NE, ah, bh, bhconst, label_index);
fd76e73a 833 break;
fd76e73a 834 default:
d1e321b8 835 tcg_out_brcond(s, tcg_high_cond(cond), ah, bh, bhconst, label_index);
fd76e73a
RH
836 tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
837 tcg_out_brcond(s, tcg_unsigned_cond(cond),
838 al, bl, blconst, label_index);
839 break;
840 }
841}
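
/* Illustrative sketch, not part of the original file: for the default
   case, e.g. a 64-bit unsigned a < b (TCG_COND_LTU), the emitted
   sequence behaves as:

       if (ah <u bh)  goto label;            tcg_out_brcond(high cond)
       if (ah != bh)  nullify the next insn; tcg_out_comclr(NE)
       if (al <u bl)  goto label;            tcg_out_brcond(unsigned cond)

   i.e. the branch is taken when ah <u bh, or when ah == bh and
   al <u bl. */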
842
843static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
844 TCGArg c1, TCGArg c2, int c2const)
845{
846 tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
847 tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
848}
849
850static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
851 TCGArg al, TCGArg ah, TCGArg bl, int blconst,
852 TCGArg bh, int bhconst)
853{
854 int scratch = TCG_REG_R20;
855
c08d9ee3
RH
856 /* Note that the low parts are fully consumed before scratch is set. */
857 if (ret != ah && (bhconst || ret != bh)) {
fd76e73a
RH
858 scratch = ret;
859 }
860
861 switch (cond) {
862 case TCG_COND_EQ:
863 case TCG_COND_NE:
864 tcg_out_setcond(s, cond, scratch, al, bl, blconst);
865 tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
866 tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
867 break;
868
c08d9ee3
RH
869 case TCG_COND_GE:
870 case TCG_COND_GEU:
871 case TCG_COND_LT:
872 case TCG_COND_LTU:
873 /* Optimize compares with low part zero. */
874 if (bl == 0) {
875 tcg_out_setcond(s, cond, ret, ah, bh, bhconst);
876 return;
877 }
878 /* FALLTHRU */
879
880 case TCG_COND_LE:
881 case TCG_COND_LEU:
882 case TCG_COND_GT:
883 case TCG_COND_GTU:
884 /* <= : ah < bh | (ah == bh && al <= bl) */
fd76e73a
RH
885 tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
886 tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
887 tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
d1e321b8 888 tcg_out_comclr(s, tcg_invert_cond(tcg_high_cond(cond)),
c08d9ee3 889 TCG_REG_R0, ah, bh, bhconst);
fd76e73a
RH
890 tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
891 break;
c08d9ee3
RH
892
893 default:
894 tcg_abort();
fd76e73a
RH
895 }
896
3b6dac34 897 tcg_out_mov(s, TCG_TYPE_I32, ret, scratch);
fd76e73a
RH
898}
899
f0da3757
RH
900static void tcg_out_movcond(TCGContext *s, int cond, TCGArg ret,
901 TCGArg c1, TCGArg c2, int c2const,
902 TCGArg v1, int v1const)
903{
904 tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, c1, c2, c2const);
905 if (v1const) {
906 tcg_out_movi(s, TCG_TYPE_I32, ret, v1);
907 } else {
908 tcg_out_mov(s, TCG_TYPE_I32, ret, v1);
909 }
910}
911
fd76e73a 912#if defined(CONFIG_SOFTMMU)
022c62cb 913#include "exec/softmmu_defs.h"
f54b3f92 914
e141ab52
BS
915/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
916 int mmu_idx) */
917static const void * const qemu_ld_helpers[4] = {
918 helper_ldb_mmu,
919 helper_ldw_mmu,
920 helper_ldl_mmu,
921 helper_ldq_mmu,
922};
923
924/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
925 uintxx_t val, int mmu_idx) */
926static const void * const qemu_st_helpers[4] = {
927 helper_stb_mmu,
928 helper_stw_mmu,
929 helper_stl_mmu,
930 helper_stq_mmu,
931};
fd76e73a
RH
932
933/* Load and compare a TLB entry, and branch if TLB miss. OFFSET is set to
934 the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
935 TLB for the memory index. The return value is the offset from ENV
936 contained in R1 afterward (to be used when loading ADDEND); if the
937 return value is 0, R1 is not used. */
938
939static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
940 int addrhi, int s_bits, int lab_miss, int offset)
941{
942 int ret;
943
944 /* Extracting the index into the TLB. The "normal C operation" is
945 r1 = addr_reg >> TARGET_PAGE_BITS;
946 r1 &= CPU_TLB_SIZE - 1;
947 r1 <<= CPU_TLB_ENTRY_BITS;
948 What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
949 and place them at CPU_TLB_ENTRY_BITS. We can combine the first two
950 operations with an EXTRU. Unfortunately, the current value of
951 CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
952 add that follows. */
953 tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
fd76e73a
RH
954 tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
955 tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);
956
957 /* Make sure that both the addr_{read,write} and addend can be
958 read with a 14-bit offset from the same base register. */
959 if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
960 ret = 0;
961 } else {
962 ret = (offset + 0x400) & ~0x7ff;
963 offset = ret - offset;
964 tcg_out_addi2(s, TCG_REG_R1, r1, ret);
965 r1 = TCG_REG_R1;
966 }
967
968 /* Load the entry from the computed slot. */
969 if (TARGET_LONG_BITS == 64) {
970 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
971 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
972 } else {
973 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
974 }
975
e55f523d
RH
976 /* Compute the value that ought to appear in the TLB for a hit, namely,
977 the page of the address. We include the low N bits of the address
978 to catch unaligned accesses and force them onto the slow path. Do
979 this computation after having issued the load from the TLB slot to
980 give the load time to complete. */
739734cb
RH
981 tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
982
fd76e73a
RH
983 /* If not equal, jump to lab_miss. */
984 if (TARGET_LONG_BITS == 64) {
985 tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
986 r0, 0, addrhi, 0, lab_miss);
987 } else {
988 tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
989 }
990
991 return ret;
992}
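
/* Illustrative sketch, not part of the original file: the EXTRU + SHLI +
   ADDL sequence above computes the same TLB slot address as the plain C

       r1 = (uintptr_t)env
            + (((addrlo >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1))
               << CPU_TLB_ENTRY_BITS);

   with CPU_TLB_SIZE == 1 << CPU_TLB_BITS; the addr_read/addr_write and
   addend loads are then 14-bit displacements from that base (or from
   %r1 when the offset had to be split). */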
e55f523d
RH
993
994static int tcg_out_arg_reg32(TCGContext *s, int argno, TCGArg v, bool vconst)
995{
996 if (argno < 4) {
997 if (vconst) {
998 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
999 } else {
1000 tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
1001 }
1002 } else {
1003 if (vconst && v != 0) {
1004 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, v);
1005 v = TCG_REG_R20;
1006 }
1007 tcg_out_st(s, TCG_TYPE_I32, v, TCG_REG_CALL_STACK,
1008 TCG_TARGET_CALL_STACK_OFFSET - ((argno - 3) * 4));
1009 }
1010 return argno + 1;
1011}
1012
1013static int tcg_out_arg_reg64(TCGContext *s, int argno, TCGArg vl, TCGArg vh)
1014{
1015 /* 64-bit arguments must go in even reg pairs and stack slots. */
1016 if (argno & 1) {
1017 argno++;
1018 }
1019 argno = tcg_out_arg_reg32(s, argno, vl, false);
1020 argno = tcg_out_arg_reg32(s, argno, vh, false);
1021 return argno;
1022}
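
/* Illustrative sketch, not part of the original file: one possible
   trace of how the even-pair rule lays out a qemu_st64 helper call with
   a 64-bit guest address, helper_stq_mmu(env, addr, val, mem_idx):

       argno 0    env              -> %r26
       argno 2,3  addrlo, addrhi   -> %r24, %r23  (slot 1 skipped to align)
       argno 4,5  datalo, datahi   -> stack slots
       argno 6    mem_idx          -> stack slot
*/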
f54b3f92
AJ
1023#endif
1024
f061b40e
RH
1025static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg, int datahi_reg,
1026 int addr_reg, int addend_reg, int opc)
f54b3f92 1027{
f54b3f92 1028#ifdef TARGET_WORDS_BIGENDIAN
f061b40e 1029 const int bswap = 0;
f54b3f92 1030#else
f061b40e 1031 const int bswap = 1;
f54b3f92 1032#endif
f061b40e 1033
f54b3f92 1034 switch (opc) {
fd76e73a 1035 case 0:
f061b40e 1036 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
fd76e73a
RH
1037 break;
1038 case 0 | 4:
f061b40e
RH
1039 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
1040 tcg_out_ext8s(s, datalo_reg, datalo_reg);
fd76e73a
RH
1041 break;
1042 case 1:
f061b40e 1043 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
fd76e73a 1044 if (bswap) {
f061b40e 1045 tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
fd76e73a
RH
1046 }
1047 break;
1048 case 1 | 4:
f061b40e 1049 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
fd76e73a 1050 if (bswap) {
f061b40e 1051 tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
fd76e73a 1052 } else {
f061b40e 1053 tcg_out_ext16s(s, datalo_reg, datalo_reg);
fd76e73a
RH
1054 }
1055 break;
1056 case 2:
f061b40e 1057 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
fd76e73a 1058 if (bswap) {
f061b40e 1059 tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
fd76e73a
RH
1060 }
1061 break;
1062 case 3:
1063 if (bswap) {
f061b40e
RH
1064 int t = datahi_reg;
1065 datahi_reg = datalo_reg;
1066 datalo_reg = t;
fd76e73a 1067 }
f061b40e
RH
1068 /* We can't access the low-part with a reg+reg addressing mode,
1069 so perform the addition now and use reg_ofs addressing mode. */
1070 if (addend_reg != TCG_REG_R0) {
1071 tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
1072 addr_reg = TCG_REG_R20;
1073 }
1074 /* Make sure not to clobber the base register. */
1075 if (datahi_reg == addr_reg) {
1076 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
1077 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
fd76e73a 1078 } else {
f061b40e
RH
1079 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
1080 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
fd76e73a
RH
1081 }
1082 if (bswap) {
f061b40e
RH
1083 tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
1084 tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
fd76e73a
RH
1085 }
1086 break;
1087 default:
1088 tcg_abort();
f54b3f92 1089 }
f061b40e
RH
1090}
1091
1092static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
1093{
1094 int datalo_reg = *args++;
1095 /* Note that datahi_reg is only used for 64-bit loads. */
1096 int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
1097 int addrlo_reg = *args++;
f54b3f92
AJ
1098
1099#if defined(CONFIG_SOFTMMU)
f061b40e
RH
1100 /* Note that addrhi_reg is only used for 64-bit guests. */
1101 int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
1102 int mem_index = *args;
e55f523d 1103 int lab1, lab2, argno, offset;
f061b40e
RH
1104
1105 lab1 = gen_new_label();
1106 lab2 = gen_new_label();
1107
9349b4f9 1108 offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
e55f523d
RH
1109 offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
1110 addrhi_reg, opc & 3, lab1, offset);
f061b40e
RH
1111
1112 /* TLB Hit. */
e55f523d
RH
1113 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
1114 (offset ? TCG_REG_R1 : TCG_REG_R25),
9349b4f9 1115 offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
e55f523d
RH
1116 tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
1117 TCG_REG_R20, opc);
fd76e73a
RH
1118 tcg_out_branch(s, lab2, 1);
1119
1120 /* TLB Miss. */
1121 /* label1: */
9d6fca70 1122 tcg_out_label(s, lab1, s->code_ptr);
fd76e73a 1123
e55f523d
RH
1124 argno = 0;
1125 argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
fd76e73a 1126 if (TARGET_LONG_BITS == 64) {
e55f523d
RH
1127 argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
1128 } else {
1129 argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
fd76e73a 1130 }
e55f523d
RH
1131 argno = tcg_out_arg_reg32(s, argno, mem_index, true);
1132
f061b40e 1133 tcg_out_call(s, qemu_ld_helpers[opc & 3]);
fd76e73a
RH
1134
1135 switch (opc) {
1136 case 0:
f061b40e 1137 tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
fd76e73a
RH
1138 break;
1139 case 0 | 4:
f061b40e 1140 tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
fd76e73a
RH
1141 break;
1142 case 1:
f061b40e 1143 tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
fd76e73a
RH
1144 break;
1145 case 1 | 4:
f061b40e 1146 tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
fd76e73a
RH
1147 break;
1148 case 2:
1149 case 2 | 4:
3b6dac34 1150 tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET0);
fd76e73a
RH
1151 break;
1152 case 3:
3b6dac34
RH
1153 tcg_out_mov(s, TCG_TYPE_I32, datahi_reg, TCG_REG_RET0);
1154 tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET1);
fd76e73a
RH
1155 break;
1156 default:
1157 tcg_abort();
1158 }
1159
f54b3f92 1160 /* label2: */
9d6fca70 1161 tcg_out_label(s, lab2, s->code_ptr);
f061b40e
RH
1162#else
1163 tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
1164 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
f54b3f92
AJ
1165#endif
1166}
1167
e55f523d
RH
1168static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg,
1169 int datahi_reg, int addr_reg, int opc)
f54b3f92 1170{
f54b3f92 1171#ifdef TARGET_WORDS_BIGENDIAN
f061b40e 1172 const int bswap = 0;
f54b3f92 1173#else
f061b40e 1174 const int bswap = 1;
f54b3f92 1175#endif
f061b40e 1176
f54b3f92
AJ
1177 switch (opc) {
1178 case 0:
f061b40e 1179 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
f54b3f92
AJ
1180 break;
1181 case 1:
1182 if (bswap) {
f061b40e
RH
1183 tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
1184 datalo_reg = TCG_REG_R20;
f54b3f92 1185 }
f061b40e 1186 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
f54b3f92
AJ
1187 break;
1188 case 2:
1189 if (bswap) {
f061b40e
RH
1190 tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
1191 datalo_reg = TCG_REG_R20;
f54b3f92 1192 }
f061b40e 1193 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
f54b3f92
AJ
1194 break;
1195 case 3:
fd76e73a 1196 if (bswap) {
f061b40e
RH
1197 tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
1198 tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
1199 datahi_reg = TCG_REG_R20;
1200 datalo_reg = TCG_REG_R23;
f54b3f92 1201 }
f061b40e
RH
1202 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
1203 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
f54b3f92
AJ
1204 break;
1205 default:
1206 tcg_abort();
1207 }
1208
f061b40e
RH
1209}
1210
1211static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
1212{
1213 int datalo_reg = *args++;
1214 /* Note that datahi_reg is only used for 64-bit stores. */
1215 int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
1216 int addrlo_reg = *args++;
1217
f54b3f92 1218#if defined(CONFIG_SOFTMMU)
f061b40e
RH
1219 /* Note that addrhi_reg is only used for 64-bit guests. */
1220 int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
1221 int mem_index = *args;
e55f523d 1222 int lab1, lab2, argno, next, offset;
f061b40e
RH
1223
1224 lab1 = gen_new_label();
1225 lab2 = gen_new_label();
1226
9349b4f9 1227 offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
e55f523d
RH
1228 offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
1229 addrhi_reg, opc, lab1, offset);
f061b40e
RH
1230
1231 /* TLB Hit. */
e55f523d
RH
1232 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
1233 (offset ? TCG_REG_R1 : TCG_REG_R25),
9349b4f9 1234 offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
f061b40e
RH
1235
1236 /* There are no indexed stores, so we must do this addition explicitly.
1237 Careful to avoid R20, which is used for the bswaps to follow. */
1238 tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
1239 tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
fd76e73a
RH
1240 tcg_out_branch(s, lab2, 1);
1241
1242 /* TLB Miss. */
1243 /* label1: */
9d6fca70 1244 tcg_out_label(s, lab1, s->code_ptr);
fd76e73a 1245
e55f523d
RH
1246 argno = 0;
1247 argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
fd76e73a 1248 if (TARGET_LONG_BITS == 64) {
e55f523d
RH
1249 argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
1250 } else {
1251 argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
fd76e73a
RH
1252 }
1253
e55f523d 1254 next = (argno < 4 ? tcg_target_call_iarg_regs[argno] : TCG_REG_R20);
fd76e73a
RH
1255 switch(opc) {
1256 case 0:
e55f523d
RH
1257 tcg_out_andi(s, next, datalo_reg, 0xff);
1258 argno = tcg_out_arg_reg32(s, argno, next, false);
fd76e73a
RH
1259 break;
1260 case 1:
e55f523d
RH
1261 tcg_out_andi(s, next, datalo_reg, 0xffff);
1262 argno = tcg_out_arg_reg32(s, argno, next, false);
fd76e73a
RH
1263 break;
1264 case 2:
e55f523d 1265 argno = tcg_out_arg_reg32(s, argno, datalo_reg, false);
fd76e73a
RH
1266 break;
1267 case 3:
e55f523d 1268 argno = tcg_out_arg_reg64(s, argno, datalo_reg, datahi_reg);
fd76e73a
RH
1269 break;
1270 default:
1271 tcg_abort();
1272 }
e55f523d 1273 argno = tcg_out_arg_reg32(s, argno, mem_index, true);
fd76e73a 1274
f061b40e 1275 tcg_out_call(s, qemu_st_helpers[opc]);
fd76e73a 1276
f54b3f92 1277 /* label2: */
9d6fca70 1278 tcg_out_label(s, lab2, s->code_ptr);
f061b40e 1279#else
e55f523d
RH
1280 /* There are no indexed stores, so if GUEST_BASE is set we must do
1281 the add explicitly. Careful to avoid R20, which is used for the
1282 bswaps to follow. */
f061b40e 1283 if (GUEST_BASE != 0) {
e55f523d
RH
1284 tcg_out_arith(s, TCG_REG_R31, addrlo_reg,
1285 TCG_GUEST_BASE_REG, INSN_ADDL);
f061b40e
RH
1286 addrlo_reg = TCG_REG_R31;
1287 }
1288 tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
f54b3f92
AJ
1289#endif
1290}
1291
fd76e73a
RH
1292static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
1293{
1294 if (!check_fit_tl(arg, 14)) {
1295 uint32_t hi, lo;
1296 hi = arg & ~0x7ff;
1297 lo = arg & 0x7ff;
1298 if (lo) {
1299 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
1300 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
1301 tcg_out_addi(s, TCG_REG_RET0, lo);
1302 return;
1303 }
1304 arg = hi;
1305 }
1306 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
1307 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
1308}
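
/* Illustrative sketch, not part of the original file: the LDO that adds
   the low bits executes in the BV's delay slot.  For a return value
   arg == 0x40001234, which does not fit a signed 14-bit LDO:

       hi = 0x40001234 & ~0x7ff = 0x40001000
       lo = 0x40001234 &  0x7ff = 0x234

       (movi)  %ret0 = 0x40001000            via LDIL
       bv      %r0(%r18)                     back to the epilogue
       ldo     0x234(%ret0),%ret0            delay slot: %ret0 = 0x40001234
*/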
1309
1310static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
1311{
1312 if (s->tb_jmp_offset) {
1313 /* direct jump method */
1314 fprintf(stderr, "goto_tb direct\n");
1315 tcg_abort();
1316 } else {
1317 /* indirect jump method */
1318 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
1319 (tcg_target_long)(s->tb_next + arg));
1320 tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
1321 }
1322 s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
1323}
1324
a9751609 1325static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
f54b3f92
AJ
1326 const int *const_args)
1327{
f54b3f92
AJ
1328 switch (opc) {
1329 case INDEX_op_exit_tb:
fd76e73a 1330 tcg_out_exit_tb(s, args[0]);
f54b3f92
AJ
1331 break;
1332 case INDEX_op_goto_tb:
fd76e73a 1333 tcg_out_goto_tb(s, args[0]);
f54b3f92 1334 break;
fd76e73a 1335
f54b3f92 1336 case INDEX_op_call:
fd76e73a
RH
1337 if (const_args[0]) {
1338 tcg_out_call(s, (void *)args[0]);
1339 } else {
3e1f46ea
RH
1340 /* ??? FIXME: the value in the register in args[0] is almost
1341 certainly a procedure descriptor, not a code address. We
1342 probably need to use the millicode $$dyncall routine. */
1343 tcg_abort();
fd76e73a 1344 }
f54b3f92 1345 break;
fd76e73a 1346
f54b3f92 1347 case INDEX_op_br:
fd76e73a 1348 tcg_out_branch(s, args[0], 1);
f54b3f92 1349 break;
fd76e73a 1350
f54b3f92
AJ
1351 case INDEX_op_movi_i32:
1352 tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
1353 break;
1354
1355 case INDEX_op_ld8u_i32:
fd76e73a 1356 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
f54b3f92
AJ
1357 break;
1358 case INDEX_op_ld8s_i32:
fd76e73a 1359 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
f54b3f92
AJ
1360 tcg_out_ext8s(s, args[0], args[0]);
1361 break;
1362 case INDEX_op_ld16u_i32:
fd76e73a 1363 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
f54b3f92
AJ
1364 break;
1365 case INDEX_op_ld16s_i32:
fd76e73a 1366 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
f54b3f92
AJ
1367 tcg_out_ext16s(s, args[0], args[0]);
1368 break;
1369 case INDEX_op_ld_i32:
fd76e73a 1370 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
f54b3f92
AJ
1371 break;
1372
1373 case INDEX_op_st8_i32:
fd76e73a 1374 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
f54b3f92
AJ
1375 break;
1376 case INDEX_op_st16_i32:
fd76e73a 1377 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
f54b3f92
AJ
1378 break;
1379 case INDEX_op_st_i32:
fd76e73a
RH
1380 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
1381 break;
1382
1383 case INDEX_op_add_i32:
1384 if (const_args[2]) {
1385 tcg_out_addi2(s, args[0], args[1], args[2]);
1386 } else {
1387 tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
1388 }
f54b3f92
AJ
1389 break;
1390
1391 case INDEX_op_sub_i32:
fd76e73a
RH
1392 if (const_args[1]) {
1393 if (const_args[2]) {
1394 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
1395 } else {
1396 /* Recall that SUBI is a reversed subtract. */
1397 tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
1398 }
1399 } else if (const_args[2]) {
1400 tcg_out_addi2(s, args[0], args[1], -args[2]);
1401 } else {
1402 tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
1403 }
1404 break;
1405
f54b3f92 1406 case INDEX_op_and_i32:
fd76e73a
RH
1407 if (const_args[2]) {
1408 tcg_out_andi(s, args[0], args[1], args[2]);
1409 } else {
1410 tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
1411 }
1412 break;
1413
f54b3f92 1414 case INDEX_op_or_i32:
fd76e73a
RH
1415 if (const_args[2]) {
1416 tcg_out_ori(s, args[0], args[1], args[2]);
1417 } else {
1418 tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
1419 }
1420 break;
1421
f54b3f92 1422 case INDEX_op_xor_i32:
fd76e73a
RH
1423 tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
1424 break;
1425
1426 case INDEX_op_andc_i32:
1427 if (const_args[2]) {
1428 tcg_out_andi(s, args[0], args[1], ~args[2]);
1429 } else {
1430 tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
1431 }
1432 break;
f54b3f92
AJ
1433
1434 case INDEX_op_shl_i32:
fd76e73a
RH
1435 if (const_args[2]) {
1436 tcg_out_shli(s, args[0], args[1], args[2]);
1437 } else {
1438 tcg_out_shl(s, args[0], args[1], args[2]);
1439 }
f54b3f92 1440 break;
fd76e73a 1441
f54b3f92 1442 case INDEX_op_shr_i32:
fd76e73a
RH
1443 if (const_args[2]) {
1444 tcg_out_shri(s, args[0], args[1], args[2]);
1445 } else {
1446 tcg_out_shr(s, args[0], args[1], args[2]);
1447 }
f54b3f92 1448 break;
fd76e73a 1449
f54b3f92 1450 case INDEX_op_sar_i32:
fd76e73a
RH
1451 if (const_args[2]) {
1452 tcg_out_sari(s, args[0], args[1], args[2]);
1453 } else {
1454 tcg_out_sar(s, args[0], args[1], args[2]);
1455 }
1456 break;
1457
1458 case INDEX_op_rotl_i32:
1459 if (const_args[2]) {
1460 tcg_out_rotli(s, args[0], args[1], args[2]);
1461 } else {
1462 tcg_out_rotl(s, args[0], args[1], args[2]);
1463 }
1464 break;
1465
1466 case INDEX_op_rotr_i32:
1467 if (const_args[2]) {
1468 tcg_out_rotri(s, args[0], args[1], args[2]);
1469 } else {
1470 tcg_out_rotr(s, args[0], args[1], args[2]);
1471 }
f54b3f92
AJ
1472 break;
1473
1474 case INDEX_op_mul_i32:
fd76e73a 1475 tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
f54b3f92
AJ
1476 break;
1477 case INDEX_op_mulu2_i32:
fd76e73a 1478 tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
f54b3f92 1479 break;
fd76e73a
RH
1480
1481 case INDEX_op_bswap16_i32:
1482 tcg_out_bswap16(s, args[0], args[1], 0);
f54b3f92 1483 break;
fd76e73a
RH
1484 case INDEX_op_bswap32_i32:
1485 tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
1486 break;
1487
1488 case INDEX_op_not_i32:
1489 tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
1490 break;
1491 case INDEX_op_ext8s_i32:
1492 tcg_out_ext8s(s, args[0], args[1]);
1493 break;
1494 case INDEX_op_ext16s_i32:
1495 tcg_out_ext16s(s, args[0], args[1]);
1496 break;
1497
f54b3f92 1498 case INDEX_op_brcond_i32:
fd76e73a
RH
1499 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
1500 break;
1501 case INDEX_op_brcond2_i32:
1502 tcg_out_brcond2(s, args[4], args[0], args[1],
1503 args[2], const_args[2],
1504 args[3], const_args[3], args[5]);
1505 break;
1506
1507 case INDEX_op_setcond_i32:
1508 tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
1509 break;
1510 case INDEX_op_setcond2_i32:
1511 tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
1512 args[3], const_args[3], args[4], const_args[4]);
1513 break;
1514
f0da3757
RH
1515 case INDEX_op_movcond_i32:
1516 tcg_out_movcond(s, args[5], args[0], args[1], args[2], const_args[2],
1517 args[3], const_args[3]);
1518 break;
1519
fd76e73a 1520 case INDEX_op_add2_i32:
91493631
RH
1521 tcg_out_add2(s, args[0], args[1], args[2], args[3],
1522 args[4], args[5], const_args[4]);
fd76e73a
RH
1523 break;
1524
1525 case INDEX_op_sub2_i32:
91493631
RH
1526 tcg_out_sub2(s, args[0], args[1], args[2], args[3],
1527 args[4], args[5], const_args[2], const_args[4]);
f54b3f92
AJ
1528 break;
1529
ec188429
RH
1530 case INDEX_op_deposit_i32:
1531 if (const_args[2]) {
1532 tcg_out_depi(s, args[0], args[2], args[3], args[4]);
1533 } else {
1534 tcg_out_dep(s, args[0], args[2], args[3], args[4]);
1535 }
1536 break;
1537
f54b3f92
AJ
1538 case INDEX_op_qemu_ld8u:
1539 tcg_out_qemu_ld(s, args, 0);
1540 break;
1541 case INDEX_op_qemu_ld8s:
1542 tcg_out_qemu_ld(s, args, 0 | 4);
1543 break;
1544 case INDEX_op_qemu_ld16u:
1545 tcg_out_qemu_ld(s, args, 1);
1546 break;
1547 case INDEX_op_qemu_ld16s:
1548 tcg_out_qemu_ld(s, args, 1 | 4);
1549 break;
86feb1c8 1550 case INDEX_op_qemu_ld32:
f54b3f92
AJ
1551 tcg_out_qemu_ld(s, args, 2);
1552 break;
fd76e73a
RH
1553 case INDEX_op_qemu_ld64:
1554 tcg_out_qemu_ld(s, args, 3);
1555 break;
f54b3f92
AJ
1556
1557 case INDEX_op_qemu_st8:
1558 tcg_out_qemu_st(s, args, 0);
1559 break;
1560 case INDEX_op_qemu_st16:
1561 tcg_out_qemu_st(s, args, 1);
1562 break;
1563 case INDEX_op_qemu_st32:
1564 tcg_out_qemu_st(s, args, 2);
1565 break;
fd76e73a
RH
1566 case INDEX_op_qemu_st64:
1567 tcg_out_qemu_st(s, args, 3);
1568 break;
f54b3f92
AJ
1569
1570 default:
1571 fprintf(stderr, "unknown opcode 0x%x\n", opc);
1572 tcg_abort();
1573 }
f54b3f92
AJ
1574}
1575
1576static const TCGTargetOpDef hppa_op_defs[] = {
1577 { INDEX_op_exit_tb, { } },
1578 { INDEX_op_goto_tb, { } },
1579
fd76e73a 1580 { INDEX_op_call, { "ri" } },
f54b3f92
AJ
1581 { INDEX_op_br, { } },
1582
1583 { INDEX_op_mov_i32, { "r", "r" } },
1584 { INDEX_op_movi_i32, { "r" } },
fd76e73a 1585
f54b3f92
AJ
1586 { INDEX_op_ld8u_i32, { "r", "r" } },
1587 { INDEX_op_ld8s_i32, { "r", "r" } },
1588 { INDEX_op_ld16u_i32, { "r", "r" } },
1589 { INDEX_op_ld16s_i32, { "r", "r" } },
1590 { INDEX_op_ld_i32, { "r", "r" } },
fd76e73a
RH
1591 { INDEX_op_st8_i32, { "rZ", "r" } },
1592 { INDEX_op_st16_i32, { "rZ", "r" } },
1593 { INDEX_op_st_i32, { "rZ", "r" } },
1594
1595 { INDEX_op_add_i32, { "r", "rZ", "ri" } },
1596 { INDEX_op_sub_i32, { "r", "rI", "ri" } },
0085bd51
RH
1597 { INDEX_op_and_i32, { "r", "rZ", "rM" } },
1598 { INDEX_op_or_i32, { "r", "rZ", "rO" } },
fd76e73a 1599 { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
0085bd51
RH
1600 /* Note that the second argument will be inverted, which means
1601 we want a constant whose inversion matches M, and that O = ~M.
1602 See the implementation of and_mask_p. */
1603 { INDEX_op_andc_i32, { "r", "rZ", "rO" } },
fd76e73a
RH
1604
1605 { INDEX_op_mul_i32, { "r", "r", "r" } },
1606 { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
f54b3f92 1607
fd76e73a
RH
1608 { INDEX_op_shl_i32, { "r", "r", "ri" } },
1609 { INDEX_op_shr_i32, { "r", "r", "ri" } },
1610 { INDEX_op_sar_i32, { "r", "r", "ri" } },
1611 { INDEX_op_rotl_i32, { "r", "r", "ri" } },
1612 { INDEX_op_rotr_i32, { "r", "r", "ri" } },
f54b3f92 1613
fd76e73a
RH
1614 { INDEX_op_bswap16_i32, { "r", "r" } },
1615 { INDEX_op_bswap32_i32, { "r", "r" } },
fd76e73a 1616 { INDEX_op_not_i32, { "r", "r" } },
f54b3f92 1617
fd76e73a 1618 { INDEX_op_ext8s_i32, { "r", "r" } },
fd76e73a 1619 { INDEX_op_ext16s_i32, { "r", "r" } },
fd76e73a
RH
1620
1621 { INDEX_op_brcond_i32, { "rZ", "rJ" } },
1622 { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },
1623
1624 { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
1625 { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },
1626
f0da3757
RH
1627 /* ??? We can actually support a signed 14-bit arg3, but we
1628 only have existing constraints for a signed 11-bit. */
1629 { INDEX_op_movcond_i32, { "r", "rZ", "rI", "rI", "0" } },
1630
fd76e73a 1631 { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
91493631 1632 { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },
f54b3f92 1633
ec188429
RH
1634 { INDEX_op_deposit_i32, { "r", "0", "rJ" } },
1635
f54b3f92
AJ
1636#if TARGET_LONG_BITS == 32
1637 { INDEX_op_qemu_ld8u, { "r", "L" } },
1638 { INDEX_op_qemu_ld8s, { "r", "L" } },
1639 { INDEX_op_qemu_ld16u, { "r", "L" } },
1640 { INDEX_op_qemu_ld16s, { "r", "L" } },
86feb1c8 1641 { INDEX_op_qemu_ld32, { "r", "L" } },
f54b3f92
AJ
1642 { INDEX_op_qemu_ld64, { "r", "r", "L" } },
1643
fd76e73a
RH
1644 { INDEX_op_qemu_st8, { "LZ", "L" } },
1645 { INDEX_op_qemu_st16, { "LZ", "L" } },
1646 { INDEX_op_qemu_st32, { "LZ", "L" } },
1647 { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
f54b3f92
AJ
1648#else
1649 { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
1650 { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
1651 { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
1652 { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
86feb1c8 1653 { INDEX_op_qemu_ld32, { "r", "L", "L" } },
f54b3f92
AJ
1654 { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
1655
fd76e73a
RH
1656 { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
1657 { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
1658 { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
1659 { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
f54b3f92
AJ
1660#endif
1661 { -1 },
1662};
1663
fd76e73a
RH
1664static int tcg_target_callee_save_regs[] = {
1665 /* R2, the return address register, is saved specially
1666 in the caller's frame. */
1667 /* R3, the frame pointer, is not currently modified. */
1668 TCG_REG_R4,
1669 TCG_REG_R5,
1670 TCG_REG_R6,
1671 TCG_REG_R7,
1672 TCG_REG_R8,
1673 TCG_REG_R9,
1674 TCG_REG_R10,
1675 TCG_REG_R11,
1676 TCG_REG_R12,
1677 TCG_REG_R13,
1678 TCG_REG_R14,
1679 TCG_REG_R15,
1680 TCG_REG_R16,
cea5f9a2 1681 TCG_REG_R17, /* R17 is the global env. */
fd76e73a
RH
1682 TCG_REG_R18
1683};
1684
e7bd6300
RH
1685#define FRAME_SIZE ((-TCG_TARGET_CALL_STACK_OFFSET \
1686 + TCG_TARGET_STATIC_CALL_ARGS_SIZE \
1687 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4 \
1688 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
1689 + TCG_TARGET_STACK_ALIGN - 1) \
1690 & -TCG_TARGET_STACK_ALIGN)
1691
e4d58b41 1692static void tcg_target_qemu_prologue(TCGContext *s)
fd76e73a
RH
1693{
1694 int frame_size, i;
1695
e7bd6300 1696 frame_size = FRAME_SIZE;
fd76e73a
RH
1697
1698 /* The return address is stored in the caller's frame. */
a42bceec 1699 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, -20);
fd76e73a
RH
1700
1701 /* Allocate stack frame, saving the first register at the same time. */
1702 tcg_out_ldst(s, tcg_target_callee_save_regs[0],
a42bceec 1703 TCG_REG_CALL_STACK, frame_size, INSN_STWM);
fd76e73a
RH
1704
1705 /* Save all callee saved registers. */
1706 for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1707 tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
a42bceec 1708 TCG_REG_CALL_STACK, -frame_size + i * 4);
fd76e73a
RH
1709 }
1710
2a6a665f
BS
1711 /* Record the location of the TCG temps. */
1712 tcg_set_frame(s, TCG_REG_CALL_STACK, -frame_size + i * 4,
6e6a9924 1713 CPU_TEMP_BUF_NLONGS * sizeof(long));
2a6a665f 1714
884d348b 1715#ifdef CONFIG_USE_GUEST_BASE
4b31713d
RH
1716 if (GUEST_BASE != 0) {
1717 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
1718 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
1719 }
884d348b 1720#endif
fd76e73a 1721
cea5f9a2
BS
1722 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
1723
fd76e73a 1724 /* Jump to TB, and adjust R18 to be the return address. */
cea5f9a2 1725 tcg_out32(s, INSN_BLE_SR4 | INSN_R2(tcg_target_call_iarg_regs[1]));
3b6dac34 1726 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31);
fd76e73a
RH
1727
1728 /* Restore callee saved registers. */
a42bceec
BS
1729 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK,
1730 -frame_size - 20);
fd76e73a
RH
1731 for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1732 tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
a42bceec 1733 TCG_REG_CALL_STACK, -frame_size + i * 4);
fd76e73a
RH
1734 }
1735
1736 /* Deallocate stack frame and return. */
1737 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
1738 tcg_out_ldst(s, tcg_target_callee_save_regs[0],
a42bceec 1739 TCG_REG_CALL_STACK, -frame_size, INSN_LDWM);
fd76e73a
RH
1740}
1741
e4d58b41 1742static void tcg_target_init(TCGContext *s)
f54b3f92
AJ
1743{
1744 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
fd76e73a
RH
1745
1746 tcg_regset_clear(tcg_target_call_clobber_regs);
1747 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
1748 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
1749 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
1750 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
1751 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
1752 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
1753 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
1754 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
1755 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);
f54b3f92
AJ
1756
1757 tcg_regset_clear(s->reserved_regs);
1758 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* hardwired to zero */
1759 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* addil target */
1760 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP); /* link register */
1761 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* frame pointer */
1762 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
1763 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
1764 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
1765 tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP); /* data pointer */
a42bceec 1766 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); /* stack pointer */
f54b3f92
AJ
1767 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */
1768
1769 tcg_add_target_add_op_defs(hppa_op_defs);
1770}
e7bd6300 1771
e7bd6300
RH
1772typedef struct {
1773 DebugFrameCIE cie;
497a22eb
RH
1774 DebugFrameFDEHeader fde;
1775 uint8_t fde_def_cfa[4];
1776 uint8_t fde_ret_ofs[3];
1777 uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
e7bd6300
RH
1778} DebugFrame;
1779
1780#define ELF_HOST_MACHINE EM_PARISC
1781#define ELF_HOST_FLAGS EFA_PARISC_1_1
1782
1783/* ??? BFD (and thus GDB) wants very much to distinguish between HPUX
1784 and other extensions. We don't really care, but if we don't set this
1785 to *something* then the object file won't be properly matched. */
1786#define ELF_OSABI ELFOSABI_LINUX
1787
1788static DebugFrame debug_frame = {
1789 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1790 .cie.id = -1,
1791 .cie.version = 1,
1792 .cie.code_align = 1,
1793 .cie.data_align = 1,
1794 .cie.return_column = 2,
1795
497a22eb
RH
1796 /* Total FDE size does not include the "len" member. */
1797 .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset),
1798
1799 .fde_def_cfa = {
e7bd6300
RH
1800 0x12, 30, /* DW_CFA_def_cfa_sf sp, ... */
1801 (-FRAME_SIZE & 0x7f) | 0x80, /* ... sleb128 -FRAME_SIZE */
1802 (-FRAME_SIZE >> 7) & 0x7f
1803 },
497a22eb 1804 .fde_ret_ofs = {
e7bd6300
RH
1805 0x11, 2, (-20 / 4) & 0x7f /* DW_CFA_offset_extended_sf r2, 20 */
1806 },
497a22eb 1807 .fde_reg_ofs = {
e7bd6300
RH
1808 /* This must match the ordering in tcg_target_callee_save_regs. */
1809 0x80 + 4, 0, /* DW_CFA_offset r4, 0 */
1810 0x80 + 5, 4, /* DW_CFA_offset r5, 4 */
1811 0x80 + 6, 8, /* DW_CFA_offset r6, 8 */
1812 0x80 + 7, 12, /* ... */
1813 0x80 + 8, 16,
1814 0x80 + 9, 20,
1815 0x80 + 10, 24,
1816 0x80 + 11, 28,
1817 0x80 + 12, 32,
1818 0x80 + 13, 36,
1819 0x80 + 14, 40,
1820 0x80 + 15, 44,
1821 0x80 + 16, 48,
1822 0x80 + 17, 52,
1823 0x80 + 18, 56,
1824 }
1825};
1826
1827void tcg_register_jit(void *buf, size_t buf_size)
1828{
1829 debug_frame.fde.func_start = (tcg_target_long) buf;
1830 debug_frame.fde.func_len = buf_size;
1831
1832 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1833}