]> git.proxmox.com Git - mirror_qemu.git/blame - tcg/hppa/tcg-target.c
TCG/HPPA: use TCG_REG_CALL_STACK instead of TCG_REG_SP
[mirror_qemu.git] / tcg / hppa / tcg-target.c
CommitLineData
f54b3f92
AJ
1/*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
d4a9eb1f 25#ifndef NDEBUG
f54b3f92 26static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
fd76e73a
RH
27 "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
28 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
29 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
30 "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
f54b3f92 31};
d4a9eb1f 32#endif
f54b3f92 33
/* This is an 8 byte temp slot in the stack frame.  */
#define STACK_TEMP_OFS -16

/* Register holding GUEST_BASE; %r0 (hardwired zero) when unused.  */
#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif
42
f54b3f92
AJ
43static const int tcg_target_reg_alloc_order[] = {
44 TCG_REG_R4,
45 TCG_REG_R5,
46 TCG_REG_R6,
47 TCG_REG_R7,
48 TCG_REG_R8,
49 TCG_REG_R9,
50 TCG_REG_R10,
51 TCG_REG_R11,
52 TCG_REG_R12,
53 TCG_REG_R13,
54
55 TCG_REG_R17,
56 TCG_REG_R14,
57 TCG_REG_R15,
58 TCG_REG_R16,
fd76e73a
RH
59
60 TCG_REG_R26,
61 TCG_REG_R25,
62 TCG_REG_R24,
63 TCG_REG_R23,
64
65 TCG_REG_RET0,
66 TCG_REG_RET1,
f54b3f92
AJ
67};
68
69static const int tcg_target_call_iarg_regs[4] = {
70 TCG_REG_R26,
71 TCG_REG_R25,
72 TCG_REG_R24,
73 TCG_REG_R23,
74};
75
76static const int tcg_target_call_oarg_regs[2] = {
77 TCG_REG_RET0,
78 TCG_REG_RET1,
79};
80
fd76e73a
RH
81/* True iff val fits a signed field of width BITS. */
82static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
83{
84 return (val << ((sizeof(tcg_target_long) * 8 - bits))
85 >> (sizeof(tcg_target_long) * 8 - bits)) == val;
86}
87
88/* True iff depi can be used to compute (reg | MASK).
89 Accept a bit pattern like:
90 0....01....1
91 1....10....0
92 0..01..10..0
93 Copied from gcc sources. */
94static inline int or_mask_p(tcg_target_ulong mask)
95{
0085bd51
RH
96 if (mask == 0 || mask == -1) {
97 return 0;
98 }
fd76e73a
RH
99 mask += mask & -mask;
100 return (mask & (mask - 1)) == 0;
101}
102
103/* True iff depi or extru can be used to compute (reg & mask).
104 Accept a bit pattern like these:
105 0....01....1
106 1....10....0
107 1..10..01..1
108 Copied from gcc sources. */
109static inline int and_mask_p(tcg_target_ulong mask)
110{
111 return or_mask_p(~mask);
112}
113
/* Encode VAL as a PA-RISC "low sign extended" immediate of LEN bits:
   the sign bit is moved to the least-significant position and the
   remaining LEN-1 value bits are shifted up by one.  */
static int low_sign_ext(int val, int len)
{
    unsigned field_mask = ~(-1u << len);
    int value_bits = (val << 1) & field_mask;
    int sign_bit = (val >> (len - 1)) & 1;
    return value_bits | sign_bit;
}
118
/* Scramble a 12-bit branch displacement into instruction bit order.  */
static int reassemble_12(int as12)
{
    int w = (as12 & 0x800) >> 11;     /* sign bit -> bit 0 */
    int w1 = (as12 & 0x400) >> 8;
    int w2 = (as12 & 0x3ff) << 3;
    return w | w1 | w2;
}

/* Scramble a 17-bit branch displacement into instruction bit order.  */
static int reassemble_17(int as17)
{
    int w = (as17 & 0x10000) >> 16;   /* sign bit -> bit 0 */
    int w1 = (as17 & 0x0f800) << 5;
    int w2a = (as17 & 0x00400) >> 8;
    int w2b = (as17 & 0x003ff) << 3;
    return w | w1 | w2a | w2b;
}

/* Scramble a 21-bit immediate into ldil/addil instruction bit order.  */
static int reassemble_21(int as21)
{
    int sign = (as21 & 0x100000) >> 20;
    int mid = (as21 & 0x0ffe00) >> 8;
    int hi2 = (as21 & 0x000180) << 7;
    int lo5 = (as21 & 0x00007c) << 14;
    int lo2 = (as21 & 0x000003) << 12;
    return sign | mid | hi2 | lo5 | lo2;
}
142
143/* ??? Bizzarely, there is no PCREL12F relocation type. I guess all
144 such relocations are simply fully handled by the assembler. */
145#define R_PARISC_PCREL12F R_PARISC_NONE
146
f54b3f92
AJ
147static void patch_reloc(uint8_t *code_ptr, int type,
148 tcg_target_long value, tcg_target_long addend)
149{
fd76e73a
RH
150 uint32_t *insn_ptr = (uint32_t *)code_ptr;
151 uint32_t insn = *insn_ptr;
152 tcg_target_long pcrel;
153
154 value += addend;
155 pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;
156
f54b3f92 157 switch (type) {
fd76e73a
RH
158 case R_PARISC_PCREL12F:
159 assert(check_fit_tl(pcrel, 12));
160 /* ??? We assume all patches are forward. See tcg_out_brcond
161 re setting the NUL bit on the branch and eliding the nop. */
162 assert(pcrel >= 0);
163 insn &= ~0x1ffdu;
164 insn |= reassemble_12(pcrel);
165 break;
f54b3f92 166 case R_PARISC_PCREL17F:
fd76e73a
RH
167 assert(check_fit_tl(pcrel, 17));
168 insn &= ~0x1f1ffdu;
169 insn |= reassemble_17(pcrel);
f54b3f92
AJ
170 break;
171 default:
172 tcg_abort();
173 }
fd76e73a
RH
174
175 *insn_ptr = insn;
f54b3f92
AJ
176}
177
/* Maximum number of registers used for input function arguments.
   FLAGS is ignored: the hppa ABI always provides four.  */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}
183
184/* parse target specific constraints */
d4a9eb1f 185static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
f54b3f92
AJ
186{
187 const char *ct_str;
188
189 ct_str = *pct_str;
190 switch (ct_str[0]) {
191 case 'r':
192 ct->ct |= TCG_CT_REG;
193 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
194 break;
195 case 'L': /* qemu_ld/st constraint */
196 ct->ct |= TCG_CT_REG;
197 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
198 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
199 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
200 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
201 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
202 break;
fd76e73a
RH
203 case 'Z':
204 ct->ct |= TCG_CT_CONST_0;
205 break;
206 case 'I':
207 ct->ct |= TCG_CT_CONST_S11;
208 break;
209 case 'J':
210 ct->ct |= TCG_CT_CONST_S5;
211 break;
91493631
RH
212 case 'K':
213 ct->ct |= TCG_CT_CONST_MS11;
214 break;
0085bd51
RH
215 case 'M':
216 ct->ct |= TCG_CT_CONST_AND;
217 break;
218 case 'O':
219 ct->ct |= TCG_CT_CONST_OR;
220 break;
f54b3f92
AJ
221 default:
222 return -1;
223 }
224 ct_str++;
225 *pct_str = ct_str;
226 return 0;
227}
228
229/* test if a constant matches the constraint */
fd76e73a
RH
230static int tcg_target_const_match(tcg_target_long val,
231 const TCGArgConstraint *arg_ct)
f54b3f92 232{
fd76e73a
RH
233 int ct = arg_ct->ct;
234 if (ct & TCG_CT_CONST) {
235 return 1;
236 } else if (ct & TCG_CT_CONST_0) {
237 return val == 0;
238 } else if (ct & TCG_CT_CONST_S5) {
239 return check_fit_tl(val, 5);
240 } else if (ct & TCG_CT_CONST_S11) {
241 return check_fit_tl(val, 11);
91493631
RH
242 } else if (ct & TCG_CT_CONST_MS11) {
243 return check_fit_tl(-val, 11);
0085bd51
RH
244 } else if (ct & TCG_CT_CONST_AND) {
245 return and_mask_p(val);
246 } else if (ct & TCG_CT_CONST_OR) {
247 return or_mask_p(val);
fd76e73a 248 }
f54b3f92
AJ
249 return 0;
250}
251
/* Instruction field encoders.  */
#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)

/* Compare/subtract condition codes.  */
#define COND_NEVER   0
#define COND_EQ      1
#define COND_LT      2
#define COND_LE      3
#define COND_LTU     4
#define COND_LEU     5
#define COND_SV      6
#define COND_OD      7
#define COND_FALSE   8

/* Arithmetic and logical opcodes.  */
#define INSN_ADD	(INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC	(INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI	(INSN_OP(0x2d))
#define INSN_ADDIL	(INSN_OP(0x0a))
#define INSN_ADDL	(INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND	(INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM	(INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR	(INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR	(INSN_OP(0x24))
#define INSN_DEP	(INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI	(INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS	(INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU	(INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL	(INSN_OP(0x08))
#define INSN_LDO	(INSN_OP(0x0d))
#define INSN_MTCTL	(INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR		(INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD	(INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB	(INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB	(INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI	(INSN_OP(0x25))
#define INSN_VEXTRS	(INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU	(INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD	(INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR	(INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP	(INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP	(INSN_OP(0x35) | INSN_EXT3SH(0))

/* Branches.  */
#define INSN_BL         (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N       (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR        (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV         (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N       (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4    (INSN_OP(0x39) | (1 << 13))

/* Memory loads, displacement and indexed forms.  */
#define INSN_LDB        (INSN_OP(0x10))
#define INSN_LDH        (INSN_OP(0x11))
#define INSN_LDW        (INSN_OP(0x12))
#define INSN_LDWM       (INSN_OP(0x13))
#define INSN_FLDDS      (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

#define INSN_LDBX	(INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX	(INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX	(INSN_OP(0x03) | INSN_EXT4(2))

/* Memory stores.  */
#define INSN_STB        (INSN_OP(0x18))
#define INSN_STH        (INSN_OP(0x19))
#define INSN_STW        (INSN_OP(0x1a))
#define INSN_STWM       (INSN_OP(0x1b))
#define INSN_FSTDS      (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

/* Compare-and-branch.  */
#define INSN_COMBT      (INSN_OP(0x20))
#define INSN_COMBF      (INSN_OP(0x22))
#define INSN_COMIBT     (INSN_OP(0x21))
#define INSN_COMIBF     (INSN_OP(0x23))

/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(void *);
340
3b6dac34 341static void tcg_out_mov(TCGContext *s, TCGType type, int ret, int arg)
fd76e73a
RH
342{
343 /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
344 but hppa-dis.c is unaware of this definition */
345 if (ret != arg) {
346 tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
347 | INSN_R2(TCG_REG_R0));
348 }
349}
f54b3f92 350
fd76e73a
RH
351static void tcg_out_movi(TCGContext *s, TCGType type,
352 int ret, tcg_target_long arg)
353{
354 if (check_fit_tl(arg, 14)) {
355 tcg_out32(s, INSN_LDO | INSN_R1(ret)
356 | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
357 } else {
358 uint32_t hi, lo;
359 hi = arg >> 11;
360 lo = arg & 0x7ff;
361
362 tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
363 if (lo) {
364 tcg_out32(s, INSN_LDO | INSN_R1(ret)
365 | INSN_R2(ret) | INSN_IM14(lo));
366 }
367 }
368}
f54b3f92 369
fd76e73a
RH
370static void tcg_out_ldst(TCGContext *s, int ret, int addr,
371 tcg_target_long offset, int op)
372{
373 if (!check_fit_tl(offset, 14)) {
374 uint32_t hi, lo, op;
f54b3f92 375
fd76e73a
RH
376 hi = offset >> 11;
377 lo = offset & 0x7ff;
f54b3f92 378
fd76e73a
RH
379 if (addr == TCG_REG_R0) {
380 op = INSN_LDIL | INSN_R2(TCG_REG_R1);
381 } else {
382 op = INSN_ADDIL | INSN_R2(addr);
383 }
384 tcg_out32(s, op | reassemble_21(hi));
f54b3f92 385
fd76e73a
RH
386 addr = TCG_REG_R1;
387 offset = lo;
388 }
f54b3f92 389
fd76e73a
RH
390 if (ret != addr || offset != 0 || op != INSN_LDO) {
391 tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
392 }
393}
f54b3f92 394
fd76e73a
RH
395/* This function is required by tcg.c. */
396static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
397 int arg1, tcg_target_long arg2)
398{
399 tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
400}
401
402/* This function is required by tcg.c. */
403static inline void tcg_out_st(TCGContext *s, TCGType type, int ret,
404 int arg1, tcg_target_long arg2)
405{
406 tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
407}
408
409static void tcg_out_ldst_index(TCGContext *s, int data,
410 int base, int index, int op)
411{
412 tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
413}
414
415static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
416 tcg_target_long val)
417{
418 tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
419}
f54b3f92 420
fd76e73a
RH
421/* This function is required by tcg.c. */
422static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
423{
424 tcg_out_addi2(s, reg, reg, val);
425}
f54b3f92 426
fd76e73a
RH
427static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
428{
429 tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
430}
f54b3f92 431
fd76e73a
RH
432static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
433 tcg_target_long val, int op)
f54b3f92 434{
fd76e73a
RH
435 assert(check_fit_tl(val, 11));
436 tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
f54b3f92
AJ
437}
438
fd76e73a 439static inline void tcg_out_nop(TCGContext *s)
f54b3f92 440{
fd76e73a
RH
441 tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
442}
f54b3f92 443
fd76e73a
RH
444static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
445{
446 tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
447}
448
449/* Extract LEN bits at position OFS from ARG and place in RET.
450 Note that here the bit ordering is reversed from the PA-RISC
451 standard, such that the right-most bit is 0. */
452static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
453 unsigned ofs, unsigned len, int sign)
454{
455 assert(ofs < 32 && len <= 32 - ofs);
456 tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
457 | INSN_R1(ret) | INSN_R2(arg)
458 | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
f54b3f92
AJ
459}
460
fd76e73a
RH
461/* Likewise with OFS interpreted little-endian. */
462static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
463 unsigned ofs, unsigned len)
f54b3f92 464{
fd76e73a
RH
465 assert(ofs < 32 && len <= 32 - ofs);
466 tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
467 | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
468}
469
470static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
471 unsigned count)
472{
473 assert(count < 32);
474 tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
475 | INSN_SHDEP_CP(count));
476}
477
478static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
479{
480 tcg_out_mtctl_sar(s, creg);
481 tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
482}
483
484static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
485{
0085bd51
RH
486 int bs0, bs1;
487
488 /* Note that the argument is constrained to match or_mask_p. */
489 for (bs0 = 0; bs0 < 32; bs0++) {
490 if ((m & (1u << bs0)) != 0) {
491 break;
fd76e73a 492 }
0085bd51
RH
493 }
494 for (bs1 = bs0; bs1 < 32; bs1++) {
495 if ((m & (1u << bs1)) == 0) {
496 break;
fd76e73a 497 }
fd76e73a 498 }
0085bd51
RH
499 assert(bs1 == 32 || (1ul << bs1) > m);
500
3b6dac34 501 tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
0085bd51
RH
502 tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(-1)
503 | INSN_SHDEP_CP(31 - bs0) | INSN_DEP_LEN(bs1 - bs0));
fd76e73a
RH
504}
505
506static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
507{
0085bd51 508 int ls0, ls1, ms0;
fd76e73a 509
0085bd51
RH
510 /* Note that the argument is constrained to match and_mask_p. */
511 for (ls0 = 0; ls0 < 32; ls0++) {
512 if ((m & (1u << ls0)) == 0) {
513 break;
fd76e73a 514 }
0085bd51
RH
515 }
516 for (ls1 = ls0; ls1 < 32; ls1++) {
517 if ((m & (1u << ls1)) != 0) {
518 break;
fd76e73a 519 }
0085bd51
RH
520 }
521 for (ms0 = ls1; ms0 < 32; ms0++) {
522 if ((m & (1u << ms0)) == 0) {
523 break;
fd76e73a 524 }
0085bd51
RH
525 }
526 assert (ms0 == 32);
fd76e73a 527
0085bd51
RH
528 if (ls1 == 32) {
529 tcg_out_extr(s, ret, arg, 0, ls0, 0);
f54b3f92 530 } else {
3b6dac34 531 tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
0085bd51
RH
532 tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(0)
533 | INSN_SHDEP_CP(31 - ls0) | INSN_DEP_LEN(ls1 - ls0));
f54b3f92
AJ
534 }
535}
536
fd76e73a 537static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
f54b3f92 538{
fd76e73a 539 tcg_out_extr(s, ret, arg, 0, 8, 1);
f54b3f92
AJ
540}
541
fd76e73a 542static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
f54b3f92 543{
fd76e73a 544 tcg_out_extr(s, ret, arg, 0, 16, 1);
f54b3f92
AJ
545}
546
fd76e73a 547static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
f54b3f92 548{
fd76e73a
RH
549 count &= 31;
550 tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
551 | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
f54b3f92
AJ
552}
553
fd76e73a 554static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
f54b3f92 555{
fd76e73a
RH
556 tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
557 tcg_out_mtctl_sar(s, TCG_REG_R20);
558 tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
f54b3f92
AJ
559}
560
fd76e73a 561static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
f54b3f92 562{
fd76e73a
RH
563 count &= 31;
564 tcg_out_extr(s, ret, arg, count, 32 - count, 0);
f54b3f92
AJ
565}
566
fd76e73a 567static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
f54b3f92 568{
fd76e73a 569 tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
f54b3f92
AJ
570}
571
fd76e73a 572static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
f54b3f92 573{
fd76e73a
RH
574 count &= 31;
575 tcg_out_extr(s, ret, arg, count, 32 - count, 1);
f54b3f92
AJ
576}
577
fd76e73a 578static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
f54b3f92 579{
fd76e73a
RH
580 tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
581 tcg_out_mtctl_sar(s, TCG_REG_R20);
582 tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
f54b3f92
AJ
583}
584
fd76e73a 585static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
f54b3f92 586{
fd76e73a
RH
587 count &= 31;
588 tcg_out_shd(s, ret, arg, arg, 32 - count);
f54b3f92
AJ
589}
590
fd76e73a
RH
591static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
592{
593 tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
594 tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
f54b3f92
AJ
595}
596
fd76e73a
RH
597static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
598{
599 count &= 31;
600 tcg_out_shd(s, ret, arg, arg, count);
f54b3f92
AJ
601}
602
fd76e73a
RH
603static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
604{
605 tcg_out_vshd(s, ret, arg, arg, creg);
f54b3f92
AJ
606}
607
fd76e73a
RH
608static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
609{
610 if (ret != arg) {
3b6dac34 611 tcg_out_mov(s, TCG_TYPE_I32, ret, arg); /* arg = xxAB */
fd76e73a
RH
612 }
613 tcg_out_dep(s, ret, ret, 16, 8); /* ret = xBAB */
614 tcg_out_extr(s, ret, ret, 8, 16, sign); /* ret = ..BA */
f54b3f92
AJ
615}
616
fd76e73a 617static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
f54b3f92 618{
fd76e73a
RH
619 /* arg = ABCD */
620 tcg_out_rotri(s, temp, arg, 16); /* temp = CDAB */
621 tcg_out_dep(s, temp, temp, 16, 8); /* temp = CBAB */
622 tcg_out_shd(s, ret, arg, temp, 8); /* ret = DCBA */
f54b3f92
AJ
623}
624
fd76e73a
RH
625static void tcg_out_call(TCGContext *s, void *func)
626{
627 tcg_target_long val, hi, lo, disp;
628
629 val = (uint32_t)__canonicalize_funcptr_for_compare(func);
630 disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;
631
632 if (check_fit_tl(disp, 17)) {
633 tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
634 } else {
635 hi = val >> 11;
636 lo = val & 0x7ff;
637
638 tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
639 tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
640 | reassemble_17(lo >> 2));
3b6dac34 641 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_RP, TCG_REG_R31);
fd76e73a
RH
642 }
643}
79383c9c 644
fd76e73a
RH
645static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
646 int arg1, int arg2)
647{
648 /* Store both words into the stack for copy to the FPU. */
a42bceec
BS
649 tcg_out_ldst(s, arg1, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_STW);
650 tcg_out_ldst(s, arg2, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4, INSN_STW);
fd76e73a
RH
651
652 /* Load both words into the FPU at the same time. We get away
653 with this because we can address the left and right half of the
654 FPU registers individually once loaded. */
655 /* fldds stack_temp(sp),fr22 */
a42bceec 656 tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_CALL_STACK)
fd76e73a
RH
657 | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
658
659 /* xmpyu fr22r,fr22,fr22 */
660 tcg_out32(s, 0x3ad64796);
661
662 /* Store the 64-bit result back into the stack. */
663 /* fstds stack_temp(sp),fr22 */
a42bceec 664 tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_CALL_STACK)
fd76e73a
RH
665 | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
666
667 /* Load the pieces of the result that the caller requested. */
668 if (reth) {
a42bceec 669 tcg_out_ldst(s, reth, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_LDW);
fd76e73a
RH
670 }
671 if (retl) {
a42bceec
BS
672 tcg_out_ldst(s, retl, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4,
673 INSN_LDW);
fd76e73a
RH
674 }
675}
676
91493631
RH
677static void tcg_out_add2(TCGContext *s, int destl, int desth,
678 int al, int ah, int bl, int bh, int blconst)
679{
680 int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
681
682 if (blconst) {
683 tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
684 } else {
685 tcg_out_arith(s, tmp, al, bl, INSN_ADD);
686 }
687 tcg_out_arith(s, desth, ah, bh, INSN_ADDC);
688
3b6dac34 689 tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
91493631
RH
690}
691
692static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
693 int bl, int bh, int alconst, int blconst)
694{
695 int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
696
697 if (alconst) {
698 if (blconst) {
699 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
700 bl = TCG_REG_R20;
701 }
702 tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
703 } else if (blconst) {
704 tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
705 } else {
706 tcg_out_arith(s, tmp, al, bl, INSN_SUB);
707 }
708 tcg_out_arith(s, desth, ah, bh, INSN_SUBB);
709
3b6dac34 710 tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
91493631
RH
711}
712
fd76e73a
RH
713static void tcg_out_branch(TCGContext *s, int label_index, int nul)
714{
715 TCGLabel *l = &s->labels[label_index];
716 uint32_t op = nul ? INSN_BL_N : INSN_BL;
717
718 if (l->has_value) {
719 tcg_target_long val = l->u.value;
720
721 val -= (tcg_target_long)s->code_ptr + 8;
722 val >>= 2;
723 assert(check_fit_tl(val, 17));
724
725 tcg_out32(s, op | reassemble_17(val));
726 } else {
2d097a83
RH
727 /* We need to keep the offset unchanged for retranslation. */
728 uint32_t old_insn = *(uint32_t *)s->code_ptr;
729
fd76e73a 730 tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
2d097a83 731 tcg_out32(s, op | (old_insn & 0x1f1ffdu));
fd76e73a
RH
732 }
733}
734
735static const uint8_t tcg_cond_to_cmp_cond[10] =
736{
737 [TCG_COND_EQ] = COND_EQ,
738 [TCG_COND_NE] = COND_EQ | COND_FALSE,
739 [TCG_COND_LT] = COND_LT,
740 [TCG_COND_GE] = COND_LT | COND_FALSE,
741 [TCG_COND_LE] = COND_LE,
742 [TCG_COND_GT] = COND_LE | COND_FALSE,
743 [TCG_COND_LTU] = COND_LTU,
744 [TCG_COND_GEU] = COND_LTU | COND_FALSE,
745 [TCG_COND_LEU] = COND_LEU,
746 [TCG_COND_GTU] = COND_LEU | COND_FALSE,
747};
748
749static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
750 TCGArg c2, int c2const, int label_index)
751{
752 TCGLabel *l = &s->labels[label_index];
753 int op, pacond;
754
755 /* Note that COMIB operates as if the immediate is the first
756 operand. We model brcond with the immediate in the second
757 to better match what targets are likely to give us. For
758 consistency, model COMB with reversed operands as well. */
759 pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
760
761 if (c2const) {
762 op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
763 op |= INSN_IM5(c2);
764 } else {
765 op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
766 op |= INSN_R1(c2);
767 }
768 op |= INSN_R2(c1);
769 op |= INSN_COND(pacond & 7);
770
771 if (l->has_value) {
772 tcg_target_long val = l->u.value;
773
774 val -= (tcg_target_long)s->code_ptr + 8;
775 val >>= 2;
776 assert(check_fit_tl(val, 12));
777
778 /* ??? Assume that all branches to defined labels are backward.
779 Which means that if the nul bit is set, the delay slot is
780 executed if the branch is taken, and not executed in fallthru. */
781 tcg_out32(s, op | reassemble_12(val));
782 tcg_out_nop(s);
783 } else {
2d097a83
RH
784 /* We need to keep the offset unchanged for retranslation. */
785 uint32_t old_insn = *(uint32_t *)s->code_ptr;
786
fd76e73a
RH
787 tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
788 /* ??? Assume that all branches to undefined labels are forward.
789 Which means that if the nul bit is set, the delay slot is
790 not executed if the branch is taken, which is what we want. */
2d097a83 791 tcg_out32(s, op | 2 | (old_insn & 0x1ffdu));
fd76e73a
RH
792 }
793}
794
795static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
796 TCGArg c1, TCGArg c2, int c2const)
797{
798 int op, pacond;
799
800 /* Note that COMICLR operates as if the immediate is the first
801 operand. We model setcond with the immediate in the second
802 to better match what targets are likely to give us. For
803 consistency, model COMCLR with reversed operands as well. */
804 pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
805
806 if (c2const) {
807 op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
808 } else {
809 op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
810 }
811 op |= INSN_COND(pacond & 7);
812 op |= pacond & COND_FALSE ? 1 << 12 : 0;
813
814 tcg_out32(s, op);
815}
816
817static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
818 TCGArg bl, int blconst, TCGArg bh, int bhconst,
819 int label_index)
820{
821 switch (cond) {
822 case TCG_COND_EQ:
823 case TCG_COND_NE:
824 tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, al, bl, blconst);
825 tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
826 break;
827
828 default:
829 tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
830 tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
831 tcg_out_brcond(s, tcg_unsigned_cond(cond),
832 al, bl, blconst, label_index);
833 break;
834 }
835}
836
837static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
838 TCGArg c1, TCGArg c2, int c2const)
839{
840 tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
841 tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
842}
843
844static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
845 TCGArg al, TCGArg ah, TCGArg bl, int blconst,
846 TCGArg bh, int bhconst)
847{
848 int scratch = TCG_REG_R20;
849
850 if (ret != al && ret != ah
851 && (blconst || ret != bl)
852 && (bhconst || ret != bh)) {
853 scratch = ret;
854 }
855
856 switch (cond) {
857 case TCG_COND_EQ:
858 case TCG_COND_NE:
859 tcg_out_setcond(s, cond, scratch, al, bl, blconst);
860 tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
861 tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
862 break;
863
864 default:
865 tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
866 tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
867 tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
868 tcg_out_comclr(s, cond, TCG_REG_R0, ah, bh, bhconst);
869 tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
870 break;
871 }
872
3b6dac34 873 tcg_out_mov(s, TCG_TYPE_I32, ret, scratch);
fd76e73a
RH
874}
875
876#if defined(CONFIG_SOFTMMU)
79383c9c 877#include "../../softmmu_defs.h"
f54b3f92
AJ
878
879static void *qemu_ld_helpers[4] = {
880 __ldb_mmu,
881 __ldw_mmu,
882 __ldl_mmu,
883 __ldq_mmu,
884};
885
886static void *qemu_st_helpers[4] = {
887 __stb_mmu,
888 __stw_mmu,
889 __stl_mmu,
890 __stq_mmu,
891};
fd76e73a
RH
892
893/* Load and compare a TLB entry, and branch if TLB miss. OFFSET is set to
894 the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
895 TLB for the memory index. The return value is the offset from ENV
896 contained in R1 afterward (to be used when loading ADDEND); if the
897 return value is 0, R1 is not used. */
898
899static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
900 int addrhi, int s_bits, int lab_miss, int offset)
901{
902 int ret;
903
904 /* Extracting the index into the TLB. The "normal C operation" is
905 r1 = addr_reg >> TARGET_PAGE_BITS;
906 r1 &= CPU_TLB_SIZE - 1;
907 r1 <<= CPU_TLB_ENTRY_BITS;
908 What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
909 and place them at CPU_TLB_ENTRY_BITS. We can combine the first two
910 operations with an EXTRU. Unfortunately, the current value of
911 CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
912 add that follows. */
913 tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
fd76e73a
RH
914 tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
915 tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);
916
917 /* Make sure that both the addr_{read,write} and addend can be
918 read with a 14-bit offset from the same base register. */
919 if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
920 ret = 0;
921 } else {
922 ret = (offset + 0x400) & ~0x7ff;
923 offset = ret - offset;
924 tcg_out_addi2(s, TCG_REG_R1, r1, ret);
925 r1 = TCG_REG_R1;
926 }
927
928 /* Load the entry from the computed slot. */
929 if (TARGET_LONG_BITS == 64) {
930 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
931 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
932 } else {
933 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
934 }
935
739734cb
RH
936 /* Compute the value that ought to appear in the TLB for a hit, namely, the page
937 of the address. We include the low N bits of the address to catch unaligned
938 accesses and force them onto the slow path. Do this computation after having
939 issued the load from the TLB slot to give the load time to complete. */
940 tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
941
fd76e73a
RH
942 /* If not equal, jump to lab_miss. */
943 if (TARGET_LONG_BITS == 64) {
944 tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
945 r0, 0, addrhi, 0, lab_miss);
946 } else {
947 tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
948 }
949
950 return ret;
951}
f54b3f92
AJ
952#endif
953
f061b40e
RH
954static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg, int datahi_reg,
955 int addr_reg, int addend_reg, int opc)
f54b3f92 956{
f54b3f92 957#ifdef TARGET_WORDS_BIGENDIAN
f061b40e 958 const int bswap = 0;
f54b3f92 959#else
f061b40e 960 const int bswap = 1;
f54b3f92 961#endif
f061b40e 962
f54b3f92 963 switch (opc) {
fd76e73a 964 case 0:
f061b40e 965 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
fd76e73a
RH
966 break;
967 case 0 | 4:
f061b40e
RH
968 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
969 tcg_out_ext8s(s, datalo_reg, datalo_reg);
fd76e73a
RH
970 break;
971 case 1:
f061b40e 972 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
fd76e73a 973 if (bswap) {
f061b40e 974 tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
fd76e73a
RH
975 }
976 break;
977 case 1 | 4:
f061b40e 978 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
fd76e73a 979 if (bswap) {
f061b40e 980 tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
fd76e73a 981 } else {
f061b40e 982 tcg_out_ext16s(s, datalo_reg, datalo_reg);
fd76e73a
RH
983 }
984 break;
985 case 2:
f061b40e 986 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
fd76e73a 987 if (bswap) {
f061b40e 988 tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
fd76e73a
RH
989 }
990 break;
991 case 3:
992 if (bswap) {
f061b40e
RH
993 int t = datahi_reg;
994 datahi_reg = datalo_reg;
995 datalo_reg = t;
fd76e73a 996 }
f061b40e
RH
997 /* We can't access the low-part with a reg+reg addressing mode,
998 so perform the addition now and use reg_ofs addressing mode. */
999 if (addend_reg != TCG_REG_R0) {
1000 tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
1001 addr_reg = TCG_REG_R20;
1002 }
1003 /* Make sure not to clobber the base register. */
1004 if (datahi_reg == addr_reg) {
1005 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
1006 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
fd76e73a 1007 } else {
f061b40e
RH
1008 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
1009 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
fd76e73a
RH
1010 }
1011 if (bswap) {
f061b40e
RH
1012 tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
1013 tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
fd76e73a
RH
1014 }
1015 break;
1016 default:
1017 tcg_abort();
f54b3f92 1018 }
f061b40e
RH
1019}
1020
/* Emit a complete guest load for the qemu_ld* opcodes.  ARGS holds the
   TCG operands: data register(s), address register(s), then (softmmu
   only) the MMU index.  OPC encodes size in bits [1:0] and sign
   extension in bit 2, matching tcg_out_qemu_ld_direct.  */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit loads.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argreg, offset;

    lab1 = gen_new_label();             /* slow path (TLB miss) */
    lab2 = gen_new_label();             /* join point after either path */

    /* Compare against the TLB entry; branches to lab1 on mismatch.
       Returns the residual displacement to apply to the entry base
       (0 if the original offset already fit in the insn).  */
    offset = offsetof(CPUState, tlb_table[mem_index][0].addr_read);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
                              opc & 3, lab1, offset);

    /* TLB Hit.  Load the host-address addend from the TLB entry and do
       the access via an indexed load.  The base register depends on
       whether tcg_out_tlb_read had to rebase into R1.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg, TCG_REG_R20, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  Call the out-of-line helper.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    /* Marshal arguments; registers are allocated downward from %r26
       (argreg-- steps to the next argument register).  */
    argreg = TCG_REG_R26;
    tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrlo_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrhi_reg);
    }
    tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);

    tcg_out_call(s, qemu_ld_helpers[opc & 3]);

    /* The helper returns the full word in RET0 (and RET1 for 64-bit);
       narrow/extend the result to match the requested load type.  */
    switch (opc) {
    case 0:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
        break;
    case 0 | 4:
        tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 1:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 2:
    case 2 | 4:
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_out_mov(s, TCG_TYPE_I32, datahi_reg, TCG_REG_RET0);
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET1);
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#else
    /* No softmmu: access host memory directly, optionally biased by the
       guest base register.  */
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
#endif
}
1092
f061b40e
RH
1093static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg, int datahi_reg,
1094 int addr_reg, int opc)
f54b3f92 1095{
f54b3f92 1096#ifdef TARGET_WORDS_BIGENDIAN
f061b40e 1097 const int bswap = 0;
f54b3f92 1098#else
f061b40e 1099 const int bswap = 1;
f54b3f92 1100#endif
f061b40e 1101
f54b3f92
AJ
1102 switch (opc) {
1103 case 0:
f061b40e 1104 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
f54b3f92
AJ
1105 break;
1106 case 1:
1107 if (bswap) {
f061b40e
RH
1108 tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
1109 datalo_reg = TCG_REG_R20;
f54b3f92 1110 }
f061b40e 1111 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
f54b3f92
AJ
1112 break;
1113 case 2:
1114 if (bswap) {
f061b40e
RH
1115 tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
1116 datalo_reg = TCG_REG_R20;
f54b3f92 1117 }
f061b40e 1118 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
f54b3f92
AJ
1119 break;
1120 case 3:
fd76e73a 1121 if (bswap) {
f061b40e
RH
1122 tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
1123 tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
1124 datahi_reg = TCG_REG_R20;
1125 datalo_reg = TCG_REG_R23;
f54b3f92 1126 }
f061b40e
RH
1127 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
1128 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
f54b3f92
AJ
1129 break;
1130 default:
1131 tcg_abort();
1132 }
1133
f061b40e
RH
1134}
1135
/* Emit a complete guest store for the qemu_st* opcodes.  ARGS holds the
   TCG operands: data register(s), address register(s), then (softmmu
   only) the MMU index.  OPC is the log2 of the access size.  */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit stores.  */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests.  */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argreg, offset;

    lab1 = gen_new_label();             /* slow path (TLB miss) */
    lab2 = gen_new_label();             /* join point after either path */

    /* Compare against the TLB entry; branches to lab1 on mismatch.
       Returns the residual displacement to apply to the entry base.  */
    offset = offsetof(CPUState, tlb_table[mem_index][0].addr_write);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
                              opc, lab1, offset);

    /* TLB Hit.  Load the host-address addend from the TLB entry.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUState, tlb_table[mem_index][0].addend) - offset);

    /* There are no indexed stores, so we must do this addition explicitly.
       Careful to avoid R20, which is used for the bswaps to follow.  */
    tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss.  Call the out-of-line helper.  */
    /* label1: */
    tcg_out_label(s, lab1, (tcg_target_long)s->code_ptr);

    /* Marshal arguments; registers are allocated downward from %r26
       (argreg-- steps to the next argument register).  */
    argreg = TCG_REG_R26;
    tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrlo_reg);
    if (TARGET_LONG_BITS == 64) {
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrhi_reg);
    }

    switch(opc) {
    case 0:
        /* Helpers take the data truncated to the access width.  */
        tcg_out_andi(s, argreg--, datalo_reg, 0xff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 1:
        tcg_out_andi(s, argreg--, datalo_reg, 0xffff);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 2:
        tcg_out_mov(s, TCG_TYPE_I32, argreg--, datalo_reg);
        tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        break;
    case 3:
        /* Because of the alignment required by the 64-bit data argument,
           we will always use R23/R24.  Also, we will always run out of
           argument registers for storing mem_index, so that will have
           to go on the stack.  */
        if (mem_index == 0) {
            /* A zero mem_index can be stored via the hardwired-zero R0,
               saving the movi.  */
            argreg = TCG_REG_R0;
        } else {
            argreg = TCG_REG_R20;
            tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
        }
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R23, datahi_reg);
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R24, datalo_reg);
        tcg_out_st(s, TCG_TYPE_I32, argreg, TCG_REG_CALL_STACK,
                   TCG_TARGET_CALL_STACK_OFFSET - 4);
        break;
    default:
        tcg_abort();
    }

    tcg_out_call(s, qemu_st_helpers[opc]);

    /* label2: */
    tcg_out_label(s, lab2, (tcg_target_long)s->code_ptr);
#else
    /* There are no indexed stores, so if GUEST_BASE is set we must do the add
       explicitly.  Careful to avoid R20, which is used for the bswaps to follow.  */
    if (GUEST_BASE != 0) {
        tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_GUEST_BASE_REG, INSN_ADDL);
        addrlo_reg = TCG_REG_R31;
    }
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
#endif
}
1223
fd76e73a
RH
1224static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
1225{
1226 if (!check_fit_tl(arg, 14)) {
1227 uint32_t hi, lo;
1228 hi = arg & ~0x7ff;
1229 lo = arg & 0x7ff;
1230 if (lo) {
1231 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
1232 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
1233 tcg_out_addi(s, TCG_REG_RET0, lo);
1234 return;
1235 }
1236 arg = hi;
1237 }
1238 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
1239 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
1240}
1241
1242static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
1243{
1244 if (s->tb_jmp_offset) {
1245 /* direct jump method */
1246 fprintf(stderr, "goto_tb direct\n");
1247 tcg_abort();
1248 } else {
1249 /* indirect jump method */
1250 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
1251 (tcg_target_long)(s->tb_next + arg));
1252 tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
1253 }
1254 s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
1255}
1256
/* Central dispatch: emit host code for one TCG opcode.  ARGS are the
   operand values; CONST_ARGS flags which of them are constants rather
   than registers.  */
static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    switch (opc) {
    /* Translation-block control flow.  */
    case INDEX_op_exit_tb:
        tcg_out_exit_tb(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        tcg_out_goto_tb(s, args[0]);
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_call(s, (void *)args[0]);
        } else {
            /* ??? FIXME: the value in the register in args[0] is almost
               certainly a procedure descriptor, not a code address.  We
               probably need to use the millicode $$dyncall routine.  */
            tcg_abort();
        }
        break;

    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        tcg_abort();
        break;

    case INDEX_op_br:
        tcg_out_branch(s, args[0], 1);
        break;

    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

    /* Host loads: args are (dest, base, offset).  Sub-word loads are
       followed by an explicit sign-extension where required.  */
    case INDEX_op_ld8u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        tcg_out_ext16s(s, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
        break;

    /* Host stores: args are (src, base, offset).  */
    case INDEX_op_st8_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
        break;
    case INDEX_op_st16_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
        break;
    case INDEX_op_st_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
        break;

    /* Arithmetic; constant operands get the immediate forms.  */
    case INDEX_op_add_i32:
        if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
        }
        break;

    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
            } else {
                /* Recall that SUBI is a reversed subtract.  */
                tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
            }
        } else if (const_args[2]) {
            /* Subtracting a constant is adding its negation.  */
            tcg_out_addi2(s, args[0], args[1], -args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
        }
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
        }
        break;

    case INDEX_op_or_i32:
        if (const_args[2]) {
            tcg_out_ori(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
        }
        break;

    case INDEX_op_xor_i32:
        tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
        break;

    case INDEX_op_andc_i32:
        if (const_args[2]) {
            /* and-with-complement of a constant is AND with ~constant.  */
            tcg_out_andi(s, args[0], args[1], ~args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
        }
        break;

    /* Shifts and rotates; immediate-count forms where possible.  */
    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_shli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_shri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari(s, args[0], args[1], args[2]);
        } else {
            tcg_out_sar(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rotli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rotri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotr(s, args[0], args[1], args[2]);
        }
        break;

    /* Multiplies go through the FPU xmpyu helper; plain mul discards
       the high half by targeting R0.  */
    case INDEX_op_mul_i32:
        tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
        break;

    case INDEX_op_not_i32:
        /* ~x == -1 - x, via the reversed-subtract immediate form.  */
        tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
        break;
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;

    /* Conditional branches and comparisons, 32- and 64-bit pairs.  */
    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], args[0], args[1],
                        args[2], const_args[2],
                        args[3], const_args[3], args[5]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
                         args[3], const_args[3], args[4], const_args[4]);
        break;

    /* Double-word add/sub with carry between halves.  */
    case INDEX_op_add2_i32:
        tcg_out_add2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[4]);
        break;

    case INDEX_op_sub2_i32:
        tcg_out_sub2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[2], const_args[4]);
        break;

    /* Guest memory accesses; the low bits of the second argument to
       tcg_out_qemu_ld encode size, bit 2 encodes sign extension.  */
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}
1499
/* Operand-constraint table for every opcode this backend implements.
   One string per operand (outputs first).  Letter meanings are defined
   by this target's constraint parser (not visible in this chunk):
   "r" = any register, "i" = any immediate; the capital letters (Z, I,
   J, K, M, O, L) select target-specific constant ranges or register
   subsets -- see tcg_target_const_match/tcg_target_parse_constraint.  */
static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "r" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "rZ", "rM" } },
    { INDEX_op_or_i32, { "r", "rZ", "rO" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
    /* Note that the second argument will be inverted, which means
       we want a constant whose inversion matches M, and that O = ~M.
       See the implementation of and_mask_p.  */
    { INDEX_op_andc_i32, { "r", "rZ", "rO" } },

    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },

    { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },

    /* Guest memory ops: 64-bit guests carry an extra address operand.
       "L" restricts data/address to registers safe across the softmmu
       slow path.  */
#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
#endif
    /* Terminator.  */
    { -1 },
};
1582
fd76e73a
RH
1583static int tcg_target_callee_save_regs[] = {
1584 /* R2, the return address register, is saved specially
1585 in the caller's frame. */
1586 /* R3, the frame pointer, is not currently modified. */
1587 TCG_REG_R4,
1588 TCG_REG_R5,
1589 TCG_REG_R6,
1590 TCG_REG_R7,
1591 TCG_REG_R8,
1592 TCG_REG_R9,
1593 TCG_REG_R10,
1594 TCG_REG_R11,
1595 TCG_REG_R12,
1596 TCG_REG_R13,
1597 TCG_REG_R14,
1598 TCG_REG_R15,
1599 TCG_REG_R16,
cea5f9a2 1600 TCG_REG_R17, /* R17 is the global env. */
fd76e73a
RH
1601 TCG_REG_R18
1602};
1603
/* Emit the prologue/epilogue pair that enters and leaves generated
   code: build a stack frame, save callee-saved registers, jump to the
   TB (second C argument), then restore and return to the C caller.  */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size, i;

    /* Allocate space for the fixed frame marker.  */
    frame_size = -TCG_TARGET_CALL_STACK_OFFSET;
    frame_size += TCG_TARGET_STATIC_CALL_ARGS_SIZE;

    /* Allocate space for the saved registers.  */
    frame_size += ARRAY_SIZE(tcg_target_callee_save_regs) * 4;

    /* Align the allocated space.  */
    frame_size = ((frame_size + TCG_TARGET_STACK_ALIGN - 1)
                  & -TCG_TARGET_STACK_ALIGN);

    /* The return address is stored in the caller's frame.  */
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, -20);

    /* Allocate stack frame, saving the first register at the same time
       (STWM stores and post-modifies the stack pointer).  */
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_CALL_STACK, frame_size, INSN_STWM);

    /* Save all callee saved registers.  */
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_CALL_STACK, -frame_size + i * 4);
    }

#ifdef CONFIG_USE_GUEST_BASE
    /* Materialize the guest base once and pin its register.  */
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    /* First C argument is the CPU env pointer.  */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    /* Jump to TB, and adjust R18 to be the return address.
       (The mov executes in the BLE delay slot; BLE leaves its own
       return address in R31.)  */
    tcg_out32(s, INSN_BLE_SR4 | INSN_R2(tcg_target_call_iarg_regs[1]));
    tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31);

    /* Restore callee saved registers.  */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK,
               -frame_size - 20);
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_CALL_STACK, -frame_size + i * 4);
    }

    /* Deallocate stack frame and return (LDWM in the delay slot
       restores the first register and pops the frame).  */
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_CALL_STACK, -frame_size, INSN_LDWM);
}
1658
/* One-time backend initialization: declare which host registers are
   allocatable, call-clobbered, and permanently reserved, register the
   constraint table, and set up the spill area.  */
static void tcg_target_init(TCGContext *s)
{
    /* All 32 general registers are candidates for 32-bit values;
       reserved_regs below removes the unusable ones.  */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);

    /* Caller-saved registers per the calling convention: temporaries
       R20-R26 and the two return-value registers.  */
    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);

    /* Registers the allocator must never hand out.  */
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* hardwired to zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* addil target */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP);  /* link register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);  /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP);  /* data pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);  /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */

    tcg_add_target_add_op_defs(hppa_op_defs);
    /* Spill slots live in the CPUState temp buffer, addressed off the
       env register.  */
    tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
                  CPU_TEMP_BUF_NLONGS * sizeof(long));
}