1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #ifndef NDEBUG
26 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
27 "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
28 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
29 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
30 "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
31 };
32 #endif
33
34 /* This is an 8-byte temp slot in the stack frame. */
35 #define STACK_TEMP_OFS -16
36
37 #ifdef CONFIG_USE_GUEST_BASE
38 #define TCG_GUEST_BASE_REG TCG_REG_R16
39 #else
40 #define TCG_GUEST_BASE_REG TCG_REG_R0
41 #endif
42
43 static const int tcg_target_reg_alloc_order[] = {
44 TCG_REG_R4,
45 TCG_REG_R5,
46 TCG_REG_R6,
47 TCG_REG_R7,
48 TCG_REG_R8,
49 TCG_REG_R9,
50 TCG_REG_R10,
51 TCG_REG_R11,
52 TCG_REG_R12,
53 TCG_REG_R13,
54
55 TCG_REG_R17,
56 TCG_REG_R14,
57 TCG_REG_R15,
58 TCG_REG_R16,
59
60 TCG_REG_R26,
61 TCG_REG_R25,
62 TCG_REG_R24,
63 TCG_REG_R23,
64
65 TCG_REG_RET0,
66 TCG_REG_RET1,
67 };
68
69 static const int tcg_target_call_iarg_regs[4] = {
70 TCG_REG_R26,
71 TCG_REG_R25,
72 TCG_REG_R24,
73 TCG_REG_R23,
74 };
75
76 static const int tcg_target_call_oarg_regs[2] = {
77 TCG_REG_RET0,
78 TCG_REG_RET1,
79 };
80
81 /* True iff val fits a signed field of width BITS. */
82 static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
83 {
84 return (val << ((sizeof(tcg_target_long) * 8 - bits))
85 >> (sizeof(tcg_target_long) * 8 - bits)) == val;
86 }
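/* A minimal self-check sketch, illustration only (the function below is
   hypothetical and not part of the backend).  The shift pair above
   sign-extends VAL from BITS bits and compares with the original, so it
   accepts exactly the range [-2^(bits-1), 2^(bits-1) - 1].  */
static inline void __attribute__((unused)) check_fit_tl_example(void)
{
    /* An 11-bit signed field (the 'I' constraint) holds [-1024, 1023].  */
    assert(check_fit_tl(1023, 11));
    assert(!check_fit_tl(1024, 11));
    assert(check_fit_tl(-1024, 11));
    assert(!check_fit_tl(-1025, 11));
}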
87
88 /* True iff depi can be used to compute (reg | MASK).
89 Accept a bit pattern like:
90 0....01....1
91 1....10....0
92 0..01..10..0
93 Copied from gcc sources. */
94 static inline int or_mask_p(tcg_target_ulong mask)
95 {
96 if (mask == 0 || mask == -1) {
97 return 0;
98 }
99 mask += mask & -mask;
100 return (mask & (mask - 1)) == 0;
101 }
102
103 /* True iff depi or extru can be used to compute (reg & mask).
104 Accept a bit pattern like these:
105 0....01....1
106 1....10....0
107 1..10..01..1
108 Copied from gcc sources. */
109 static inline int and_mask_p(tcg_target_ulong mask)
110 {
111 return or_mask_p(~mask);
112 }
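/* A sketch of why the trick above works, illustration only (the function
   below is hypothetical).  "mask & -mask" isolates the lowest set bit;
   adding it back carries through the lowest run of ones, so the sum is
   zero or a power of two exactly when MASK is one contiguous run of ones
   (possibly ending at bit 31).  Assumes a 32-bit tcg_target_ulong.  */
static inline void __attribute__((unused)) mask_p_example(void)
{
    assert(or_mask_p(0x00ff0000));  /* 0..01..10..0: a single run */
    assert(or_mask_p(0xfff00000));  /* 1..10....0: run ends at bit 31 */
    assert(!or_mask_p(0x00f0f000)); /* two separate runs */
    assert(and_mask_p(0xff00ffff)); /* complement is the single run above */
}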
113
114 static int low_sign_ext(int val, int len)
115 {
116 return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
117 }
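/* Illustration only: PA-RISC "low sign extension" stores an immediate's
   sign bit in the least significant bit of the field, with the remaining
   bits shifted left by one.  For an 11-bit field:
       low_sign_ext(5, 11)  == 0xa     (5 << 1, sign bit 0)
       low_sign_ext(-3, 11) == 0x7fb   ((-3 << 1) & 0x7ff, sign bit 1)  */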
118
119 static int reassemble_12(int as12)
120 {
121 return (((as12 & 0x800) >> 11) |
122 ((as12 & 0x400) >> 8) |
123 ((as12 & 0x3ff) << 3));
124 }
125
126 static int reassemble_17(int as17)
127 {
128 return (((as17 & 0x10000) >> 16) |
129 ((as17 & 0x0f800) << 5) |
130 ((as17 & 0x00400) >> 8) |
131 ((as17 & 0x003ff) << 3));
132 }
133
134 static int reassemble_21(int as21)
135 {
136 return (((as21 & 0x100000) >> 20) |
137 ((as21 & 0x0ffe00) >> 8) |
138 ((as21 & 0x000180) << 7) |
139 ((as21 & 0x00007c) << 14) |
140 ((as21 & 0x000003) << 12));
141 }
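/* The reassemble_* helpers scatter a contiguous displacement into the
   permuted w/w1/w2 bit fields of the PA-RISC branch and LDIL formats;
   each is a pure bit permutation.  A hypothetical inverse of the 17-bit
   form, illustration only, to make the field layout explicit:  */
static inline int __attribute__((unused)) disassemble_17(int insn)
{
    return (((insn & 0x00001) << 16) |  /* w back to bit 16 */
            ((insn >> 5) & 0x0f800) |   /* w1 back to bits 15..11 */
            ((insn & 0x00004) << 8) |   /* w2 high bit back to bit 10 */
            ((insn >> 3) & 0x003ff));   /* w2 low bits back to bits 9..0 */
}
/* For any 17-bit value x, disassemble_17(reassemble_17(x)) == x.  */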
142
143 /* ??? Bizarrely, there is no PCREL12F relocation type. I guess all
144 such relocations are simply fully handled by the assembler. */
145 #define R_PARISC_PCREL12F R_PARISC_NONE
146
147 static void patch_reloc(uint8_t *code_ptr, int type,
148 tcg_target_long value, tcg_target_long addend)
149 {
150 uint32_t *insn_ptr = (uint32_t *)code_ptr;
151 uint32_t insn = *insn_ptr;
152 tcg_target_long pcrel;
153
154 value += addend;
155 pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;
156
157 switch (type) {
158 case R_PARISC_PCREL12F:
159 assert(check_fit_tl(pcrel, 12));
160 /* ??? We assume all patches are forward. See tcg_out_brcond
161 regarding setting the nullify bit on the branch and eliding the nop. */
162 assert(pcrel >= 0);
163 insn &= ~0x1ffdu;
164 insn |= reassemble_12(pcrel);
165 break;
166 case R_PARISC_PCREL17F:
167 assert(check_fit_tl(pcrel, 17));
168 insn &= ~0x1f1ffdu;
169 insn |= reassemble_17(pcrel);
170 break;
171 default:
172 tcg_abort();
173 }
174
175 *insn_ptr = insn;
176 }
177
178 /* maximum number of registers used for input function arguments */
179 static inline int tcg_target_get_call_iarg_regs_count(int flags)
180 {
181 return 4;
182 }
183
184 /* parse target specific constraints */
185 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
186 {
187 const char *ct_str;
188
189 ct_str = *pct_str;
190 switch (ct_str[0]) {
191 case 'r':
192 ct->ct |= TCG_CT_REG;
193 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
194 break;
195 case 'L': /* qemu_ld/st constraint */
196 ct->ct |= TCG_CT_REG;
197 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
198 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
199 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
200 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
201 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
202 break;
203 case 'Z':
204 ct->ct |= TCG_CT_CONST_0;
205 break;
206 case 'I':
207 ct->ct |= TCG_CT_CONST_S11;
208 break;
209 case 'J':
210 ct->ct |= TCG_CT_CONST_S5;
211 break;
212 case 'K':
213 ct->ct |= TCG_CT_CONST_MS11;
214 break;
215 case 'M':
216 ct->ct |= TCG_CT_CONST_AND;
217 break;
218 case 'O':
219 ct->ct |= TCG_CT_CONST_OR;
220 break;
221 default:
222 return -1;
223 }
224 ct_str++;
225 *pct_str = ct_str;
226 return 0;
227 }
228
229 /* test if a constant matches the constraint */
230 static int tcg_target_const_match(tcg_target_long val,
231 const TCGArgConstraint *arg_ct)
232 {
233 int ct = arg_ct->ct;
234 if (ct & TCG_CT_CONST) {
235 return 1;
236 } else if (ct & TCG_CT_CONST_0) {
237 return val == 0;
238 } else if (ct & TCG_CT_CONST_S5) {
239 return check_fit_tl(val, 5);
240 } else if (ct & TCG_CT_CONST_S11) {
241 return check_fit_tl(val, 11);
242 } else if (ct & TCG_CT_CONST_MS11) {
243 return check_fit_tl(-val, 11);
244 } else if (ct & TCG_CT_CONST_AND) {
245 return and_mask_p(val);
246 } else if (ct & TCG_CT_CONST_OR) {
247 return or_mask_p(val);
248 }
249 return 0;
250 }
251
252 #define INSN_OP(x) ((x) << 26)
253 #define INSN_EXT3BR(x) ((x) << 13)
254 #define INSN_EXT3SH(x) ((x) << 10)
255 #define INSN_EXT4(x) ((x) << 6)
256 #define INSN_EXT5(x) (x)
257 #define INSN_EXT6(x) ((x) << 6)
258 #define INSN_EXT7(x) ((x) << 6)
259 #define INSN_EXT8A(x) ((x) << 6)
260 #define INSN_EXT8B(x) ((x) << 5)
261 #define INSN_T(x) (x)
262 #define INSN_R1(x) ((x) << 16)
263 #define INSN_R2(x) ((x) << 21)
264 #define INSN_DEP_LEN(x) (32 - (x))
265 #define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
266 #define INSN_SHDEP_P(x) ((x) << 5)
267 #define INSN_COND(x) ((x) << 13)
268 #define INSN_IM11(x) low_sign_ext(x, 11)
269 #define INSN_IM14(x) low_sign_ext(x, 14)
270 #define INSN_IM5(x) (low_sign_ext(x, 5) << 16)
271
272 #define COND_NEVER 0
273 #define COND_EQ 1
274 #define COND_LT 2
275 #define COND_LE 3
276 #define COND_LTU 4
277 #define COND_LEU 5
278 #define COND_SV 6
279 #define COND_OD 7
280 #define COND_FALSE 8
281
282 #define INSN_ADD (INSN_OP(0x02) | INSN_EXT6(0x18))
283 #define INSN_ADDC (INSN_OP(0x02) | INSN_EXT6(0x1c))
284 #define INSN_ADDI (INSN_OP(0x2d))
285 #define INSN_ADDIL (INSN_OP(0x0a))
286 #define INSN_ADDL (INSN_OP(0x02) | INSN_EXT6(0x28))
287 #define INSN_AND (INSN_OP(0x02) | INSN_EXT6(0x08))
288 #define INSN_ANDCM (INSN_OP(0x02) | INSN_EXT6(0x00))
289 #define INSN_COMCLR (INSN_OP(0x02) | INSN_EXT6(0x22))
290 #define INSN_COMICLR (INSN_OP(0x24))
291 #define INSN_DEP (INSN_OP(0x35) | INSN_EXT3SH(3))
292 #define INSN_DEPI (INSN_OP(0x35) | INSN_EXT3SH(7))
293 #define INSN_EXTRS (INSN_OP(0x34) | INSN_EXT3SH(7))
294 #define INSN_EXTRU (INSN_OP(0x34) | INSN_EXT3SH(6))
295 #define INSN_LDIL (INSN_OP(0x08))
296 #define INSN_LDO (INSN_OP(0x0d))
297 #define INSN_MTCTL (INSN_OP(0x00) | INSN_EXT8B(0xc2))
298 #define INSN_OR (INSN_OP(0x02) | INSN_EXT6(0x09))
299 #define INSN_SHD (INSN_OP(0x34) | INSN_EXT3SH(2))
300 #define INSN_SUB (INSN_OP(0x02) | INSN_EXT6(0x10))
301 #define INSN_SUBB (INSN_OP(0x02) | INSN_EXT6(0x14))
302 #define INSN_SUBI (INSN_OP(0x25))
303 #define INSN_VEXTRS (INSN_OP(0x34) | INSN_EXT3SH(5))
304 #define INSN_VEXTRU (INSN_OP(0x34) | INSN_EXT3SH(4))
305 #define INSN_VSHD (INSN_OP(0x34) | INSN_EXT3SH(0))
306 #define INSN_XOR (INSN_OP(0x02) | INSN_EXT6(0x0a))
307 #define INSN_ZDEP (INSN_OP(0x35) | INSN_EXT3SH(2))
308 #define INSN_ZVDEP (INSN_OP(0x35) | INSN_EXT3SH(0))
309
310 #define INSN_BL (INSN_OP(0x3a) | INSN_EXT3BR(0))
311 #define INSN_BL_N (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
312 #define INSN_BLR (INSN_OP(0x3a) | INSN_EXT3BR(2))
313 #define INSN_BV (INSN_OP(0x3a) | INSN_EXT3BR(6))
314 #define INSN_BV_N (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
315 #define INSN_BLE_SR4 (INSN_OP(0x39) | (1 << 13))
316
317 #define INSN_LDB (INSN_OP(0x10))
318 #define INSN_LDH (INSN_OP(0x11))
319 #define INSN_LDW (INSN_OP(0x12))
320 #define INSN_LDWM (INSN_OP(0x13))
321 #define INSN_FLDDS (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))
322
323 #define INSN_LDBX (INSN_OP(0x03) | INSN_EXT4(0))
324 #define INSN_LDHX (INSN_OP(0x03) | INSN_EXT4(1))
325 #define INSN_LDWX (INSN_OP(0x03) | INSN_EXT4(2))
326
327 #define INSN_STB (INSN_OP(0x18))
328 #define INSN_STH (INSN_OP(0x19))
329 #define INSN_STW (INSN_OP(0x1a))
330 #define INSN_STWM (INSN_OP(0x1b))
331 #define INSN_FSTDS (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))
332
333 #define INSN_COMBT (INSN_OP(0x20))
334 #define INSN_COMBF (INSN_OP(0x22))
335 #define INSN_COMIBT (INSN_OP(0x21))
336 #define INSN_COMIBF (INSN_OP(0x23))
337
338 /* supplied by libgcc */
339 extern void *__canonicalize_funcptr_for_compare(const void *);
340
341 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
342 {
343 /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
344 but hppa-dis.c is unaware of this definition */
345 if (ret != arg) {
346 tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
347 | INSN_R2(TCG_REG_R0));
348 }
349 }
350
351 static void tcg_out_movi(TCGContext *s, TCGType type,
352 TCGReg ret, tcg_target_long arg)
353 {
354 if (check_fit_tl(arg, 14)) {
355 tcg_out32(s, INSN_LDO | INSN_R1(ret)
356 | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
357 } else {
358 uint32_t hi, lo;
359 hi = arg >> 11;
360 lo = arg & 0x7ff;
361
362 tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
363 if (lo) {
364 tcg_out32(s, INSN_LDO | INSN_R1(ret)
365 | INSN_R2(ret) | INSN_IM14(lo));
366 }
367 }
368 }
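/* A sketch of the split above, illustration only (the function below is
   hypothetical).  LDIL fills bits 31..11 of the target and zeroes the
   rest, so a 21-bit high part plus an 11-bit low part reconstructs any
   constant; the low part is at most 0x7ff, hence non-negative and safe
   in LDO's signed 14-bit displacement.  */
static inline void __attribute__((unused)) movi_split_example(void)
{
    tcg_target_long arg = 0x12345678;
    uint32_t hi = arg >> 11;    /* 0x2468a, deposited by LDIL */
    uint32_t lo = arg & 0x7ff;  /* 0x678, added by LDO */
    assert((tcg_target_long)((hi << 11) | lo) == arg);
}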
369
370 static void tcg_out_ldst(TCGContext *s, int ret, int addr,
371 tcg_target_long offset, int op)
372 {
373 if (!check_fit_tl(offset, 14)) {
374 uint32_t hi, lo, insn;
375 
376 hi = offset >> 11;
377 lo = offset & 0x7ff;
378 
379 if (addr == TCG_REG_R0) {
380 insn = INSN_LDIL | INSN_R2(TCG_REG_R1);
381 } else {
382 insn = INSN_ADDIL | INSN_R2(addr);
383 }
384 tcg_out32(s, insn | reassemble_21(hi));
385
386 addr = TCG_REG_R1;
387 offset = lo;
388 }
389
390 if (ret != addr || offset != 0 || op != INSN_LDO) {
391 tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
392 }
393 }
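/* Illustration only: a 14-bit signed displacement covers offsets in
   [-8192, 8191].  Larger offsets are split the same way tcg_out_movi
   splits constants: ADDIL adds the high 21 bits of the offset to the
   base register, leaving the sum in %r1, and the memory insn itself
   supplies only the low 11 bits relative to %r1.  */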
394
395 /* This function is required by tcg.c. */
396 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
397 TCGReg arg1, tcg_target_long arg2)
398 {
399 tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
400 }
401
402 /* This function is required by tcg.c. */
403 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg ret,
404 TCGReg arg1, tcg_target_long arg2)
405 {
406 tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
407 }
408
409 static void tcg_out_ldst_index(TCGContext *s, int data,
410 int base, int index, int op)
411 {
412 tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
413 }
414
415 static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
416 tcg_target_long val)
417 {
418 tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
419 }
420
421 /* This function is required by tcg.c. */
422 static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
423 {
424 tcg_out_addi2(s, reg, reg, val);
425 }
426
427 static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
428 {
429 tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
430 }
431
432 static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
433 tcg_target_long val, int op)
434 {
435 assert(check_fit_tl(val, 11));
436 tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
437 }
438
439 static inline void tcg_out_nop(TCGContext *s)
440 {
441 tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
442 }
443
444 static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
445 {
446 tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
447 }
448
449 /* Extract LEN bits at position OFS from ARG and place in RET.
450 Note that here the bit ordering is reversed from the PA-RISC
451 standard, such that the right-most bit is 0. */
452 static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
453 unsigned ofs, unsigned len, int sign)
454 {
455 assert(ofs < 32 && len <= 32 - ofs);
456 tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
457 | INSN_R1(ret) | INSN_R2(arg)
458 | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
459 }
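/* Illustration only: PA-RISC numbers bits with 0 as the most significant,
   so little-endian position OFS corresponds to hardware bit 31 - ofs.
   For example, tcg_out_extr(s, ret, arg, 16, 8, 0) emits
   "extru arg,15,8,ret", which for arg = 0xAABBCCDD leaves ret = 0xBB.  */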
460
461 /* Likewise with OFS interpreted little-endian. */
462 static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
463 unsigned ofs, unsigned len)
464 {
465 assert(ofs < 32 && len <= 32 - ofs);
466 tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
467 | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
468 }
469
470 static inline void tcg_out_depi(TCGContext *s, int ret, int arg,
471 unsigned ofs, unsigned len)
472 {
473 assert(ofs < 32 && len <= 32 - ofs);
474 tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(arg)
475 | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
476 }
477
478 static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
479 unsigned count)
480 {
481 assert(count < 32);
482 tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
483 | INSN_SHDEP_CP(count));
484 }
485
486 static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
487 {
488 tcg_out_mtctl_sar(s, creg);
489 tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
490 }
491
492 static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
493 {
494 int bs0, bs1;
495
496 /* Note that the argument is constrained to match or_mask_p. */
497 for (bs0 = 0; bs0 < 32; bs0++) {
498 if ((m & (1u << bs0)) != 0) {
499 break;
500 }
501 }
502 for (bs1 = bs0; bs1 < 32; bs1++) {
503 if ((m & (1u << bs1)) == 0) {
504 break;
505 }
506 }
507 assert(bs1 == 32 || (1ul << bs1) > m);
508
509 tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
510 tcg_out_depi(s, ret, -1, bs0, bs1 - bs0);
511 }
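/* Illustration only: for m = 0x00ff0000 the loops above find bs0 = 16
   (first set bit) and bs1 = 24 (first clear bit past the run), so the OR
   becomes a deposit of 8 one-bits at little-endian offset 16, i.e.
   "depi -1,15,8,ret".  */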
512
513 static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
514 {
515 int ls0, ls1, ms0;
516
517 /* Note that the argument is constrained to match and_mask_p. */
518 for (ls0 = 0; ls0 < 32; ls0++) {
519 if ((m & (1u << ls0)) == 0) {
520 break;
521 }
522 }
523 for (ls1 = ls0; ls1 < 32; ls1++) {
524 if ((m & (1u << ls1)) != 0) {
525 break;
526 }
527 }
528 for (ms0 = ls1; ms0 < 32; ms0++) {
529 if ((m & (1u << ms0)) == 0) {
530 break;
531 }
532 }
533 assert(ms0 == 32);
534
535 if (ls1 == 32) {
536 tcg_out_extr(s, ret, arg, 0, ls0, 0);
537 } else {
538 tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
539 tcg_out_depi(s, ret, 0, ls0, ls1 - ls0);
540 }
541 }
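/* Illustration only: two mask shapes reach this point.  For
   m = 0x000000ff, ls0 = 8 and ls1 = 32, so the AND is an extract of the
   low 8 bits ("extru arg,31,8,ret").  For m = 0xffff00ff, ls0 = 8 and
   ls1 = 16, so 8 zero-bits are deposited at little-endian offset 8
   ("depi 0,23,8,ret").  The ms0 == 32 assertion confirms the ones are
   contiguous above ls1, which and_mask_p guarantees.  */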
542
543 static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
544 {
545 tcg_out_extr(s, ret, arg, 0, 8, 1);
546 }
547
548 static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
549 {
550 tcg_out_extr(s, ret, arg, 0, 16, 1);
551 }
552
553 static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
554 {
555 count &= 31;
556 tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
557 | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
558 }
559
560 static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
561 {
562 tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
563 tcg_out_mtctl_sar(s, TCG_REG_R20);
564 tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
565 }
566
567 static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
568 {
569 count &= 31;
570 tcg_out_extr(s, ret, arg, count, 32 - count, 0);
571 }
572
573 static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
574 {
575 tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
576 }
577
578 static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
579 {
580 count &= 31;
581 tcg_out_extr(s, ret, arg, count, 32 - count, 1);
582 }
583
584 static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
585 {
586 tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
587 tcg_out_mtctl_sar(s, TCG_REG_R20);
588 tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
589 }
590
591 static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
592 {
593 count &= 31;
594 tcg_out_shd(s, ret, arg, arg, 32 - count);
595 }
596
597 static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
598 {
599 tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
600 tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
601 }
602
603 static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
604 {
605 count &= 31;
606 tcg_out_shd(s, ret, arg, arg, count);
607 }
608
609 static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
610 {
611 tcg_out_vshd(s, ret, arg, arg, creg);
612 }
613
614 static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
615 {
616 if (ret != arg) {
617 tcg_out_mov(s, TCG_TYPE_I32, ret, arg); /* arg = xxAB */
618 }
619 tcg_out_dep(s, ret, ret, 16, 8); /* ret = xBAB */
620 tcg_out_extr(s, ret, ret, 8, 16, sign); /* ret = ..BA */
621 }
622
623 static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
624 {
625 /* arg = ABCD */
626 tcg_out_rotri(s, temp, arg, 16); /* temp = CDAB */
627 tcg_out_dep(s, temp, temp, 16, 8); /* temp = CBAB */
628 tcg_out_shd(s, ret, arg, temp, 8); /* ret = DCBA */
629 }
630
631 static void tcg_out_call(TCGContext *s, const void *func)
632 {
633 tcg_target_long val, hi, lo, disp;
634
635 val = (uint32_t)__canonicalize_funcptr_for_compare(func);
636 disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;
637
638 if (check_fit_tl(disp, 17)) {
639 tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
640 } else {
641 hi = val >> 11;
642 lo = val & 0x7ff;
643
644 tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
645 tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
646 | reassemble_17(lo >> 2));
647 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_RP, TCG_REG_R31);
648 }
649 }
650
651 static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
652 int arg1, int arg2)
653 {
654 /* Store both words into the stack for copy to the FPU. */
655 tcg_out_ldst(s, arg1, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_STW);
656 tcg_out_ldst(s, arg2, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4, INSN_STW);
657
658 /* Load both words into the FPU at the same time. We get away
659 with this because we can address the left and right half of the
660 FPU registers individually once loaded. */
661 /* fldds stack_temp(sp),fr22 */
662 tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_CALL_STACK)
663 | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
664
665 /* xmpyu fr22r,fr22,fr22 */
666 tcg_out32(s, 0x3ad64796);
667
668 /* Store the 64-bit result back into the stack. */
669 /* fstds fr22,stack_temp(sp) */
670 tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_CALL_STACK)
671 | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
672
673 /* Load the pieces of the result that the caller requested. */
674 if (reth) {
675 tcg_out_ldst(s, reth, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_LDW);
676 }
677 if (retl) {
678 tcg_out_ldst(s, retl, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4,
679 INSN_LDW);
680 }
681 }
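/* A minimal C model of the sequence above, illustration only (the
   function below is hypothetical).  XMPYU multiplies the two unsigned
   32-bit words loaded into the FPU and yields a 64-bit product; the
   stack bounce exists only to move data between the integer and FP
   register files.  hppa is big-endian, so the high word lands at the
   lower address, STACK_TEMP_OFS.  */
static inline void __attribute__((unused))
xmpyu_model(uint32_t a, uint32_t b, uint32_t *retl, uint32_t *reth)
{
    uint64_t prod = (uint64_t)a * b;
    *reth = prod >> 32; /* read back from STACK_TEMP_OFS */
    *retl = prod;       /* read back from STACK_TEMP_OFS + 4 */
}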
682
683 static void tcg_out_add2(TCGContext *s, int destl, int desth,
684 int al, int ah, int bl, int bh, int blconst)
685 {
686 int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
687
688 if (blconst) {
689 tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
690 } else {
691 tcg_out_arith(s, tmp, al, bl, INSN_ADD);
692 }
693 tcg_out_arith(s, desth, ah, bh, INSN_ADDC);
694
695 tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
696 }
697
698 static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
699 int bl, int bh, int alconst, int blconst)
700 {
701 int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
702
703 if (alconst) {
704 if (blconst) {
705 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
706 bl = TCG_REG_R20;
707 }
708 tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
709 } else if (blconst) {
710 tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
711 } else {
712 tcg_out_arith(s, tmp, al, bl, INSN_SUB);
713 }
714 tcg_out_arith(s, desth, ah, bh, INSN_SUBB);
715
716 tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
717 }
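/* A minimal C model of the double-word helpers above, illustration only
   (the function below is hypothetical).  ADD/ADDC and SUB/SUBB chain the
   carry (borrow) through the PSW; the TMP register in the emitters keeps
   DESTL from clobbering AH or BH before the high-part insn reads them.  */
static inline void __attribute__((unused))
add2_model(uint32_t al, uint32_t ah, uint32_t bl, uint32_t bh,
           uint32_t *destl, uint32_t *desth)
{
    uint32_t lo = al + bl;
    uint32_t carry = lo < al;  /* the PSW carry that ADDC consumes */
    *destl = lo;
    *desth = ah + bh + carry;
}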
718
719 static void tcg_out_branch(TCGContext *s, int label_index, int nul)
720 {
721 TCGLabel *l = &s->labels[label_index];
722 uint32_t op = nul ? INSN_BL_N : INSN_BL;
723
724 if (l->has_value) {
725 tcg_target_long val = l->u.value;
726
727 val -= (tcg_target_long)s->code_ptr + 8;
728 val >>= 2;
729 assert(check_fit_tl(val, 17));
730
731 tcg_out32(s, op | reassemble_17(val));
732 } else {
733 /* We need to keep the offset unchanged for retranslation. */
734 uint32_t old_insn = *(uint32_t *)s->code_ptr;
735
736 tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
737 tcg_out32(s, op | (old_insn & 0x1f1ffdu));
738 }
739 }
740
741 static const uint8_t tcg_cond_to_cmp_cond[10] =
742 {
743 [TCG_COND_EQ] = COND_EQ,
744 [TCG_COND_NE] = COND_EQ | COND_FALSE,
745 [TCG_COND_LT] = COND_LT,
746 [TCG_COND_GE] = COND_LT | COND_FALSE,
747 [TCG_COND_LE] = COND_LE,
748 [TCG_COND_GT] = COND_LE | COND_FALSE,
749 [TCG_COND_LTU] = COND_LTU,
750 [TCG_COND_GEU] = COND_LTU | COND_FALSE,
751 [TCG_COND_LEU] = COND_LEU,
752 [TCG_COND_GTU] = COND_LEU | COND_FALSE,
753 };
754
755 static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
756 TCGArg c2, int c2const, int label_index)
757 {
758 TCGLabel *l = &s->labels[label_index];
759 int op, pacond;
760
761 /* Note that COMIB operates as if the immediate is the first
762 operand. We model brcond with the immediate in the second
763 to better match what targets are likely to give us. For
764 consistency, model COMB with reversed operands as well. */
765 pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
766
767 if (c2const) {
768 op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
769 op |= INSN_IM5(c2);
770 } else {
771 op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
772 op |= INSN_R1(c2);
773 }
774 op |= INSN_R2(c1);
775 op |= INSN_COND(pacond & 7);
776
777 if (l->has_value) {
778 tcg_target_long val = l->u.value;
779
780 val -= (tcg_target_long)s->code_ptr + 8;
781 val >>= 2;
782 assert(check_fit_tl(val, 12));
783
784 /* ??? Assume that all branches to defined labels are backward.
785 Which means that if the nullify bit is set, the delay slot is
786 executed if the branch is taken, and not executed in fallthru. */
787 tcg_out32(s, op | reassemble_12(val));
788 tcg_out_nop(s);
789 } else {
790 /* We need to keep the offset unchanged for retranslation. */
791 uint32_t old_insn = *(uint32_t *)s->code_ptr;
792
793 tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
794 /* ??? Assume that all branches to undefined labels are forward.
795 Which means that if the nullify bit is set, the delay slot is
796 not executed if the branch is taken, which is what we want. */
797 tcg_out32(s, op | 2 | (old_insn & 0x1ffdu));
798 }
799 }
800
801 static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
802 TCGArg c1, TCGArg c2, int c2const)
803 {
804 int op, pacond;
805
806 /* Note that COMICLR operates as if the immediate is the first
807 operand. We model setcond with the immediate in the second
808 to better match what targets are likely to give us. For
809 consistency, model COMCLR with reversed operands as well. */
810 pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
811
812 if (c2const) {
813 op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
814 } else {
815 op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
816 }
817 op |= INSN_COND(pacond & 7);
818 op |= pacond & COND_FALSE ? 1 << 12 : 0;
819
820 tcg_out32(s, op);
821 }
822
823 static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
824 TCGArg bl, int blconst, TCGArg bh, int bhconst,
825 int label_index)
826 {
827 switch (cond) {
828 case TCG_COND_EQ:
829 case TCG_COND_NE:
830 tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, al, bl, blconst);
831 tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
832 break;
833
834 default:
835 tcg_out_brcond(s, cond, ah, bh, bhconst, label_index);
836 tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
837 tcg_out_brcond(s, tcg_unsigned_cond(cond),
838 al, bl, blconst, label_index);
839 break;
840 }
841 }
842
843 static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
844 TCGArg c1, TCGArg c2, int c2const)
845 {
846 tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
847 tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
848 }
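/* Illustration only: COMCLR writes zero to its target and nullifies the
   following instruction when its condition holds.  With the inverted
   condition, the pair above computes a boolean without a branch:
       ret = 0;                      (COMCLR, unconditionally)
       if (cond(c1, c2))             (LDO not nullified)
           ret = 1;  */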
849
850 static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
851 TCGArg al, TCGArg ah, TCGArg bl, int blconst,
852 TCGArg bh, int bhconst)
853 {
854 int scratch = TCG_REG_R20;
855
856 if (ret != al && ret != ah
857 && (blconst || ret != bl)
858 && (bhconst || ret != bh)) {
859 scratch = ret;
860 }
861
862 switch (cond) {
863 case TCG_COND_EQ:
864 case TCG_COND_NE:
865 tcg_out_setcond(s, cond, scratch, al, bl, blconst);
866 tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
867 tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
868 break;
869
870 default:
871 tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
872 tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
873 tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
874 tcg_out_comclr(s, cond, TCG_REG_R0, ah, bh, bhconst);
875 tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
876 break;
877 }
878
879 tcg_out_mov(s, TCG_TYPE_I32, ret, scratch);
880 }
881
882 #if defined(CONFIG_SOFTMMU)
883 #include "../../softmmu_defs.h"
884
885 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
886 int mmu_idx) */
887 static const void * const qemu_ld_helpers[4] = {
888 helper_ldb_mmu,
889 helper_ldw_mmu,
890 helper_ldl_mmu,
891 helper_ldq_mmu,
892 };
893
894 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
895 uintxx_t val, int mmu_idx) */
896 static const void * const qemu_st_helpers[4] = {
897 helper_stb_mmu,
898 helper_stw_mmu,
899 helper_stl_mmu,
900 helper_stq_mmu,
901 };
902
903 /* Load and compare a TLB entry, and branch if TLB miss. OFFSET is set to
904 the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
905 TLB for the memory index. The return value is the offset from ENV
906 contained in R1 afterward (to be used when loading ADDEND); if the
907 return value is 0, R1 is not used. */
908
909 static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
910 int addrhi, int s_bits, int lab_miss, int offset)
911 {
912 int ret;
913
914 /* Extracting the index into the TLB. The "normal C operation" is
915 r1 = addr_reg >> TARGET_PAGE_BITS;
916 r1 &= CPU_TLB_SIZE - 1;
917 r1 <<= CPU_TLB_ENTRY_BITS;
918 What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
919 and place them at CPU_TLB_ENTRY_BITS. We can combine the first two
920 operations with an EXTRU. Unfortunately, the current value of
921 CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
922 add that follows. */
923 tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
924 tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
925 tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);
926
927 /* Make sure that both the addr_{read,write} and addend can be
928 read with a 14-bit offset from the same base register. */
929 if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
930 ret = 0;
931 } else {
932 ret = (offset + 0x400) & ~0x7ff;
933 offset = ret - offset;
934 tcg_out_addi2(s, TCG_REG_R1, r1, ret);
935 r1 = TCG_REG_R1;
936 }
937
938 /* Load the entry from the computed slot. */
939 if (TARGET_LONG_BITS == 64) {
940 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
941 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
942 } else {
943 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
944 }
945
946 /* Compute the value that ought to appear in the TLB for a hit, namely, the page
947 of the address. We include the low N bits of the address to catch unaligned
948 accesses and force them onto the slow path. Do this computation after having
949 issued the load from the TLB slot to give the load time to complete. */
950 tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
951
952 /* If not equal, jump to lab_miss. */
953 if (TARGET_LONG_BITS == 64) {
954 tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
955 r0, 0, addrhi, 0, lab_miss);
956 } else {
957 tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
958 }
959
960 return ret;
961 }
962 #endif
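/* A C sketch of the lookup emitted above, for a 32-bit guest;
   illustration only, assuming the usual softmmu CPUTLBEntry layout
   (addr_read/addr_write/addend).  */
#if 0
    CPUTLBEntry *ent = &env->tlb_table[mem_index]
                           [(addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1)];
    /* The low s_bits bits force unaligned accesses onto the slow path.  */
    if (ent->addr_read != (addr & (TARGET_PAGE_MASK | ((1 << s_bits) - 1)))) {
        goto lab_miss;
    }
    host_addr = addr + ent->addend;  /* the actual access uses host_addr */
#endif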
963
964 static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg, int datahi_reg,
965 int addr_reg, int addend_reg, int opc)
966 {
967 #ifdef TARGET_WORDS_BIGENDIAN
968 const int bswap = 0;
969 #else
970 const int bswap = 1;
971 #endif
972
973 switch (opc) {
974 case 0:
975 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
976 break;
977 case 0 | 4:
978 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
979 tcg_out_ext8s(s, datalo_reg, datalo_reg);
980 break;
981 case 1:
982 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
983 if (bswap) {
984 tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
985 }
986 break;
987 case 1 | 4:
988 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
989 if (bswap) {
990 tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
991 } else {
992 tcg_out_ext16s(s, datalo_reg, datalo_reg);
993 }
994 break;
995 case 2:
996 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
997 if (bswap) {
998 tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
999 }
1000 break;
1001 case 3:
1002 if (bswap) {
1003 int t = datahi_reg;
1004 datahi_reg = datalo_reg;
1005 datalo_reg = t;
1006 }
1007 /* We can't access the low-part with a reg+reg addressing mode,
1008 so perform the addition now and use reg_ofs addressing mode. */
1009 if (addend_reg != TCG_REG_R0) {
1010 tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
1011 addr_reg = TCG_REG_R20;
1012 }
1013 /* Make sure not to clobber the base register. */
1014 if (datahi_reg == addr_reg) {
1015 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
1016 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
1017 } else {
1018 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
1019 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
1020 }
1021 if (bswap) {
1022 tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
1023 tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
1024 }
1025 break;
1026 default:
1027 tcg_abort();
1028 }
1029 }
1030
1031 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
1032 {
1033 int datalo_reg = *args++;
1034 /* Note that datahi_reg is only used for 64-bit loads. */
1035 int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
1036 int addrlo_reg = *args++;
1037
1038 #if defined(CONFIG_SOFTMMU)
1039 /* Note that addrhi_reg is only used for 64-bit guests. */
1040 int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
1041 int mem_index = *args;
1042 int lab1, lab2, argreg, offset;
1043
1044 lab1 = gen_new_label();
1045 lab2 = gen_new_label();
1046
1047 offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
1048 offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
1049 opc & 3, lab1, offset);
1050
1051 /* TLB Hit. */
1052 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
1053 offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
1054 tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg, TCG_REG_R20, opc);
1055 tcg_out_branch(s, lab2, 1);
1056
1057 /* TLB Miss. */
1058 /* label1: */
1059 tcg_out_label(s, lab1, s->code_ptr);
1060
1061 argreg = TCG_REG_R26;
1062 tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrlo_reg);
1063 if (TARGET_LONG_BITS == 64) {
1064 tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrhi_reg);
1065 }
1066 tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
1067
1068 /* XXX/FIXME: suboptimal */
1069 tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
1070 tcg_target_call_iarg_regs[1]);
1071 tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
1072 tcg_target_call_iarg_regs[0]);
1073 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
1074 TCG_AREG0);
1075 tcg_out_call(s, qemu_ld_helpers[opc & 3]);
1076
1077 switch (opc) {
1078 case 0:
1079 tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
1080 break;
1081 case 0 | 4:
1082 tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
1083 break;
1084 case 1:
1085 tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
1086 break;
1087 case 1 | 4:
1088 tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
1089 break;
1090 case 2:
1091 case 2 | 4:
1092 tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET0);
1093 break;
1094 case 3:
1095 tcg_out_mov(s, TCG_TYPE_I32, datahi_reg, TCG_REG_RET0);
1096 tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET1);
1097 break;
1098 default:
1099 tcg_abort();
1100 }
1101
1102 /* label2: */
1103 tcg_out_label(s, lab2, s->code_ptr);
1104 #else
1105 tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
1106 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
1107 #endif
1108 }
1109
1110 static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg, int datahi_reg,
1111 int addr_reg, int opc)
1112 {
1113 #ifdef TARGET_WORDS_BIGENDIAN
1114 const int bswap = 0;
1115 #else
1116 const int bswap = 1;
1117 #endif
1118
1119 switch (opc) {
1120 case 0:
1121 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
1122 break;
1123 case 1:
1124 if (bswap) {
1125 tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
1126 datalo_reg = TCG_REG_R20;
1127 }
1128 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
1129 break;
1130 case 2:
1131 if (bswap) {
1132 tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
1133 datalo_reg = TCG_REG_R20;
1134 }
1135 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
1136 break;
1137 case 3:
1138 if (bswap) {
1139 tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
1140 tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
1141 datahi_reg = TCG_REG_R20;
1142 datalo_reg = TCG_REG_R23;
1143 }
1144 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
1145 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
1146 break;
1147 default:
1148 tcg_abort();
1149 }
1150
1151 }
1152
1153 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
1154 {
1155 int datalo_reg = *args++;
1156 /* Note that datahi_reg is only used for 64-bit stores. */
1157 int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
1158 int addrlo_reg = *args++;
1159
1160 #if defined(CONFIG_SOFTMMU)
1161 /* Note that addrhi_reg is only used for 64-bit guests. */
1162 int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
1163 int mem_index = *args;
1164 int lab1, lab2, argreg, offset;
1165
1166 lab1 = gen_new_label();
1167 lab2 = gen_new_label();
1168
1169 offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
1170 offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg, addrhi_reg,
1171 opc, lab1, offset);
1172
1173 /* TLB Hit. */
1174 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, (offset ? TCG_REG_R1 : TCG_REG_R25),
1175 offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
1176
1177 /* There are no indexed stores, so we must do this addition explicitly.
1178 Careful to avoid R20, which is used for the bswaps to follow. */
1179 tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
1180 tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
1181 tcg_out_branch(s, lab2, 1);
1182
1183 /* TLB Miss. */
1184 /* label1: */
1185 tcg_out_label(s, lab1, s->code_ptr);
1186
1187 argreg = TCG_REG_R26;
1188 tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrlo_reg);
1189 if (TARGET_LONG_BITS == 64) {
1190 tcg_out_mov(s, TCG_TYPE_I32, argreg--, addrhi_reg);
1191 }
1192
1193 switch (opc) {
1194 case 0:
1195 tcg_out_andi(s, argreg--, datalo_reg, 0xff);
1196 tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
1197 break;
1198 case 1:
1199 tcg_out_andi(s, argreg--, datalo_reg, 0xffff);
1200 tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
1201 break;
1202 case 2:
1203 tcg_out_mov(s, TCG_TYPE_I32, argreg--, datalo_reg);
1204 tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
1205 break;
1206 case 3:
1207 /* Because of the alignment required by the 64-bit data argument,
1208 we will always use R23/R24. Also, we will always run out of
1209 argument registers for storing mem_index, so that will have
1210 to go on the stack. */
1211 if (mem_index == 0) {
1212 argreg = TCG_REG_R0;
1213 } else {
1214 argreg = TCG_REG_R20;
1215 tcg_out_movi(s, TCG_TYPE_I32, argreg, mem_index);
1216 }
1217 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R23, datahi_reg);
1218 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R24, datalo_reg);
1219 tcg_out_st(s, TCG_TYPE_I32, argreg, TCG_REG_CALL_STACK,
1220 TCG_TARGET_CALL_STACK_OFFSET - 4);
1221 break;
1222 default:
1223 tcg_abort();
1224 }
1225
1226 /* XXX/FIXME: suboptimal */
1227 tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
1228 tcg_target_call_iarg_regs[2]);
1229 tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[2],
1230 tcg_target_call_iarg_regs[1]);
1231 tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
1232 tcg_target_call_iarg_regs[0]);
1233 tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0],
1234 TCG_AREG0);
1235 tcg_out_call(s, qemu_st_helpers[opc]);
1236
1237 /* label2: */
1238 tcg_out_label(s, lab2, s->code_ptr);
1239 #else
1240 /* There are no indexed stores, so if GUEST_BASE is set we must do the add
1241 explicitly. Careful to avoid R20, which is used for the bswaps to follow. */
1242 if (GUEST_BASE != 0) {
1243 tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_GUEST_BASE_REG, INSN_ADDL);
1244 addrlo_reg = TCG_REG_R31;
1245 }
1246 tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
1247 #endif
1248 }
1249
1250 static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
1251 {
1252 if (!check_fit_tl(arg, 14)) {
1253 uint32_t hi, lo;
1254 hi = arg & ~0x7ff;
1255 lo = arg & 0x7ff;
1256 if (lo) {
1257 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
1258 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
1259 tcg_out_addi(s, TCG_REG_RET0, lo);
1260 return;
1261 }
1262 arg = hi;
1263 }
1264 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
1265 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
1266 }
1267
1268 static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
1269 {
1270 if (s->tb_jmp_offset) {
1271 /* direct jump method */
1272 fprintf(stderr, "goto_tb direct\n");
1273 tcg_abort();
1274 } else {
1275 /* indirect jump method */
1276 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
1277 (tcg_target_long)(s->tb_next + arg));
1278 tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
1279 }
1280 s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
1281 }
1282
1283 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
1284 const int *const_args)
1285 {
1286 switch (opc) {
1287 case INDEX_op_exit_tb:
1288 tcg_out_exit_tb(s, args[0]);
1289 break;
1290 case INDEX_op_goto_tb:
1291 tcg_out_goto_tb(s, args[0]);
1292 break;
1293
1294 case INDEX_op_call:
1295 if (const_args[0]) {
1296 tcg_out_call(s, (void *)args[0]);
1297 } else {
1298 /* ??? FIXME: the value in the register in args[0] is almost
1299 certainly a procedure descriptor, not a code address. We
1300 probably need to use the millicode $$dyncall routine. */
1301 tcg_abort();
1302 }
1303 break;
1304
1305 case INDEX_op_jmp:
1306 fprintf(stderr, "unimplemented jmp\n");
1307 tcg_abort();
1308 break;
1309
1310 case INDEX_op_br:
1311 tcg_out_branch(s, args[0], 1);
1312 break;
1313
1314 case INDEX_op_movi_i32:
1315 tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
1316 break;
1317
1318 case INDEX_op_ld8u_i32:
1319 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
1320 break;
1321 case INDEX_op_ld8s_i32:
1322 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
1323 tcg_out_ext8s(s, args[0], args[0]);
1324 break;
1325 case INDEX_op_ld16u_i32:
1326 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
1327 break;
1328 case INDEX_op_ld16s_i32:
1329 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
1330 tcg_out_ext16s(s, args[0], args[0]);
1331 break;
1332 case INDEX_op_ld_i32:
1333 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
1334 break;
1335
1336 case INDEX_op_st8_i32:
1337 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
1338 break;
1339 case INDEX_op_st16_i32:
1340 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
1341 break;
1342 case INDEX_op_st_i32:
1343 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
1344 break;
1345
1346 case INDEX_op_add_i32:
1347 if (const_args[2]) {
1348 tcg_out_addi2(s, args[0], args[1], args[2]);
1349 } else {
1350 tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
1351 }
1352 break;
1353
1354 case INDEX_op_sub_i32:
1355 if (const_args[1]) {
1356 if (const_args[2]) {
1357 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
1358 } else {
1359 /* Recall that SUBI is a reversed subtract. */
1360 tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
1361 }
1362 } else if (const_args[2]) {
1363 tcg_out_addi2(s, args[0], args[1], -args[2]);
1364 } else {
1365 tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
1366 }
1367 break;
1368
1369 case INDEX_op_and_i32:
1370 if (const_args[2]) {
1371 tcg_out_andi(s, args[0], args[1], args[2]);
1372 } else {
1373 tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
1374 }
1375 break;
1376
1377 case INDEX_op_or_i32:
1378 if (const_args[2]) {
1379 tcg_out_ori(s, args[0], args[1], args[2]);
1380 } else {
1381 tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
1382 }
1383 break;
1384
1385 case INDEX_op_xor_i32:
1386 tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
1387 break;
1388
1389 case INDEX_op_andc_i32:
1390 if (const_args[2]) {
1391 tcg_out_andi(s, args[0], args[1], ~args[2]);
1392 } else {
1393 tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
1394 }
1395 break;
1396
1397 case INDEX_op_shl_i32:
1398 if (const_args[2]) {
1399 tcg_out_shli(s, args[0], args[1], args[2]);
1400 } else {
1401 tcg_out_shl(s, args[0], args[1], args[2]);
1402 }
1403 break;
1404
1405 case INDEX_op_shr_i32:
1406 if (const_args[2]) {
1407 tcg_out_shri(s, args[0], args[1], args[2]);
1408 } else {
1409 tcg_out_shr(s, args[0], args[1], args[2]);
1410 }
1411 break;
1412
1413 case INDEX_op_sar_i32:
1414 if (const_args[2]) {
1415 tcg_out_sari(s, args[0], args[1], args[2]);
1416 } else {
1417 tcg_out_sar(s, args[0], args[1], args[2]);
1418 }
1419 break;
1420
1421 case INDEX_op_rotl_i32:
1422 if (const_args[2]) {
1423 tcg_out_rotli(s, args[0], args[1], args[2]);
1424 } else {
1425 tcg_out_rotl(s, args[0], args[1], args[2]);
1426 }
1427 break;
1428
1429 case INDEX_op_rotr_i32:
1430 if (const_args[2]) {
1431 tcg_out_rotri(s, args[0], args[1], args[2]);
1432 } else {
1433 tcg_out_rotr(s, args[0], args[1], args[2]);
1434 }
1435 break;
1436
1437 case INDEX_op_mul_i32:
1438 tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
1439 break;
1440 case INDEX_op_mulu2_i32:
1441 tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
1442 break;
1443
1444 case INDEX_op_bswap16_i32:
1445 tcg_out_bswap16(s, args[0], args[1], 0);
1446 break;
1447 case INDEX_op_bswap32_i32:
1448 tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
1449 break;
1450
1451 case INDEX_op_not_i32:
1452 tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
1453 break;
1454 case INDEX_op_ext8s_i32:
1455 tcg_out_ext8s(s, args[0], args[1]);
1456 break;
1457 case INDEX_op_ext16s_i32:
1458 tcg_out_ext16s(s, args[0], args[1]);
1459 break;
1460
1461 case INDEX_op_brcond_i32:
1462 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
1463 break;
1464 case INDEX_op_brcond2_i32:
1465 tcg_out_brcond2(s, args[4], args[0], args[1],
1466 args[2], const_args[2],
1467 args[3], const_args[3], args[5]);
1468 break;
1469
1470 case INDEX_op_setcond_i32:
1471 tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
1472 break;
1473 case INDEX_op_setcond2_i32:
1474 tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
1475 args[3], const_args[3], args[4], const_args[4]);
1476 break;
1477
1478 case INDEX_op_add2_i32:
1479 tcg_out_add2(s, args[0], args[1], args[2], args[3],
1480 args[4], args[5], const_args[4]);
1481 break;
1482
1483 case INDEX_op_sub2_i32:
1484 tcg_out_sub2(s, args[0], args[1], args[2], args[3],
1485 args[4], args[5], const_args[2], const_args[4]);
1486 break;
1487
1488 case INDEX_op_deposit_i32:
1489 if (const_args[2]) {
1490 tcg_out_depi(s, args[0], args[2], args[3], args[4]);
1491 } else {
1492 tcg_out_dep(s, args[0], args[2], args[3], args[4]);
1493 }
1494 break;
1495
1496 case INDEX_op_qemu_ld8u:
1497 tcg_out_qemu_ld(s, args, 0);
1498 break;
1499 case INDEX_op_qemu_ld8s:
1500 tcg_out_qemu_ld(s, args, 0 | 4);
1501 break;
1502 case INDEX_op_qemu_ld16u:
1503 tcg_out_qemu_ld(s, args, 1);
1504 break;
1505 case INDEX_op_qemu_ld16s:
1506 tcg_out_qemu_ld(s, args, 1 | 4);
1507 break;
1508 case INDEX_op_qemu_ld32:
1509 tcg_out_qemu_ld(s, args, 2);
1510 break;
1511 case INDEX_op_qemu_ld64:
1512 tcg_out_qemu_ld(s, args, 3);
1513 break;
1514
1515 case INDEX_op_qemu_st8:
1516 tcg_out_qemu_st(s, args, 0);
1517 break;
1518 case INDEX_op_qemu_st16:
1519 tcg_out_qemu_st(s, args, 1);
1520 break;
1521 case INDEX_op_qemu_st32:
1522 tcg_out_qemu_st(s, args, 2);
1523 break;
1524 case INDEX_op_qemu_st64:
1525 tcg_out_qemu_st(s, args, 3);
1526 break;
1527
1528 default:
1529 fprintf(stderr, "unknown opcode 0x%x\n", opc);
1530 tcg_abort();
1531 }
1532 }
1533
1534 static const TCGTargetOpDef hppa_op_defs[] = {
1535 { INDEX_op_exit_tb, { } },
1536 { INDEX_op_goto_tb, { } },
1537
1538 { INDEX_op_call, { "ri" } },
1539 { INDEX_op_jmp, { "r" } },
1540 { INDEX_op_br, { } },
1541
1542 { INDEX_op_mov_i32, { "r", "r" } },
1543 { INDEX_op_movi_i32, { "r" } },
1544
1545 { INDEX_op_ld8u_i32, { "r", "r" } },
1546 { INDEX_op_ld8s_i32, { "r", "r" } },
1547 { INDEX_op_ld16u_i32, { "r", "r" } },
1548 { INDEX_op_ld16s_i32, { "r", "r" } },
1549 { INDEX_op_ld_i32, { "r", "r" } },
1550 { INDEX_op_st8_i32, { "rZ", "r" } },
1551 { INDEX_op_st16_i32, { "rZ", "r" } },
1552 { INDEX_op_st_i32, { "rZ", "r" } },
1553
1554 { INDEX_op_add_i32, { "r", "rZ", "ri" } },
1555 { INDEX_op_sub_i32, { "r", "rI", "ri" } },
1556 { INDEX_op_and_i32, { "r", "rZ", "rM" } },
1557 { INDEX_op_or_i32, { "r", "rZ", "rO" } },
1558 { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
1559 /* Note that the second argument will be inverted, which means
1560 we want a constant whose inversion matches M, and that O = ~M.
1561 See the implementation of and_mask_p. */
1562 { INDEX_op_andc_i32, { "r", "rZ", "rO" } },
1563
1564 { INDEX_op_mul_i32, { "r", "r", "r" } },
1565 { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
1566
1567 { INDEX_op_shl_i32, { "r", "r", "ri" } },
1568 { INDEX_op_shr_i32, { "r", "r", "ri" } },
1569 { INDEX_op_sar_i32, { "r", "r", "ri" } },
1570 { INDEX_op_rotl_i32, { "r", "r", "ri" } },
1571 { INDEX_op_rotr_i32, { "r", "r", "ri" } },
1572
1573 { INDEX_op_bswap16_i32, { "r", "r" } },
1574 { INDEX_op_bswap32_i32, { "r", "r" } },
1575 { INDEX_op_not_i32, { "r", "r" } },
1576
1577 { INDEX_op_ext8s_i32, { "r", "r" } },
1578 { INDEX_op_ext16s_i32, { "r", "r" } },
1579
1580 { INDEX_op_brcond_i32, { "rZ", "rJ" } },
1581 { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },
1582
1583 { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
1584 { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },
1585
1586 { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
1587 { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },
1588
1589 { INDEX_op_deposit_i32, { "r", "0", "rJ" } },
1590
1591 #if TARGET_LONG_BITS == 32
1592 { INDEX_op_qemu_ld8u, { "r", "L" } },
1593 { INDEX_op_qemu_ld8s, { "r", "L" } },
1594 { INDEX_op_qemu_ld16u, { "r", "L" } },
1595 { INDEX_op_qemu_ld16s, { "r", "L" } },
1596 { INDEX_op_qemu_ld32, { "r", "L" } },
1597 { INDEX_op_qemu_ld64, { "r", "r", "L" } },
1598
1599 { INDEX_op_qemu_st8, { "LZ", "L" } },
1600 { INDEX_op_qemu_st16, { "LZ", "L" } },
1601 { INDEX_op_qemu_st32, { "LZ", "L" } },
1602 { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
1603 #else
1604 { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
1605 { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
1606 { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
1607 { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
1608 { INDEX_op_qemu_ld32, { "r", "L", "L" } },
1609 { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
1610
1611 { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
1612 { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
1613 { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
1614 { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
1615 #endif
1616 { -1 },
1617 };
1618
1619 static int tcg_target_callee_save_regs[] = {
1620 /* R2, the return address register, is saved specially
1621 in the caller's frame. */
1622 /* R3, the frame pointer, is not currently modified. */
1623 TCG_REG_R4,
1624 TCG_REG_R5,
1625 TCG_REG_R6,
1626 TCG_REG_R7,
1627 TCG_REG_R8,
1628 TCG_REG_R9,
1629 TCG_REG_R10,
1630 TCG_REG_R11,
1631 TCG_REG_R12,
1632 TCG_REG_R13,
1633 TCG_REG_R14,
1634 TCG_REG_R15,
1635 TCG_REG_R16,
1636 TCG_REG_R17, /* R17 is the global env. */
1637 TCG_REG_R18
1638 };
1639
1640 #define FRAME_SIZE ((-TCG_TARGET_CALL_STACK_OFFSET \
1641 + TCG_TARGET_STATIC_CALL_ARGS_SIZE \
1642 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4 \
1643 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
1644 + TCG_TARGET_STACK_ALIGN - 1) \
1645 & -TCG_TARGET_STACK_ALIGN)
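/* Illustration only: the expression above is the usual round-up idiom,
   (x + align - 1) & -align, applied to the sum of the outgoing-call
   area, the callee-save area and the TCG temp buffer, keeping the whole
   frame TCG_TARGET_STACK_ALIGN-aligned.  */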
1646
1647 static void tcg_target_qemu_prologue(TCGContext *s)
1648 {
1649 int frame_size, i;
1650
1651 frame_size = FRAME_SIZE;
1652
1653 /* The return address is stored in the caller's frame. */
1654 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, -20);
1655
1656 /* Allocate stack frame, saving the first register at the same time. */
1657 tcg_out_ldst(s, tcg_target_callee_save_regs[0],
1658 TCG_REG_CALL_STACK, frame_size, INSN_STWM);
1659
1660 /* Save all callee saved registers. */
1661 for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1662 tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
1663 TCG_REG_CALL_STACK, -frame_size + i * 4);
1664 }
1665
1666 /* Record the location of the TCG temps. */
1667 tcg_set_frame(s, TCG_REG_CALL_STACK, -frame_size + i * 4,
1668 CPU_TEMP_BUF_NLONGS * sizeof(long));
1669
1670 #ifdef CONFIG_USE_GUEST_BASE
1671 if (GUEST_BASE != 0) {
1672 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
1673 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
1674 }
1675 #endif
1676
1677 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
1678
1679 /* Jump to TB, and adjust R18 to be the return address. */
1680 tcg_out32(s, INSN_BLE_SR4 | INSN_R2(tcg_target_call_iarg_regs[1]));
1681 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31);
1682
1683 /* Restore callee saved registers. */
1684 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK,
1685 -frame_size - 20);
1686 for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1687 tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
1688 TCG_REG_CALL_STACK, -frame_size + i * 4);
1689 }
1690
1691 /* Deallocate stack frame and return. */
1692 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
1693 tcg_out_ldst(s, tcg_target_callee_save_regs[0],
1694 TCG_REG_CALL_STACK, -frame_size, INSN_LDWM);
1695 }
1696
1697 static void tcg_target_init(TCGContext *s)
1698 {
1699 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
1700
1701 tcg_regset_clear(tcg_target_call_clobber_regs);
1702 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
1703 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
1704 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
1705 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
1706 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
1707 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
1708 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
1709 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
1710 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);
1711
1712 tcg_regset_clear(s->reserved_regs);
1713 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* hardwired to zero */
1714 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* addil target */
1715 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP); /* link register */
1716 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* frame pointer */
1717 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
1718 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
1719 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
1720 tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP); /* data pointer */
1721 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); /* stack pointer */
1722 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */
1723
1724 tcg_add_target_add_op_defs(hppa_op_defs);
1725 }
1726
1727 typedef struct {
1728 uint32_t len __attribute__((aligned((sizeof(void *)))));
1729 uint32_t id;
1730 uint8_t version;
1731 char augmentation[1];
1732 uint8_t code_align;
1733 uint8_t data_align;
1734 uint8_t return_column;
1735 } DebugFrameCIE;
1736
1737 typedef struct {
1738 uint32_t len __attribute__((aligned((sizeof(void *)))));
1739 uint32_t cie_offset;
1740 tcg_target_long func_start __attribute__((packed));
1741 tcg_target_long func_len __attribute__((packed));
1742 uint8_t def_cfa[4];
1743 uint8_t ret_ofs[3];
1744 uint8_t reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
1745 } DebugFrameFDE;
1746
1747 typedef struct {
1748 DebugFrameCIE cie;
1749 DebugFrameFDE fde;
1750 } DebugFrame;
1751
1752 #define ELF_HOST_MACHINE EM_PARISC
1753 #define ELF_HOST_FLAGS EFA_PARISC_1_1
1754
1755 /* ??? BFD (and thus GDB) wants very much to distinguish between HPUX
1756 and other extensions. We don't really care, but if we don't set this
1757 to *something* then the object file won't be properly matched. */
1758 #define ELF_OSABI ELFOSABI_LINUX
1759
1760 static DebugFrame debug_frame = {
1761 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1762 .cie.id = -1,
1763 .cie.version = 1,
1764 .cie.code_align = 1,
1765 .cie.data_align = 1,
1766 .cie.return_column = 2,
1767
1768 .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
1769 .fde.def_cfa = {
1770 0x12, 30, /* DW_CFA_def_cfa_sf sp, ... */
1771 (-FRAME_SIZE & 0x7f) | 0x80, /* ... sleb128 -FRAME_SIZE */
1772 (-FRAME_SIZE >> 7) & 0x7f
1773 },
1774 .fde.ret_ofs = {
1775 0x11, 2, (-20 / 4) & 0x7f /* DW_CFA_offset_extended_sf r2, 20 */
1776 },
1777 .fde.reg_ofs = {
1778 /* This must match the ordering in tcg_target_callee_save_regs. */
1779 0x80 + 4, 0, /* DW_CFA_offset r4, 0 */
1780 0x80 + 5, 4, /* DW_CFA_offset r5, 4 */
1781 0x80 + 6, 8, /* DW_CFA_offset r6, 8 */
1782 0x80 + 7, 12, /* ... */
1783 0x80 + 8, 16,
1784 0x80 + 9, 20,
1785 0x80 + 10, 24,
1786 0x80 + 11, 28,
1787 0x80 + 12, 32,
1788 0x80 + 13, 36,
1789 0x80 + 14, 40,
1790 0x80 + 15, 44,
1791 0x80 + 16, 48,
1792 0x80 + 17, 52,
1793 0x80 + 18, 56,
1794 }
1795 };
1796
1797 void tcg_register_jit(void *buf, size_t buf_size)
1798 {
1799 debug_frame.fde.func_start = (tcg_target_long) buf;
1800 debug_frame.fde.func_len = buf_size;
1801
1802 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1803 }