/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
    "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
};
#endif

/* This is an 8 byte temp slot in the stack frame. */
#define STACK_TEMP_OFS -16

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,

    TCG_REG_R17,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,

    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,

    TCG_REG_RET0,
    TCG_REG_RET1,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RET0,
    TCG_REG_RET1,
};

/* True iff val fits a signed field of width BITS. */
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}
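
/* For example, with BITS = 11 the representable range is [-1024, 1023]:
   check_fit_tl(1023, 11) and check_fit_tl(-1024, 11) hold, while
   check_fit_tl(1024, 11) does not, since the sign bit is lost in the
   round trip through the shifts. */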

/* True iff depi can be used to compute (reg | MASK).
   Accept a bit pattern like:
      0....01....1
      1....10....0
      0..01..10..0
   Copied from gcc sources. */
static inline int or_mask_p(tcg_target_ulong mask)
{
    if (mask == 0 || mask == -1) {
        return 0;
    }
    mask += mask & -mask;
    return (mask & (mask - 1)) == 0;
}
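
/* Worked example: mask = 0x06 (binary 110).  mask & -mask isolates the
   lowest set bit (0x02); adding it collapses a contiguous run into a
   single carry bit (0x08), which passes the power-of-two test.  A
   non-contiguous mask such as 0x05 becomes 0x06 instead and fails.
   A run against the high end, e.g. 0xffffff00, carries out of the
   32-bit word entirely and yields 0, which also passes. */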

/* True iff depi or extru can be used to compute (reg & mask).
   Accept a bit pattern like these:
      0....01....1
      1....10....0
      1..10..01..1
   Copied from gcc sources. */
static inline int and_mask_p(tcg_target_ulong mask)
{
    return or_mask_p(~mask);
}

static int low_sign_ext(int val, int len)
{
    return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
}
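
/* low_sign_ext encodes the PA-RISC "low sign extension" immediate
   format: the sign bit lives in the least significant bit of the field
   and the value bits are shifted up by one.  E.g. low_sign_ext(5, 5)
   == 0b01010 and low_sign_ext(-1, 5) == 0b11111. */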

static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}

static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}

static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}
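
/* The reassemble_* helpers scatter a contiguous displacement into the
   discontiguous fields of the PA-RISC branch and LDIL formats.  For
   instance, reassemble_12 moves the sign bit (bit 11) to instruction
   bit 0, bit 10 to bit 2, and the low ten bits to bits 3..12 -- the
   union of which is exactly the 0x1ffd mask cleared by patch_reloc
   below. */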

/* ??? Bizarrely, there is no PCREL12F relocation type.  I guess all
   such relocations are simply fully handled by the assembler. */
#define R_PARISC_PCREL12F R_PARISC_NONE

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    uint32_t *insn_ptr = (uint32_t *)code_ptr;
    uint32_t insn = *insn_ptr;
    tcg_target_long pcrel;

    value += addend;
    pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;

    switch (type) {
    case R_PARISC_PCREL12F:
        assert(check_fit_tl(pcrel, 12));
        /* ??? We assume all patches are forward.  See tcg_out_brcond
           re setting the NUL bit on the branch and eliding the nop. */
        assert(pcrel >= 0);
        insn &= ~0x1ffdu;
        insn |= reassemble_12(pcrel);
        break;
    case R_PARISC_PCREL17F:
        assert(check_fit_tl(pcrel, 17));
        insn &= ~0x1f1ffdu;
        insn |= reassemble_17(pcrel);
        break;
    default:
        tcg_abort();
    }

    *insn_ptr = insn;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_0;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S5;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MS11;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_AND;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_OR;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if (ct & TCG_CT_CONST_0) {
        return val == 0;
    } else if (ct & TCG_CT_CONST_S5) {
        return check_fit_tl(val, 5);
    } else if (ct & TCG_CT_CONST_S11) {
        return check_fit_tl(val, 11);
    } else if (ct & TCG_CT_CONST_MS11) {
        return check_fit_tl(-val, 11);
    } else if (ct & TCG_CT_CONST_AND) {
        return and_mask_p(val);
    } else if (ct & TCG_CT_CONST_OR) {
        return or_mask_p(val);
    }
    return 0;
}

#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)

#define COND_NEVER   0
#define COND_EQ      1
#define COND_LT      2
#define COND_LE      3
#define COND_LTU     4
#define COND_LEU     5
#define COND_SV      6
#define COND_OD      7
#define COND_FALSE   8

#define INSN_ADD        (INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC       (INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI       (INSN_OP(0x2d))
#define INSN_ADDIL      (INSN_OP(0x0a))
#define INSN_ADDL       (INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND        (INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM      (INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR     (INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR    (INSN_OP(0x24))
#define INSN_DEP        (INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI       (INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS      (INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU      (INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL       (INSN_OP(0x08))
#define INSN_LDO        (INSN_OP(0x0d))
#define INSN_MTCTL      (INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR         (INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD        (INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB        (INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB       (INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI       (INSN_OP(0x25))
#define INSN_VEXTRS     (INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU     (INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD       (INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR        (INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP       (INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP      (INSN_OP(0x35) | INSN_EXT3SH(0))

#define INSN_BL         (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N       (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR        (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV         (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N       (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4    (INSN_OP(0x39) | (1 << 13))

#define INSN_LDB        (INSN_OP(0x10))
#define INSN_LDH        (INSN_OP(0x11))
#define INSN_LDW        (INSN_OP(0x12))
#define INSN_LDWM       (INSN_OP(0x13))
#define INSN_FLDDS      (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

#define INSN_LDBX       (INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX       (INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX       (INSN_OP(0x03) | INSN_EXT4(2))

#define INSN_STB        (INSN_OP(0x18))
#define INSN_STH        (INSN_OP(0x19))
#define INSN_STW        (INSN_OP(0x1a))
#define INSN_STWM       (INSN_OP(0x1b))
#define INSN_FSTDS      (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

#define INSN_COMBT      (INSN_OP(0x20))
#define INSN_COMBF      (INSN_OP(0x22))
#define INSN_COMIBT     (INSN_OP(0x21))
#define INSN_COMIBF     (INSN_OP(0x23))

/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(const void *);

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
       but hppa-dis.c is unaware of this definition */
    if (ret != arg) {
        tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
                  | INSN_R2(TCG_REG_R0));
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    if (check_fit_tl(arg, 14)) {
        tcg_out32(s, INSN_LDO | INSN_R1(ret)
                  | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
    } else {
        uint32_t hi, lo;
        hi = arg >> 11;
        lo = arg & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
        if (lo) {
            tcg_out32(s, INSN_LDO | INSN_R1(ret)
                      | INSN_R2(ret) | INSN_IM14(lo));
        }
    }
}
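
/* Example: loading 0x12345678 splits into hi = 0x2468a and lo = 0x678.
   LDIL deposits hi into the upper 21 bits of the register and the
   trailing LDO adds the low 11 bits; when lo is zero the LDO is
   omitted. */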

static void tcg_out_ldst(TCGContext *s, int ret, int addr,
                         tcg_target_long offset, int op)
{
    if (!check_fit_tl(offset, 14)) {
        uint32_t hi, lo, op;

        hi = offset >> 11;
        lo = offset & 0x7ff;

        if (addr == TCG_REG_R0) {
            op = INSN_LDIL | INSN_R2(TCG_REG_R1);
        } else {
            op = INSN_ADDIL | INSN_R2(addr);
        }
        tcg_out32(s, op | reassemble_21(hi));

        addr = TCG_REG_R1;
        offset = lo;
    }

    if (ret != addr || offset != 0 || op != INSN_LDO) {
        tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
    }
}
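
/* For an out-of-range offset, ADDIL (which always targets R1) adds the
   upper 21 bits of the offset to the base register; the memory insn
   then uses R1 with the remaining low 11 bits as its displacement. */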

/* This function is required by tcg.c. */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
}

/* This function is required by tcg.c. */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
}

static void tcg_out_ldst_index(TCGContext *s, int data,
                               int base, int index, int op)
{
    tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
}

static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
                                 tcg_target_long val)
{
    tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
}

/* This function is required by tcg.c. */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    tcg_out_addi2(s, reg, reg, val);
}

static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
{
    tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
}

static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
                                  tcg_target_long val, int op)
{
    assert(check_fit_tl(val, 11));
    tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
}

static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
{
    tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
}

/* Extract LEN bits at position OFS from ARG and place in RET.
   Note that here the bit ordering is reversed from the PA-RISC
   standard, such that the right-most bit is 0. */
static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len, int sign)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
              | INSN_R1(ret) | INSN_R2(arg)
              | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
}
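
/* Example: tcg_out_extr(s, ret, arg, 8, 8, 0) computes
   ret = (arg >> 8) & 0xff, i.e. an EXTRU at PA-RISC bit position
   31 - 8 = 23 with length 8. */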

/* Likewise with OFS interpreted little-endian. */
static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
                               unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}

static inline void tcg_out_depi(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}

static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
                               unsigned count)
{
    assert(count < 32);
    tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
              | INSN_SHDEP_CP(count));
}

static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
{
    tcg_out_mtctl_sar(s, creg);
    tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
}

static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int bs0, bs1;

    /* Note that the argument is constrained to match or_mask_p. */
    for (bs0 = 0; bs0 < 32; bs0++) {
        if ((m & (1u << bs0)) != 0) {
            break;
        }
    }
    for (bs1 = bs0; bs1 < 32; bs1++) {
        if ((m & (1u << bs1)) == 0) {
            break;
        }
    }
    assert(bs1 == 32 || (1ul << bs1) > m);

    tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
    tcg_out_depi(s, ret, -1, bs0, bs1 - bs0);
}
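
/* Example: m = 0x00ff0000 yields bs0 = 16 (first set bit) and bs1 = 24
   (first clear bit above the run), so the OR reduces to a single DEPI
   of -1 at offset 16, length 8. */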

static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int ls0, ls1, ms0;

    /* Note that the argument is constrained to match and_mask_p. */
    for (ls0 = 0; ls0 < 32; ls0++) {
        if ((m & (1u << ls0)) == 0) {
            break;
        }
    }
    for (ls1 = ls0; ls1 < 32; ls1++) {
        if ((m & (1u << ls1)) != 0) {
            break;
        }
    }
    for (ms0 = ls1; ms0 < 32; ms0++) {
        if ((m & (1u << ms0)) == 0) {
            break;
        }
    }
    assert(ms0 == 32);

    if (ls1 == 32) {
        tcg_out_extr(s, ret, arg, 0, ls0, 0);
    } else {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
        tcg_out_depi(s, ret, 0, ls0, ls1 - ls0);
    }
}
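
/* Two shapes reach this point: a low mask such as 0x000000ff (ls1 ==
   32) becomes a single EXTRU of the low ls0 bits, while a mask with a
   hole such as 0xffff00ff becomes a DEPI of zero over the hole
   (offset 8, length 8 in this example). */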

static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 8, 1);
}

static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 16, 1);
}

static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
}

static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 0);
}

static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
}

static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 1);
}

static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, 32 - count);
}
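
/* Rotates are double-word shifts with both inputs the same register:
   SHD of arg:arg by 32 - count rotates left by count, and by count
   rotates right, as in tcg_out_rotri below. */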

static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
    tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
}

static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, count);
}

static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, arg, arg, creg);
}

static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
{
    if (ret != arg) {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg);  /* arg =  xxAB */
    }
    tcg_out_dep(s, ret, ret, 16, 8);             /* ret =  xBAB */
    tcg_out_extr(s, ret, ret, 8, 16, sign);      /* ret =  ..BA */
}

static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
{
                                             /* arg =  ABCD */
    tcg_out_rotri(s, temp, arg, 16);         /* temp = CDAB */
    tcg_out_dep(s, temp, temp, 16, 8);       /* temp = CBAB */
    tcg_out_shd(s, ret, arg, temp, 8);       /* ret =  DCBA */
}

static void tcg_out_call(TCGContext *s, const void *func)
{
    tcg_target_long val, hi, lo, disp;

    val = (uint32_t)__canonicalize_funcptr_for_compare(func);
    disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;

    if (check_fit_tl(disp, 17)) {
        tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
    } else {
        hi = val >> 11;
        lo = val & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
        tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
                  | reassemble_17(lo >> 2));
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_RP, TCG_REG_R31);
    }
}
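
/* When the callee is beyond the reach of the 17-bit BL displacement,
   the long-call sequence builds the high part of the address with LDIL
   and branches through BLE, which leaves the return address in R31;
   the final copy moves it back into RP as the ABI expects. */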

static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
                          int arg1, int arg2)
{
    /* Store both words into the stack for copy to the FPU. */
    tcg_out_ldst(s, arg1, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_STW);
    tcg_out_ldst(s, arg2, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4, INSN_STW);

    /* Load both words into the FPU at the same time.  We get away
       with this because we can address the left and right half of the
       FPU registers individually once loaded. */
    /* fldds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_CALL_STACK)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* xmpyu fr22r,fr22,fr22 */
    tcg_out32(s, 0x3ad64796);

    /* Store the 64-bit result back into the stack. */
    /* fstds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_CALL_STACK)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* Load the pieces of the result that the caller requested. */
    if (reth) {
        tcg_out_ldst(s, reth, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_LDW);
    }
    if (retl) {
        tcg_out_ldst(s, retl, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4,
                     INSN_LDW);
    }
}

static void tcg_out_add2(TCGContext *s, int destl, int desth,
                         int al, int ah, int bl, int bh, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (blconst) {
        tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_ADD);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_ADDC);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}

static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
                         int bl, int bh, int alconst, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (alconst) {
        if (blconst) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
            bl = TCG_REG_R20;
        }
        tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
    } else if (blconst) {
        tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_SUB);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_SUBB);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}

static void tcg_out_branch(TCGContext *s, int label_index, int nul)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t op = nul ? INSN_BL_N : INSN_BL;

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 17));

        tcg_out32(s, op | reassemble_17(val));
    } else {
        /* We need to keep the offset unchanged for retranslation. */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
        tcg_out32(s, op | (old_insn & 0x1f1ffdu));
    }
}

static const uint8_t tcg_cond_to_cmp_cond[10] =
{
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_EQ | COND_FALSE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_LT | COND_FALSE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_LE | COND_FALSE,
    [TCG_COND_LTU] = COND_LTU,
    [TCG_COND_GEU] = COND_LTU | COND_FALSE,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_LEU | COND_FALSE,
};
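
/* The low three bits of each entry select the PA-RISC condition field;
   COND_FALSE (bit 3) asks for the negated sense, which the emitters
   below express either by picking the COMBF/COMIBF opcode or by
   setting the f-bit (bit 12) in COMCLR/COMICLR. */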

static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
                           TCGArg c2, int c2const, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    int op, pacond;

    /* Note that COMIB operates as if the immediate is the first
       operand.  We model brcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMB with reversed operands as well. */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
        op |= INSN_IM5(c2);
    } else {
        op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
        op |= INSN_R1(c2);
    }
    op |= INSN_R2(c1);
    op |= INSN_COND(pacond & 7);

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 12));

        /* ??? Assume that all branches to defined labels are backward.
           Which means that if the nul bit is set, the delay slot is
           executed if the branch is taken, and not executed in fallthru. */
        tcg_out32(s, op | reassemble_12(val));
        tcg_out_nop(s);
    } else {
        /* We need to keep the offset unchanged for retranslation. */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
        /* ??? Assume that all branches to undefined labels are forward.
           Which means that if the nul bit is set, the delay slot is
           not executed if the branch is taken, which is what we want. */
        tcg_out32(s, op | 2 | (old_insn & 0x1ffdu));
    }
}

static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
                           TCGArg c1, TCGArg c2, int c2const)
{
    int op, pacond;

    /* Note that COMICLR operates as if the immediate is the first
       operand.  We model setcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMCLR with reversed operands as well. */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
    } else {
        op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
    }
    op |= INSN_COND(pacond & 7);
    op |= pacond & COND_FALSE ? 1 << 12 : 0;

    tcg_out32(s, op);
}

static TCGCond const tcg_high_cond[] = {
    [TCG_COND_EQ] = TCG_COND_EQ,
    [TCG_COND_NE] = TCG_COND_NE,
    [TCG_COND_LT] = TCG_COND_LT,
    [TCG_COND_LE] = TCG_COND_LT,
    [TCG_COND_GT] = TCG_COND_GT,
    [TCG_COND_GE] = TCG_COND_GT,
    [TCG_COND_LTU] = TCG_COND_LTU,
    [TCG_COND_LEU] = TCG_COND_LTU,
    [TCG_COND_GTU] = TCG_COND_GTU,
    [TCG_COND_GEU] = TCG_COND_GTU
};
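
/* For double-word compares the high parts decide the result unless
   they are equal, so LE and GE map to the strict LT and GT on the high
   word; the low words are then compared unsigned only when a COMCLR
   equality test on the high words passes.  See tcg_out_brcond2 and
   tcg_out_setcond2 below. */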

static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
                            TCGArg bl, int blconst, TCGArg bh, int bhconst,
                            int label_index)
{
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, al, bl, blconst);
        tcg_out_brcond(s, TCG_COND_EQ, ah, bh, bhconst, label_index);
        break;
    case TCG_COND_NE:
        tcg_out_brcond(s, TCG_COND_NE, al, bl, blconst, label_index);
        tcg_out_brcond(s, TCG_COND_NE, ah, bh, bhconst, label_index);
        break;
    default:
        tcg_out_brcond(s, tcg_high_cond[cond], ah, bh, bhconst, label_index);
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_brcond(s, tcg_unsigned_cond(cond),
                       al, bl, blconst, label_index);
        break;
    }
}

static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
    tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
}

static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
                             TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                             TCGArg bh, int bhconst)
{
    int scratch = TCG_REG_R20;

    /* Note that the low parts are fully consumed before scratch is set. */
    if (ret != ah && (bhconst || ret != bh)) {
        scratch = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_setcond(s, cond, scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
        break;

    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LT:
    case TCG_COND_LTU:
        /* Optimize compares with low part zero. */
        if (bl == 0) {
            tcg_out_setcond(s, cond, ret, ah, bh, bhconst);
            return;
        }
        /* FALLTHRU */

    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_GT:
    case TCG_COND_GTU:
        /* <= : ah < bh | (ah == bh && al <= bl) */
        tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
        tcg_out_comclr(s, tcg_invert_cond(tcg_high_cond[cond]),
                       TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
        break;

    default:
        tcg_abort();
    }

    tcg_out_mov(s, TCG_TYPE_I32, ret, scratch);
}

static void tcg_out_movcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const,
                            TCGArg v1, int v1const)
{
    tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, c1, c2, c2const);
    if (v1const) {
        tcg_out_movi(s, TCG_TYPE_I32, ret, v1);
    } else {
        tcg_out_mov(s, TCG_TYPE_I32, ret, v1);
    }
}

#if defined(CONFIG_SOFTMMU)
#include "../../softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};

/* Load and compare a TLB entry, and branch if TLB miss.  OFFSET is set to
   the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
   TLB for the memory index.  The return value is the offset from ENV
   contained in R1 afterward (to be used when loading ADDEND); if the
   return value is 0, R1 is not used. */

static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
                            int addrhi, int s_bits, int lab_miss, int offset)
{
    int ret;

    /* Extracting the index into the TLB.  The "normal C operation" is
         r1 = addr_reg >> TARGET_PAGE_BITS;
         r1 &= CPU_TLB_SIZE - 1;
         r1 <<= CPU_TLB_ENTRY_BITS;
       What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
       and place them at CPU_TLB_ENTRY_BITS.  We can combine the first two
       operations with an EXTRU.  Unfortunately, the current value of
       CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
       add that follows. */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);

    /* Make sure that both the addr_{read,write} and addend can be
       read with a 14-bit offset from the same base register. */
    if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
        ret = 0;
    } else {
        ret = (offset + 0x400) & ~0x7ff;
        offset -= ret;
        tcg_out_addi2(s, TCG_REG_R1, r1, ret);
        r1 = TCG_REG_R1;
    }

    /* Load the entry from the computed slot. */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    }

    /* Compute the value that ought to appear in the TLB for a hit, namely,
       the page of the address.  We include the low N bits of the address
       to catch unaligned accesses and force them onto the slow path.  Do
       this computation after having issued the load from the TLB slot to
       give the load time to complete. */
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* If not equal, jump to lab_miss. */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
                        r0, 0, addrhi, 0, lab_miss);
    } else {
        tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
    }

    return ret;
}
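
/* Note on the adjustment above: the TLB offset is rounded to the
   nearest multiple of 0x800 and folded into R1, leaving a residual
   displacement of at most +-0x400, so that the addr_{read,write} words
   and the later addend load all fit the signed 14-bit LDW field. */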

static int tcg_out_arg_reg32(TCGContext *s, int argno, TCGArg v, bool vconst)
{
    if (argno < 4) {
        if (vconst) {
            tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
        }
    } else {
        if (vconst && v != 0) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, v);
            v = TCG_REG_R20;
        }
        tcg_out_st(s, TCG_TYPE_I32, v, TCG_REG_CALL_STACK,
                   TCG_TARGET_CALL_STACK_OFFSET - ((argno - 3) * 4));
    }
    return argno + 1;
}

static int tcg_out_arg_reg64(TCGContext *s, int argno, TCGArg vl, TCGArg vh)
{
    /* 64-bit arguments must go in even reg pairs and stack slots. */
    if (argno & 1) {
        argno++;
    }
    argno = tcg_out_arg_reg32(s, argno, vl, false);
    argno = tcg_out_arg_reg32(s, argno, vh, false);
    return argno;
}
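
/* Per the 32-bit PA-RISC calling convention, the first four argument
   words live in R26..R23 (in that order) and the remainder go on the
   stack; 64-bit arguments must additionally start on an even argument
   index, hence the alignment in tcg_out_arg_reg64 above. */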
#endif

static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg, int datahi_reg,
                                   int addr_reg, int addend_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        break;
    case 0 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        tcg_out_ext8s(s, datalo_reg, datalo_reg);
        break;
    case 1:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
        }
        break;
    case 1 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
        } else {
            tcg_out_ext16s(s, datalo_reg, datalo_reg);
        }
        break;
    case 2:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
        }
        break;
    case 3:
        if (bswap) {
            int t = datahi_reg;
            datahi_reg = datalo_reg;
            datalo_reg = t;
        }
        /* We can't access the low-part with a reg+reg addressing mode,
           so perform the addition now and use reg_ofs addressing mode. */
        if (addend_reg != TCG_REG_R0) {
            tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
            addr_reg = TCG_REG_R20;
        }
        /* Make sure not to clobber the base register. */
        if (datahi_reg == addr_reg) {
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
        } else {
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
        }
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
        }
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit loads. */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests. */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argno, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
                              addrhi_reg, opc & 3, lab1, offset);

    /* TLB Hit. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
               (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           TCG_REG_R20, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss. */
    /* label1: */
    tcg_out_label(s, lab1, s->code_ptr);

    argno = 0;
    argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
    if (TARGET_LONG_BITS == 64) {
        argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
    } else {
        argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
    }
    argno = tcg_out_arg_reg32(s, argno, mem_index, true);

    tcg_out_call(s, qemu_ld_helpers[opc & 3]);

    switch (opc) {
    case 0:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
        break;
    case 0 | 4:
        tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 1:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 2:
    case 2 | 4:
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_out_mov(s, TCG_TYPE_I32, datahi_reg, TCG_REG_RET0);
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET1);
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    tcg_out_label(s, lab2, s->code_ptr);
#else
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg,
                                   int datahi_reg, int addr_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
            datahi_reg = TCG_REG_R20;
            datalo_reg = TCG_REG_R23;
        }
        tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
        tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit stores. */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests. */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argno, next, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
                              addrhi_reg, opc, lab1, offset);

    /* TLB Hit. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
               (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);

    /* There are no indexed stores, so we must do this addition explicitly.
       Careful to avoid R20, which is used for the bswaps to follow. */
    tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss. */
    /* label1: */
    tcg_out_label(s, lab1, s->code_ptr);

    argno = 0;
    argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
    if (TARGET_LONG_BITS == 64) {
        argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
    } else {
        argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
    }

    next = (argno < 4 ? tcg_target_call_iarg_regs[argno] : TCG_REG_R20);
    switch (opc) {
    case 0:
        tcg_out_andi(s, next, datalo_reg, 0xff);
        argno = tcg_out_arg_reg32(s, argno, next, false);
        break;
    case 1:
        tcg_out_andi(s, next, datalo_reg, 0xffff);
        argno = tcg_out_arg_reg32(s, argno, next, false);
        break;
    case 2:
        argno = tcg_out_arg_reg32(s, argno, datalo_reg, false);
        break;
    case 3:
        argno = tcg_out_arg_reg64(s, argno, datalo_reg, datahi_reg);
        break;
    default:
        tcg_abort();
    }
    argno = tcg_out_arg_reg32(s, argno, mem_index, true);

    tcg_out_call(s, qemu_st_helpers[opc]);

    /* label2: */
    tcg_out_label(s, lab2, s->code_ptr);
#else
    /* There are no indexed stores, so if GUEST_BASE is set we must do
       the add explicitly.  Careful to avoid R20, which is used for the
       bswaps to follow. */
    if (GUEST_BASE != 0) {
        tcg_out_arith(s, TCG_REG_R31, addrlo_reg,
                      TCG_GUEST_BASE_REG, INSN_ADDL);
        addrlo_reg = TCG_REG_R31;
    }
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
#endif
}

static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
{
    if (!check_fit_tl(arg, 14)) {
        uint32_t hi, lo;
        hi = arg & ~0x7ff;
        lo = arg & 0x7ff;
        if (lo) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
            tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
            tcg_out_addi(s, TCG_REG_RET0, lo);
            return;
        }
        arg = hi;
    }
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
}

static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
{
    if (s->tb_jmp_offset) {
        /* direct jump method */
        fprintf(stderr, "goto_tb direct\n");
        tcg_abort();
    } else {
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
                   (tcg_target_long)(s->tb_next + arg));
        tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
    }
    s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
}

static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                              const int *const_args)
{
    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out_exit_tb(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        tcg_out_goto_tb(s, args[0]);
        break;

    case INDEX_op_call:
        if (const_args[0]) {
            tcg_out_call(s, (void *)args[0]);
        } else {
            /* ??? FIXME: the value in the register in args[0] is almost
               certainly a procedure descriptor, not a code address.  We
               probably need to use the millicode $$dyncall routine. */
            tcg_abort();
        }
        break;

    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        tcg_abort();
        break;

    case INDEX_op_br:
        tcg_out_branch(s, args[0], 1);
        break;

    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

    case INDEX_op_ld8u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
        tcg_out_ext16s(s, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
        break;

    case INDEX_op_st8_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
        break;
    case INDEX_op_st16_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
        break;
    case INDEX_op_st_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
        break;

    case INDEX_op_add_i32:
        if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
        }
        break;

    case INDEX_op_sub_i32:
        if (const_args[1]) {
            if (const_args[2]) {
                tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
            } else {
                /* Recall that SUBI is a reversed subtract. */
                tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
            }
        } else if (const_args[2]) {
            tcg_out_addi2(s, args[0], args[1], -args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
        }
        break;

    case INDEX_op_and_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
        }
        break;

    case INDEX_op_or_i32:
        if (const_args[2]) {
            tcg_out_ori(s, args[0], args[1], args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
        }
        break;

    case INDEX_op_xor_i32:
        tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
        break;

    case INDEX_op_andc_i32:
        if (const_args[2]) {
            tcg_out_andi(s, args[0], args[1], ~args[2]);
        } else {
            tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
        }
        break;

    case INDEX_op_shl_i32:
        if (const_args[2]) {
            tcg_out_shli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_shr_i32:
        if (const_args[2]) {
            tcg_out_shri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_shr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_sar_i32:
        if (const_args[2]) {
            tcg_out_sari(s, args[0], args[1], args[2]);
        } else {
            tcg_out_sar(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotl_i32:
        if (const_args[2]) {
            tcg_out_rotli(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotl(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_rotr_i32:
        if (const_args[2]) {
            tcg_out_rotri(s, args[0], args[1], args[2]);
        } else {
            tcg_out_rotr(s, args[0], args[1], args[2]);
        }
        break;

    case INDEX_op_mul_i32:
        tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
        break;

    case INDEX_op_bswap16_i32:
        tcg_out_bswap16(s, args[0], args[1], 0);
        break;
    case INDEX_op_bswap32_i32:
        tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
        break;

    case INDEX_op_not_i32:
        tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
        break;
    case INDEX_op_ext8s_i32:
        tcg_out_ext8s(s, args[0], args[1]);
        break;
    case INDEX_op_ext16s_i32:
        tcg_out_ext16s(s, args[0], args[1]);
        break;

    case INDEX_op_brcond_i32:
        tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], args[0], args[1],
                        args[2], const_args[2],
                        args[3], const_args[3], args[5]);
        break;

    case INDEX_op_setcond_i32:
        tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
                         args[3], const_args[3], args[4], const_args[4]);
        break;

    case INDEX_op_movcond_i32:
        tcg_out_movcond(s, args[5], args[0], args[1], args[2], const_args[2],
                        args[3], const_args[3]);
        break;

    case INDEX_op_add2_i32:
        tcg_out_add2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[4]);
        break;

    case INDEX_op_sub2_i32:
        tcg_out_sub2(s, args[0], args[1], args[2], args[3],
                     args[4], args[5], const_args[2], const_args[4]);
        break;

    case INDEX_op_deposit_i32:
        if (const_args[2]) {
            tcg_out_depi(s, args[0], args[2], args[3], args[4]);
        } else {
            tcg_out_dep(s, args[0], args[2], args[3], args[4]);
        }
        break;

    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32:
        tcg_out_qemu_ld(s, args, 2);
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_qemu_ld(s, args, 3);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;
    case INDEX_op_qemu_st64:
        tcg_out_qemu_st(s, args, 3);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
}

static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "ri" } },
    { INDEX_op_jmp, { "r" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "rZ", "r" } },
    { INDEX_op_st16_i32, { "rZ", "r" } },
    { INDEX_op_st_i32, { "rZ", "r" } },

    { INDEX_op_add_i32, { "r", "rZ", "ri" } },
    { INDEX_op_sub_i32, { "r", "rI", "ri" } },
    { INDEX_op_and_i32, { "r", "rZ", "rM" } },
    { INDEX_op_or_i32, { "r", "rZ", "rO" } },
    { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
    /* Note that the second argument will be inverted, which means
       we want a constant whose inversion matches M, and that O = ~M.
       See the implementation of and_mask_p. */
    { INDEX_op_andc_i32, { "r", "rZ", "rO" } },

    { INDEX_op_mul_i32, { "r", "r", "r" } },
    { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "ri" } },
    { INDEX_op_shr_i32, { "r", "r", "ri" } },
    { INDEX_op_sar_i32, { "r", "r", "ri" } },
    { INDEX_op_rotl_i32, { "r", "r", "ri" } },
    { INDEX_op_rotr_i32, { "r", "r", "ri" } },

    { INDEX_op_bswap16_i32, { "r", "r" } },
    { INDEX_op_bswap32_i32, { "r", "r" } },
    { INDEX_op_not_i32, { "r", "r" } },

    { INDEX_op_ext8s_i32, { "r", "r" } },
    { INDEX_op_ext16s_i32, { "r", "r" } },

    { INDEX_op_brcond_i32, { "rZ", "rJ" } },
    { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },

    { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
    { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },

    /* ??? We can actually support a signed 14-bit arg3, but we
       only have existing constraints for a signed 11-bit. */
    { INDEX_op_movcond_i32, { "r", "rZ", "rI", "rI", "0" } },

    { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
    { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },

    { INDEX_op_deposit_i32, { "r", "0", "rJ" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
    { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
#endif
    { -1 },
};

static int tcg_target_callee_save_regs[] = {
    /* R2, the return address register, is saved specially
       in the caller's frame. */
    /* R3, the frame pointer, is not currently modified. */
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17, /* R17 is the global env. */
    TCG_REG_R18
};

#define FRAME_SIZE ((-TCG_TARGET_CALL_STACK_OFFSET                 \
                     + TCG_TARGET_STATIC_CALL_ARGS_SIZE            \
                     + ARRAY_SIZE(tcg_target_callee_save_regs) * 4 \
                     + CPU_TEMP_BUF_NLONGS * sizeof(long)          \
                     + TCG_TARGET_STACK_ALIGN - 1)                 \
                    & -TCG_TARGET_STACK_ALIGN)

static void tcg_target_qemu_prologue(TCGContext *s)
{
    int frame_size, i;

    frame_size = FRAME_SIZE;

    /* The return address is stored in the caller's frame. */
    tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, -20);

    /* Allocate stack frame, saving the first register at the same time. */
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_CALL_STACK, frame_size, INSN_STWM);

    /* Save all callee saved registers. */
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_CALL_STACK, -frame_size + i * 4);
    }

    /* Record the location of the TCG temps. */
    tcg_set_frame(s, TCG_REG_CALL_STACK, -frame_size + i * 4,
                  CPU_TEMP_BUF_NLONGS * sizeof(long));

#ifdef CONFIG_USE_GUEST_BASE
    if (GUEST_BASE != 0) {
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
        tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
    }
#endif

    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);

    /* Jump to TB, and adjust R18 to be the return address. */
    tcg_out32(s, INSN_BLE_SR4 | INSN_R2(tcg_target_call_iarg_regs[1]));
    tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31);

    /* Restore callee saved registers. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK,
               -frame_size - 20);
    for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
                   TCG_REG_CALL_STACK, -frame_size + i * 4);
    }

    /* Deallocate stack frame and return. */
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
    tcg_out_ldst(s, tcg_target_callee_save_regs[0],
                 TCG_REG_CALL_STACK, -frame_size, INSN_LDWM);
}

static void tcg_target_init(TCGContext *s)
{
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);

    tcg_regset_clear(tcg_target_call_clobber_regs);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
    tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);   /* hardwired to zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);   /* addil target */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP);   /* link register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);   /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18);  /* return pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19);  /* clobbered w/o pic */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20);  /* reserved */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP);   /* data pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31);  /* ble link reg */

    tcg_add_target_add_op_defs(hppa_op_defs);
}

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t id;
    uint8_t version;
    char augmentation[1];
    uint8_t code_align;
    uint8_t data_align;
    uint8_t return_column;
} DebugFrameCIE;

typedef struct {
    uint32_t len __attribute__((aligned((sizeof(void *)))));
    uint32_t cie_offset;
    tcg_target_long func_start __attribute__((packed));
    tcg_target_long func_len __attribute__((packed));
    uint8_t def_cfa[4];
    uint8_t ret_ofs[3];
    uint8_t reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrameFDE;

typedef struct {
    DebugFrameCIE cie;
    DebugFrameFDE fde;
} DebugFrame;

#define ELF_HOST_MACHINE  EM_PARISC
#define ELF_HOST_FLAGS    EFA_PARISC_1_1

/* ??? BFD (and thus GDB) wants very much to distinguish between HPUX
   and other extensions.  We don't really care, but if we don't set this
   to *something* then the object file won't be properly matched. */
#define ELF_OSABI         ELFOSABI_LINUX

static DebugFrame debug_frame = {
    .cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .cie.id = -1,
    .cie.version = 1,
    .cie.code_align = 1,
    .cie.data_align = 1,
    .cie.return_column = 2,

    .fde.len = sizeof(DebugFrameFDE) - 4, /* length after .len member */
    .fde.def_cfa = {
        0x12, 30,                       /* DW_CFA_def_cfa_sf sp, ... */
        (-FRAME_SIZE & 0x7f) | 0x80,    /* ... sleb128 -FRAME_SIZE */
        (-FRAME_SIZE >> 7) & 0x7f
    },
    .fde.ret_ofs = {
        0x11, 2, (-20 / 4) & 0x7f       /* DW_CFA_offset_extended_sf r2, 20 */
    },
    .fde.reg_ofs = {
        /* This must match the ordering in tcg_target_callee_save_regs. */
        0x80 + 4, 0,                    /* DW_CFA_offset r4, 0 */
        0x80 + 5, 4,                    /* DW_CFA_offset r5, 4 */
        0x80 + 6, 8,                    /* DW_CFA_offset r6, 8 */
        0x80 + 7, 12,                   /* ... */
        0x80 + 8, 16,
        0x80 + 9, 20,
        0x80 + 10, 24,
        0x80 + 11, 28,
        0x80 + 12, 32,
        0x80 + 13, 36,
        0x80 + 14, 40,
        0x80 + 15, 44,
        0x80 + 16, 48,
        0x80 + 17, 52,
        0x80 + 18, 56,
    }
};

void tcg_register_jit(void *buf, size_t buf_size)
{
    debug_frame.fde.func_start = (tcg_target_long) buf;
    debug_frame.fde.func_len = buf_size;

    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}