mirror_qemu.git: tcg/hppa/tcg-target.c ("tcg-hppa: Fix broken load/store helpers")
1 /*
2 * Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2008 Fabrice Bellard
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
24
25 #ifndef NDEBUG
26 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
27 "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
28 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
29 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
30 "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
31 };
32 #endif
33
34 /* This is an 8 byte temp slot in the stack frame. */
35 #define STACK_TEMP_OFS -16
36
37 #ifdef CONFIG_USE_GUEST_BASE
38 #define TCG_GUEST_BASE_REG TCG_REG_R16
39 #else
40 #define TCG_GUEST_BASE_REG TCG_REG_R0
41 #endif
42
43 static const int tcg_target_reg_alloc_order[] = {
44 TCG_REG_R4,
45 TCG_REG_R5,
46 TCG_REG_R6,
47 TCG_REG_R7,
48 TCG_REG_R8,
49 TCG_REG_R9,
50 TCG_REG_R10,
51 TCG_REG_R11,
52 TCG_REG_R12,
53 TCG_REG_R13,
54
55 TCG_REG_R17,
56 TCG_REG_R14,
57 TCG_REG_R15,
58 TCG_REG_R16,
59
60 TCG_REG_R26,
61 TCG_REG_R25,
62 TCG_REG_R24,
63 TCG_REG_R23,
64
65 TCG_REG_RET0,
66 TCG_REG_RET1,
67 };
68
69 static const int tcg_target_call_iarg_regs[4] = {
70 TCG_REG_R26,
71 TCG_REG_R25,
72 TCG_REG_R24,
73 TCG_REG_R23,
74 };
75
76 static const int tcg_target_call_oarg_regs[2] = {
77 TCG_REG_RET0,
78 TCG_REG_RET1,
79 };
80
81 /* True iff val fits a signed field of width BITS. */
82 static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
83 {
84 return (val << ((sizeof(tcg_target_long) * 8 - bits))
85 >> (sizeof(tcg_target_long) * 8 - bits)) == val;
86 }
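/* For illustration: with bits = 11 this accepts exactly the range of a
   signed 11-bit immediate, [-1024, 1023].  The shift pair sign-extends
   the low BITS bits and compares against the original value:
     check_fit_tl(1023, 11)  -> 1
     check_fit_tl(1024, 11)  -> 0
     check_fit_tl(-1024, 11) -> 1  */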
87
88 /* True iff depi can be used to compute (reg | MASK).
89 Accept a bit pattern like:
90 0....01....1
91 1....10....0
92 0..01..10..0
93 Copied from gcc sources. */
94 static inline int or_mask_p(tcg_target_ulong mask)
95 {
96 if (mask == 0 || mask == -1) {
97 return 0;
98 }
99 mask += mask & -mask;
100 return (mask & (mask - 1)) == 0;
101 }
102
103 /* True iff depi or extru can be used to compute (reg & mask).
104 Accept a bit pattern like these:
105 0....01....1
106 1....10....0
107 1..10..01..1
108 Copied from gcc sources. */
109 static inline int and_mask_p(tcg_target_ulong mask)
110 {
111 return or_mask_p(~mask);
112 }
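/* For illustration, tracing or_mask_p on two values:
     mask = 0x0e (0...01110): mask & -mask = 0x02; mask += 0x02 -> 0x10;
       0x10 & 0x0f = 0, so the mask is accepted (a single run of ones,
       possibly with trailing zeros, which DEPI can set in one insn).
     mask = 0x1a (0...11010): mask & -mask = 0x02; mask += 0x02 -> 0x1c;
       0x1c & 0x1b = 0x18 != 0, so the mask is rejected (two runs).  */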
113
114 static int low_sign_ext(int val, int len)
115 {
116 return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
117 }
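/* For illustration: PA-RISC "low sign extension" stores an immediate's
   sign bit in the rightmost bit of the field, with the magnitude bits
   shifted left by one:
     low_sign_ext(5, 5)  -> 0b01010
     low_sign_ext(-3, 5) -> 0b11011  (magnitude bits 1101, sign bit 1)  */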
118
119 static int reassemble_12(int as12)
120 {
121 return (((as12 & 0x800) >> 11) |
122 ((as12 & 0x400) >> 8) |
123 ((as12 & 0x3ff) << 3));
124 }
125
126 static int reassemble_17(int as17)
127 {
128 return (((as17 & 0x10000) >> 16) |
129 ((as17 & 0x0f800) << 5) |
130 ((as17 & 0x00400) >> 8) |
131 ((as17 & 0x003ff) << 3));
132 }
133
134 static int reassemble_21(int as21)
135 {
136 return (((as21 & 0x100000) >> 20) |
137 ((as21 & 0x0ffe00) >> 8) |
138 ((as21 & 0x000180) << 7) |
139 ((as21 & 0x00007c) << 14) |
140 ((as21 & 0x000003) << 12));
141 }
142
143 /* ??? Bizarrely, there is no PCREL12F relocation type.  I guess all
144    such relocations are simply fully handled by the assembler.  */
145 #define R_PARISC_PCREL12F R_PARISC_NONE
146
147 static void patch_reloc(uint8_t *code_ptr, int type,
148 tcg_target_long value, tcg_target_long addend)
149 {
150 uint32_t *insn_ptr = (uint32_t *)code_ptr;
151 uint32_t insn = *insn_ptr;
152 tcg_target_long pcrel;
153
154 value += addend;
155 pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;
156
157 switch (type) {
158 case R_PARISC_PCREL12F:
159 assert(check_fit_tl(pcrel, 12));
160 /* ??? We assume all patches are forward. See tcg_out_brcond
161 re setting the NUL bit on the branch and eliding the nop. */
162 assert(pcrel >= 0);
163 insn &= ~0x1ffdu;
164 insn |= reassemble_12(pcrel);
165 break;
166 case R_PARISC_PCREL17F:
167 assert(check_fit_tl(pcrel, 17));
168 insn &= ~0x1f1ffdu;
169 insn |= reassemble_17(pcrel);
170 break;
171 default:
172 tcg_abort();
173 }
174
175 *insn_ptr = insn;
176 }
177
178 /* maximum number of register used for input function arguments */
179 static inline int tcg_target_get_call_iarg_regs_count(int flags)
180 {
181 return 4;
182 }
183
184 /* parse target specific constraints */
185 static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
186 {
187 const char *ct_str;
188
189 ct_str = *pct_str;
190 switch (ct_str[0]) {
191 case 'r':
192 ct->ct |= TCG_CT_REG;
193 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
194 break;
195 case 'L': /* qemu_ld/st constraint */
196 ct->ct |= TCG_CT_REG;
197 tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
198 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
199 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
200 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
201 tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
202 break;
203 case 'Z':
204 ct->ct |= TCG_CT_CONST_0;
205 break;
206 case 'I':
207 ct->ct |= TCG_CT_CONST_S11;
208 break;
209 case 'J':
210 ct->ct |= TCG_CT_CONST_S5;
211 break;
212 case 'K':
213 ct->ct |= TCG_CT_CONST_MS11;
214 break;
215 case 'M':
216 ct->ct |= TCG_CT_CONST_AND;
217 break;
218 case 'O':
219 ct->ct |= TCG_CT_CONST_OR;
220 break;
221 default:
222 return -1;
223 }
224 ct_str++;
225 *pct_str = ct_str;
226 return 0;
227 }
228
229 /* test if a constant matches the constraint */
230 static int tcg_target_const_match(tcg_target_long val,
231 const TCGArgConstraint *arg_ct)
232 {
233 int ct = arg_ct->ct;
234 if (ct & TCG_CT_CONST) {
235 return 1;
236 } else if (ct & TCG_CT_CONST_0) {
237 return val == 0;
238 } else if (ct & TCG_CT_CONST_S5) {
239 return check_fit_tl(val, 5);
240 } else if (ct & TCG_CT_CONST_S11) {
241 return check_fit_tl(val, 11);
242 } else if (ct & TCG_CT_CONST_MS11) {
243 return check_fit_tl(-val, 11);
244 } else if (ct & TCG_CT_CONST_AND) {
245 return and_mask_p(val);
246 } else if (ct & TCG_CT_CONST_OR) {
247 return or_mask_p(val);
248 }
249 return 0;
250 }
251
252 #define INSN_OP(x) ((x) << 26)
253 #define INSN_EXT3BR(x) ((x) << 13)
254 #define INSN_EXT3SH(x) ((x) << 10)
255 #define INSN_EXT4(x) ((x) << 6)
256 #define INSN_EXT5(x) (x)
257 #define INSN_EXT6(x) ((x) << 6)
258 #define INSN_EXT7(x) ((x) << 6)
259 #define INSN_EXT8A(x) ((x) << 6)
260 #define INSN_EXT8B(x) ((x) << 5)
261 #define INSN_T(x) (x)
262 #define INSN_R1(x) ((x) << 16)
263 #define INSN_R2(x) ((x) << 21)
264 #define INSN_DEP_LEN(x) (32 - (x))
265 #define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
266 #define INSN_SHDEP_P(x) ((x) << 5)
267 #define INSN_COND(x) ((x) << 13)
268 #define INSN_IM11(x) low_sign_ext(x, 11)
269 #define INSN_IM14(x) low_sign_ext(x, 14)
270 #define INSN_IM5(x) (low_sign_ext(x, 5) << 16)
271
272 #define COND_NEVER 0
273 #define COND_EQ 1
274 #define COND_LT 2
275 #define COND_LE 3
276 #define COND_LTU 4
277 #define COND_LEU 5
278 #define COND_SV 6
279 #define COND_OD 7
280 #define COND_FALSE 8
281
282 #define INSN_ADD (INSN_OP(0x02) | INSN_EXT6(0x18))
283 #define INSN_ADDC (INSN_OP(0x02) | INSN_EXT6(0x1c))
284 #define INSN_ADDI (INSN_OP(0x2d))
285 #define INSN_ADDIL (INSN_OP(0x0a))
286 #define INSN_ADDL (INSN_OP(0x02) | INSN_EXT6(0x28))
287 #define INSN_AND (INSN_OP(0x02) | INSN_EXT6(0x08))
288 #define INSN_ANDCM (INSN_OP(0x02) | INSN_EXT6(0x00))
289 #define INSN_COMCLR (INSN_OP(0x02) | INSN_EXT6(0x22))
290 #define INSN_COMICLR (INSN_OP(0x24))
291 #define INSN_DEP (INSN_OP(0x35) | INSN_EXT3SH(3))
292 #define INSN_DEPI (INSN_OP(0x35) | INSN_EXT3SH(7))
293 #define INSN_EXTRS (INSN_OP(0x34) | INSN_EXT3SH(7))
294 #define INSN_EXTRU (INSN_OP(0x34) | INSN_EXT3SH(6))
295 #define INSN_LDIL (INSN_OP(0x08))
296 #define INSN_LDO (INSN_OP(0x0d))
297 #define INSN_MTCTL (INSN_OP(0x00) | INSN_EXT8B(0xc2))
298 #define INSN_OR (INSN_OP(0x02) | INSN_EXT6(0x09))
299 #define INSN_SHD (INSN_OP(0x34) | INSN_EXT3SH(2))
300 #define INSN_SUB (INSN_OP(0x02) | INSN_EXT6(0x10))
301 #define INSN_SUBB (INSN_OP(0x02) | INSN_EXT6(0x14))
302 #define INSN_SUBI (INSN_OP(0x25))
303 #define INSN_VEXTRS (INSN_OP(0x34) | INSN_EXT3SH(5))
304 #define INSN_VEXTRU (INSN_OP(0x34) | INSN_EXT3SH(4))
305 #define INSN_VSHD (INSN_OP(0x34) | INSN_EXT3SH(0))
306 #define INSN_XOR (INSN_OP(0x02) | INSN_EXT6(0x0a))
307 #define INSN_ZDEP (INSN_OP(0x35) | INSN_EXT3SH(2))
308 #define INSN_ZVDEP (INSN_OP(0x35) | INSN_EXT3SH(0))
309
310 #define INSN_BL (INSN_OP(0x3a) | INSN_EXT3BR(0))
311 #define INSN_BL_N (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
312 #define INSN_BLR (INSN_OP(0x3a) | INSN_EXT3BR(2))
313 #define INSN_BV (INSN_OP(0x3a) | INSN_EXT3BR(6))
314 #define INSN_BV_N (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
315 #define INSN_BLE_SR4 (INSN_OP(0x39) | (1 << 13))
316
317 #define INSN_LDB (INSN_OP(0x10))
318 #define INSN_LDH (INSN_OP(0x11))
319 #define INSN_LDW (INSN_OP(0x12))
320 #define INSN_LDWM (INSN_OP(0x13))
321 #define INSN_FLDDS (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))
322
323 #define INSN_LDBX (INSN_OP(0x03) | INSN_EXT4(0))
324 #define INSN_LDHX (INSN_OP(0x03) | INSN_EXT4(1))
325 #define INSN_LDWX (INSN_OP(0x03) | INSN_EXT4(2))
326
327 #define INSN_STB (INSN_OP(0x18))
328 #define INSN_STH (INSN_OP(0x19))
329 #define INSN_STW (INSN_OP(0x1a))
330 #define INSN_STWM (INSN_OP(0x1b))
331 #define INSN_FSTDS (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))
332
333 #define INSN_COMBT (INSN_OP(0x20))
334 #define INSN_COMBF (INSN_OP(0x22))
335 #define INSN_COMIBT (INSN_OP(0x21))
336 #define INSN_COMIBF (INSN_OP(0x23))
337
338 /* supplied by libgcc */
339 extern void *__canonicalize_funcptr_for_compare(const void *);
340
341 static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
342 {
343 /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
344 but hppa-dis.c is unaware of this definition */
345 if (ret != arg) {
346 tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
347 | INSN_R2(TCG_REG_R0));
348 }
349 }
350
351 static void tcg_out_movi(TCGContext *s, TCGType type,
352 TCGReg ret, tcg_target_long arg)
353 {
354 if (check_fit_tl(arg, 14)) {
355 tcg_out32(s, INSN_LDO | INSN_R1(ret)
356 | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
357 } else {
358 uint32_t hi, lo;
359 hi = arg >> 11;
360 lo = arg & 0x7ff;
361
362 tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
363 if (lo) {
364 tcg_out32(s, INSN_LDO | INSN_R1(ret)
365 | INSN_R2(ret) | INSN_IM14(lo));
366 }
367 }
368 }
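/* For illustration, tcg_out_movi with arg = 0x12345678 (which does not
   fit in 14 bits):
     hi = 0x12345678 >> 11   = 0x2468a   (top 21 bits)
     lo = 0x12345678 & 0x7ff = 0x678     (low 11 bits)
   LDIL sets ret = hi << 11 = 0x12345000, and the LDO adds the remaining
   0x678, yielding 0x12345678.  */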
369
370 static void tcg_out_ldst(TCGContext *s, int ret, int addr,
371 tcg_target_long offset, int op)
372 {
373 if (!check_fit_tl(offset, 14)) {
374 uint32_t hi, lo, op;
375
376 hi = offset >> 11;
377 lo = offset & 0x7ff;
378
379 if (addr == TCG_REG_R0) {
380 op = INSN_LDIL | INSN_R2(TCG_REG_R1);
381 } else {
382 op = INSN_ADDIL | INSN_R2(addr);
383 }
384 tcg_out32(s, op | reassemble_21(hi));
385
386 addr = TCG_REG_R1;
387 offset = lo;
388 }
389
390 if (ret != addr || offset != 0 || op != INSN_LDO) {
391 tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
392 }
393 }
394
395 /* This function is required by tcg.c. */
396 static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
397 TCGReg arg1, tcg_target_long arg2)
398 {
399 tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
400 }
401
402 /* This function is required by tcg.c. */
403 static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg ret,
404 TCGReg arg1, tcg_target_long arg2)
405 {
406 tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
407 }
408
409 static void tcg_out_ldst_index(TCGContext *s, int data,
410 int base, int index, int op)
411 {
412 tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
413 }
414
415 static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
416 tcg_target_long val)
417 {
418 tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
419 }
420
421 /* This function is required by tcg.c. */
422 static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
423 {
424 tcg_out_addi2(s, reg, reg, val);
425 }
426
427 static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
428 {
429 tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
430 }
431
432 static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
433 tcg_target_long val, int op)
434 {
435 assert(check_fit_tl(val, 11));
436 tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
437 }
438
439 static inline void tcg_out_nop(TCGContext *s)
440 {
441 tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
442 }
443
444 static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
445 {
446 tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
447 }
448
449 /* Extract LEN bits at position OFS from ARG and place in RET.
450 Note that here the bit ordering is reversed from the PA-RISC
451 standard, such that the right-most bit is 0. */
452 static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
453 unsigned ofs, unsigned len, int sign)
454 {
455 assert(ofs < 32 && len <= 32 - ofs);
456 tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
457 | INSN_R1(ret) | INSN_R2(arg)
458 | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
459 }
460
461 /* Likewise with OFS interpreted little-endian. */
462 static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
463 unsigned ofs, unsigned len)
464 {
465 assert(ofs < 32 && len <= 32 - ofs);
466 tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
467 | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
468 }
469
470 static inline void tcg_out_depi(TCGContext *s, int ret, int arg,
471 unsigned ofs, unsigned len)
472 {
473 assert(ofs < 32 && len <= 32 - ofs);
474 tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(arg)
475 | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
476 }
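/* For illustration of the bit-order conversion: PA-RISC numbers bit 0
   as the most significant bit, so a little-endian offset OFS maps to
   PA position 31 - ofs.  E.g. tcg_out_extr(s, ret, arg, 8, 16, 0)
   emits EXTRU arg,23,16,ret, which computes ret = (arg >> 8) & 0xffff.  */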
477
478 static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
479 unsigned count)
480 {
481 assert(count < 32);
482 tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
483 | INSN_SHDEP_CP(count));
484 }
485
486 static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
487 {
488 tcg_out_mtctl_sar(s, creg);
489 tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
490 }
491
492 static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
493 {
494 int bs0, bs1;
495
496 /* Note that the argument is constrained to match or_mask_p. */
497 for (bs0 = 0; bs0 < 32; bs0++) {
498 if ((m & (1u << bs0)) != 0) {
499 break;
500 }
501 }
502 for (bs1 = bs0; bs1 < 32; bs1++) {
503 if ((m & (1u << bs1)) == 0) {
504 break;
505 }
506 }
507 assert(bs1 == 32 || (1ul << bs1) > m);
508
509 tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
510 tcg_out_depi(s, ret, -1, bs0, bs1 - bs0);
511 }
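/* For illustration, tcg_out_ori with m = 0x0ff0: bs0 = 4 (first set
   bit), bs1 = 12 (first clear bit above it), so a single
   tcg_out_depi(s, ret, -1, 4, 8) sets bits 4..11 of ret.  */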
512
513 static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
514 {
515 int ls0, ls1, ms0;
516
517 /* Note that the argument is constrained to match and_mask_p. */
518 for (ls0 = 0; ls0 < 32; ls0++) {
519 if ((m & (1u << ls0)) == 0) {
520 break;
521 }
522 }
523 for (ls1 = ls0; ls1 < 32; ls1++) {
524 if ((m & (1u << ls1)) != 0) {
525 break;
526 }
527 }
528 for (ms0 = ls1; ms0 < 32; ms0++) {
529 if ((m & (1u << ms0)) == 0) {
530 break;
531 }
532 }
533 assert(ms0 == 32);
534
535 if (ls1 == 32) {
536 tcg_out_extr(s, ret, arg, 0, ls0, 0);
537 } else {
538 tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
539 tcg_out_depi(s, ret, 0, ls0, ls1 - ls0);
540 }
541 }
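/* For illustration, two masks accepted by and_mask_p:
     m = 0x0000ffff: ls0 = 16, ls1 = 32, so a single EXTRU keeps the
       low 16 bits.
     m = 0xffff00ff: ls0 = 8, ls1 = 16, so tcg_out_depi(s, ret, 0, 8, 8)
       clears bits 8..15 after the copy.  */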
542
543 static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
544 {
545 tcg_out_extr(s, ret, arg, 0, 8, 1);
546 }
547
548 static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
549 {
550 tcg_out_extr(s, ret, arg, 0, 16, 1);
551 }
552
553 static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
554 {
555 count &= 31;
556 tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
557 | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
558 }
559
560 static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
561 {
562 tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
563 tcg_out_mtctl_sar(s, TCG_REG_R20);
564 tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
565 }
566
567 static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
568 {
569 count &= 31;
570 tcg_out_extr(s, ret, arg, count, 32 - count, 0);
571 }
572
573 static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
574 {
575 tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
576 }
577
578 static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
579 {
580 count &= 31;
581 tcg_out_extr(s, ret, arg, count, 32 - count, 1);
582 }
583
584 static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
585 {
586 tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
587 tcg_out_mtctl_sar(s, TCG_REG_R20);
588 tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
589 }
590
591 static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
592 {
593 count &= 31;
594 tcg_out_shd(s, ret, arg, arg, 32 - count);
595 }
596
597 static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
598 {
599 tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
600 tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
601 }
602
603 static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
604 {
605 count &= 31;
606 tcg_out_shd(s, ret, arg, arg, count);
607 }
608
609 static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
610 {
611 tcg_out_vshd(s, ret, arg, arg, creg);
612 }
613
614 static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
615 {
616 if (ret != arg) {
617 tcg_out_mov(s, TCG_TYPE_I32, ret, arg); /* arg = xxAB */
618 }
619 tcg_out_dep(s, ret, ret, 16, 8); /* ret = xBAB */
620 tcg_out_extr(s, ret, ret, 8, 16, sign); /* ret = ..BA */
621 }
622
623 static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
624 {
625 /* arg = ABCD */
626 tcg_out_rotri(s, temp, arg, 16); /* temp = CDAB */
627 tcg_out_dep(s, temp, temp, 16, 8); /* temp = CBAB */
628 tcg_out_shd(s, ret, arg, temp, 8); /* ret = DCBA */
629 }
630
631 static void tcg_out_call(TCGContext *s, const void *func)
632 {
633 tcg_target_long val, hi, lo, disp;
634
635 val = (uint32_t)__canonicalize_funcptr_for_compare(func);
636 disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;
637
638 if (check_fit_tl(disp, 17)) {
639 tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
640 } else {
641 hi = val >> 11;
642 lo = val & 0x7ff;
643
644 tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
645 tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
646 | reassemble_17(lo >> 2));
647 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_RP, TCG_REG_R31);
648 }
649 }
650
651 static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
652 int arg1, int arg2)
653 {
654 /* Store both words into the stack for copy to the FPU. */
655 tcg_out_ldst(s, arg1, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_STW);
656 tcg_out_ldst(s, arg2, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4, INSN_STW);
657
658 /* Load both words into the FPU at the same time. We get away
659 with this because we can address the left and right half of the
660 FPU registers individually once loaded. */
661 /* fldds stack_temp(sp),fr22 */
662 tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_CALL_STACK)
663 | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
664
665 /* xmpyu fr22r,fr22,fr22 */
666 tcg_out32(s, 0x3ad64796);
667
668 /* Store the 64-bit result back into the stack. */
669 /* fstds fr22,stack_temp(sp) */
670 tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_CALL_STACK)
671 | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));
672
673 /* Load the pieces of the result that the caller requested. */
674 if (reth) {
675 tcg_out_ldst(s, reth, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_LDW);
676 }
677 if (retl) {
678 tcg_out_ldst(s, retl, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4,
679 INSN_LDW);
680 }
681 }
682
683 static void tcg_out_add2(TCGContext *s, int destl, int desth,
684 int al, int ah, int bl, int bh, int blconst)
685 {
686 int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
687
688 if (blconst) {
689 tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
690 } else {
691 tcg_out_arith(s, tmp, al, bl, INSN_ADD);
692 }
693 tcg_out_arith(s, desth, ah, bh, INSN_ADDC);
694
695 tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
696 }
697
698 static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
699 int bl, int bh, int alconst, int blconst)
700 {
701 int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);
702
703 if (alconst) {
704 if (blconst) {
705 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
706 bl = TCG_REG_R20;
707 }
708 tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
709 } else if (blconst) {
710 tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
711 } else {
712 tcg_out_arith(s, tmp, al, bl, INSN_SUB);
713 }
714 tcg_out_arith(s, desth, ah, bh, INSN_SUBB);
715
716 tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
717 }
718
719 static void tcg_out_branch(TCGContext *s, int label_index, int nul)
720 {
721 TCGLabel *l = &s->labels[label_index];
722 uint32_t op = nul ? INSN_BL_N : INSN_BL;
723
724 if (l->has_value) {
725 tcg_target_long val = l->u.value;
726
727 val -= (tcg_target_long)s->code_ptr + 8;
728 val >>= 2;
729 assert(check_fit_tl(val, 17));
730
731 tcg_out32(s, op | reassemble_17(val));
732 } else {
733 /* We need to keep the offset unchanged for retranslation. */
734 uint32_t old_insn = *(uint32_t *)s->code_ptr;
735
736 tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
737 tcg_out32(s, op | (old_insn & 0x1f1ffdu));
738 }
739 }
740
741 static const uint8_t tcg_cond_to_cmp_cond[10] =
742 {
743 [TCG_COND_EQ] = COND_EQ,
744 [TCG_COND_NE] = COND_EQ | COND_FALSE,
745 [TCG_COND_LT] = COND_LT,
746 [TCG_COND_GE] = COND_LT | COND_FALSE,
747 [TCG_COND_LE] = COND_LE,
748 [TCG_COND_GT] = COND_LE | COND_FALSE,
749 [TCG_COND_LTU] = COND_LTU,
750 [TCG_COND_GEU] = COND_LTU | COND_FALSE,
751 [TCG_COND_LEU] = COND_LEU,
752 [TCG_COND_GTU] = COND_LEU | COND_FALSE,
753 };
754
755 static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
756 TCGArg c2, int c2const, int label_index)
757 {
758 TCGLabel *l = &s->labels[label_index];
759 int op, pacond;
760
761 /* Note that COMIB operates as if the immediate is the first
762 operand. We model brcond with the immediate in the second
763 to better match what targets are likely to give us. For
764 consistency, model COMB with reversed operands as well. */
765 pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
766
767 if (c2const) {
768 op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
769 op |= INSN_IM5(c2);
770 } else {
771 op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
772 op |= INSN_R1(c2);
773 }
774 op |= INSN_R2(c1);
775 op |= INSN_COND(pacond & 7);
776
777 if (l->has_value) {
778 tcg_target_long val = l->u.value;
779
780 val -= (tcg_target_long)s->code_ptr + 8;
781 val >>= 2;
782 assert(check_fit_tl(val, 12));
783
784 /* ??? Assume that all branches to defined labels are backward.
785 Which means that if the nul bit is set, the delay slot is
786 executed if the branch is taken, and not executed in fallthru. */
787 tcg_out32(s, op | reassemble_12(val));
788 tcg_out_nop(s);
789 } else {
790 /* We need to keep the offset unchanged for retranslation. */
791 uint32_t old_insn = *(uint32_t *)s->code_ptr;
792
793 tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
794 /* ??? Assume that all branches to undefined labels are forward.
795 Which means that if the nul bit is set, the delay slot is
796 not executed if the branch is taken, which is what we want. */
797 tcg_out32(s, op | 2 | (old_insn & 0x1ffdu));
798 }
799 }
800
801 static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
802 TCGArg c1, TCGArg c2, int c2const)
803 {
804 int op, pacond;
805
806 /* Note that COMICLR operates as if the immediate is the first
807 operand. We model setcond with the immediate in the second
808 to better match what targets are likely to give us. For
809 consistency, model COMCLR with reversed operands as well. */
810 pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];
811
812 if (c2const) {
813 op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
814 } else {
815 op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
816 }
817 op |= INSN_COND(pacond & 7);
818 op |= pacond & COND_FALSE ? 1 << 12 : 0;
819
820 tcg_out32(s, op);
821 }
822
823 static TCGCond const tcg_high_cond[] = {
824 [TCG_COND_EQ] = TCG_COND_EQ,
825 [TCG_COND_NE] = TCG_COND_NE,
826 [TCG_COND_LT] = TCG_COND_LT,
827 [TCG_COND_LE] = TCG_COND_LT,
828 [TCG_COND_GT] = TCG_COND_GT,
829 [TCG_COND_GE] = TCG_COND_GT,
830 [TCG_COND_LTU] = TCG_COND_LTU,
831 [TCG_COND_LEU] = TCG_COND_LTU,
832 [TCG_COND_GTU] = TCG_COND_GTU,
833 [TCG_COND_GEU] = TCG_COND_GTU
834 };
835
836 static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
837 TCGArg bl, int blconst, TCGArg bh, int bhconst,
838 int label_index)
839 {
840 switch (cond) {
841 case TCG_COND_EQ:
842 tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, al, bl, blconst);
843 tcg_out_brcond(s, TCG_COND_EQ, ah, bh, bhconst, label_index);
844 break;
845 case TCG_COND_NE:
846 tcg_out_brcond(s, TCG_COND_NE, al, bl, blconst, label_index);
847 tcg_out_brcond(s, TCG_COND_NE, ah, bh, bhconst, label_index);
848 break;
849 default:
850 tcg_out_brcond(s, tcg_high_cond[cond], ah, bh, bhconst, label_index);
851 tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
852 tcg_out_brcond(s, tcg_unsigned_cond(cond),
853 al, bl, blconst, label_index);
854 break;
855 }
856 }
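/* A note on tcg_out_comclr as used above: COMCLR compares its operands,
   writes R0 (discarding the result), and nullifies the next instruction
   when the condition holds.  So in the EQ case the high-part branch is
   skipped unless al == bl, and in the default case the low-part branch
   is skipped unless ah == bh, the first branch having already handled
   strict inequality of the high parts.  */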
857
858 static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
859 TCGArg c1, TCGArg c2, int c2const)
860 {
861 tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
862 tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
863 }
864
865 static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
866 TCGArg al, TCGArg ah, TCGArg bl, int blconst,
867 TCGArg bh, int bhconst)
868 {
869 int scratch = TCG_REG_R20;
870
871 /* Note that the low parts are fully consumed before scratch is set. */
872 if (ret != ah && (bhconst || ret != bh)) {
873 scratch = ret;
874 }
875
876 switch (cond) {
877 case TCG_COND_EQ:
878 case TCG_COND_NE:
879 tcg_out_setcond(s, cond, scratch, al, bl, blconst);
880 tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
881 tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
882 break;
883
884 case TCG_COND_GE:
885 case TCG_COND_GEU:
886 case TCG_COND_LT:
887 case TCG_COND_LTU:
888 /* Optimize compares with low part zero. */
889 if (bl == 0) {
890 tcg_out_setcond(s, cond, ret, ah, bh, bhconst);
891 return;
892 }
893 /* FALLTHRU */
894
895 case TCG_COND_LE:
896 case TCG_COND_LEU:
897 case TCG_COND_GT:
898 case TCG_COND_GTU:
899 /* <= : ah < bh | (ah == bh && al <= bl) */
900 tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
901 tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
902 tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
903 tcg_out_comclr(s, tcg_invert_cond(tcg_high_cond[cond]),
904 TCG_REG_R0, ah, bh, bhconst);
905 tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
906 break;
907
908 default:
909 tcg_abort();
910 }
911
912 tcg_out_mov(s, TCG_TYPE_I32, ret, scratch);
913 }
914
915 #if defined(CONFIG_SOFTMMU)
916 #include "../../softmmu_defs.h"
917
918 /* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
919 int mmu_idx) */
920 static const void * const qemu_ld_helpers[4] = {
921 helper_ldb_mmu,
922 helper_ldw_mmu,
923 helper_ldl_mmu,
924 helper_ldq_mmu,
925 };
926
927 /* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
928 uintxx_t val, int mmu_idx) */
929 static const void * const qemu_st_helpers[4] = {
930 helper_stb_mmu,
931 helper_stw_mmu,
932 helper_stl_mmu,
933 helper_stq_mmu,
934 };
935
936 /* Load and compare a TLB entry, and branch if TLB miss. OFFSET is set to
937 the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
938 TLB for the memory index. The return value is the offset from ENV
939 contained in R1 afterward (to be used when loading ADDEND); if the
940 return value is 0, R1 is not used. */
941
942 static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
943 int addrhi, int s_bits, int lab_miss, int offset)
944 {
945 int ret;
946
947 /* Extracting the index into the TLB. The "normal C operation" is
948 r1 = addr_reg >> TARGET_PAGE_BITS;
949 r1 &= CPU_TLB_SIZE - 1;
950 r1 <<= CPU_TLB_ENTRY_BITS;
951 What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
952 and place them at CPU_TLB_ENTRY_BITS. We can combine the first two
953 operations with an EXTRU. Unfortunately, the current value of
954 CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
955 add that follows. */
956 tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
957 tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
958 tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);
959
960 /* Make sure that both the addr_{read,write} and addend can be
961 read with a 14-bit offset from the same base register. */
962 if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
963 ret = 0;
964 } else {
965 ret = (offset + 0x400) & ~0x7ff;
966 offset = ret - offset;
967 tcg_out_addi2(s, TCG_REG_R1, r1, ret);
968 r1 = TCG_REG_R1;
969 }
970
971 /* Load the entry from the computed slot. */
972 if (TARGET_LONG_BITS == 64) {
973 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
974 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
975 } else {
976 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
977 }
978
979 /* Compute the value that ought to appear in the TLB for a hit, namely,
980 the page of the address. We include the low N bits of the address
981 to catch unaligned accesses and force them onto the slow path. Do
982 this computation after having issued the load from the TLB slot to
983 give the load time to complete. */
984 tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));
985
986 /* If not equal, jump to lab_miss. */
987 if (TARGET_LONG_BITS == 64) {
988 tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
989 r0, 0, addrhi, 0, lab_miss);
990 } else {
991 tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
992 }
993
994 return ret;
995 }
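/* For illustration of the rebasing above, suppose the incoming OFFSET
   were 0x2345 (a hypothetical value): ret = (0x2345 + 0x400) & ~0x7ff
   = 0x2000, the new offset becomes 0x2000 - 0x2345 = -0x345, and
   R1 = r1 + 0x2000, so both the comparator and the later addend load
   stay within a 14-bit displacement of R1.  */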
996
997 static int tcg_out_arg_reg32(TCGContext *s, int argno, TCGArg v, bool vconst)
998 {
999 if (argno < 4) {
1000 if (vconst) {
1001 tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
1002 } else {
1003 tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
1004 }
1005 } else {
1006 if (vconst && v != 0) {
1007 tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, v);
1008 v = TCG_REG_R20;
1009 }
1010 tcg_out_st(s, TCG_TYPE_I32, v, TCG_REG_CALL_STACK,
1011 TCG_TARGET_CALL_STACK_OFFSET - ((argno - 3) * 4));
1012 }
1013 return argno + 1;
1014 }
1015
1016 static int tcg_out_arg_reg64(TCGContext *s, int argno, TCGArg vl, TCGArg vh)
1017 {
1018 /* 64-bit arguments must go in even reg pairs and stack slots. */
1019 if (argno & 1) {
1020 argno++;
1021 }
1022 argno = tcg_out_arg_reg32(s, argno, vl, false);
1023 argno = tcg_out_arg_reg32(s, argno, vh, false);
1024 return argno;
1025 }
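/* For illustration: marshalling a 64-bit guest address for a load
   helper goes env -> %r26 (argno 0); the address pair is then bumped
   from argno 1 to the even slot 2, giving lo -> %r24 and hi -> %r23;
   and mem_index lands on the stack as argno 4, in the first slot below
   TCG_TARGET_CALL_STACK_OFFSET.  */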
1026 #endif
1027
1028 static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg, int datahi_reg,
1029 int addr_reg, int addend_reg, int opc)
1030 {
1031 #ifdef TARGET_WORDS_BIGENDIAN
1032 const int bswap = 0;
1033 #else
1034 const int bswap = 1;
1035 #endif
1036
1037 switch (opc) {
1038 case 0:
1039 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
1040 break;
1041 case 0 | 4:
1042 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
1043 tcg_out_ext8s(s, datalo_reg, datalo_reg);
1044 break;
1045 case 1:
1046 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
1047 if (bswap) {
1048 tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
1049 }
1050 break;
1051 case 1 | 4:
1052 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
1053 if (bswap) {
1054 tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
1055 } else {
1056 tcg_out_ext16s(s, datalo_reg, datalo_reg);
1057 }
1058 break;
1059 case 2:
1060 tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
1061 if (bswap) {
1062 tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
1063 }
1064 break;
1065 case 3:
1066 if (bswap) {
1067 int t = datahi_reg;
1068 datahi_reg = datalo_reg;
1069 datalo_reg = t;
1070 }
1071 /* We can't access the low-part with a reg+reg addressing mode,
1072 so perform the addition now and use reg_ofs addressing mode. */
1073 if (addend_reg != TCG_REG_R0) {
1074 tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
1075 addr_reg = TCG_REG_R20;
1076 }
1077 /* Make sure not to clobber the base register. */
1078 if (datahi_reg == addr_reg) {
1079 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
1080 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
1081 } else {
1082 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
1083 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
1084 }
1085 if (bswap) {
1086 tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
1087 tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
1088 }
1089 break;
1090 default:
1091 tcg_abort();
1092 }
1093 }
1094
1095 static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
1096 {
1097 int datalo_reg = *args++;
1098 /* Note that datahi_reg is only used for 64-bit loads. */
1099 int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
1100 int addrlo_reg = *args++;
1101
1102 #if defined(CONFIG_SOFTMMU)
1103 /* Note that addrhi_reg is only used for 64-bit guests. */
1104 int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
1105 int mem_index = *args;
1106 int lab1, lab2, argno, offset;
1107
1108 lab1 = gen_new_label();
1109 lab2 = gen_new_label();
1110
1111 offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
1112 offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
1113 addrhi_reg, opc & 3, lab1, offset);
1114
1115 /* TLB Hit. */
1116 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
1117 (offset ? TCG_REG_R1 : TCG_REG_R25),
1118 offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
1119 tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
1120 TCG_REG_R20, opc);
1121 tcg_out_branch(s, lab2, 1);
1122
1123 /* TLB Miss. */
1124 /* label1: */
1125 tcg_out_label(s, lab1, s->code_ptr);
1126
1127 argno = 0;
1128 argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
1129 if (TARGET_LONG_BITS == 64) {
1130 argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
1131 } else {
1132 argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
1133 }
1134 argno = tcg_out_arg_reg32(s, argno, mem_index, true);
1135
1136 tcg_out_call(s, qemu_ld_helpers[opc & 3]);
1137
1138 switch (opc) {
1139 case 0:
1140 tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
1141 break;
1142 case 0 | 4:
1143 tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
1144 break;
1145 case 1:
1146 tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
1147 break;
1148 case 1 | 4:
1149 tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
1150 break;
1151 case 2:
1152 case 2 | 4:
1153 tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET0);
1154 break;
1155 case 3:
1156 tcg_out_mov(s, TCG_TYPE_I32, datahi_reg, TCG_REG_RET0);
1157 tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET1);
1158 break;
1159 default:
1160 tcg_abort();
1161 }
1162
1163 /* label2: */
1164 tcg_out_label(s, lab2, s->code_ptr);
1165 #else
1166 tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
1167 (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
1168 #endif
1169 }
1170
1171 static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg,
1172 int datahi_reg, int addr_reg, int opc)
1173 {
1174 #ifdef TARGET_WORDS_BIGENDIAN
1175 const int bswap = 0;
1176 #else
1177 const int bswap = 1;
1178 #endif
1179
1180 switch (opc) {
1181 case 0:
1182 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
1183 break;
1184 case 1:
1185 if (bswap) {
1186 tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
1187 datalo_reg = TCG_REG_R20;
1188 }
1189 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
1190 break;
1191 case 2:
1192 if (bswap) {
1193 tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
1194 datalo_reg = TCG_REG_R20;
1195 }
1196 tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
1197 break;
1198 case 3:
1199 if (bswap) {
1200 tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
1201 tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
1202 datahi_reg = TCG_REG_R20;
1203 datalo_reg = TCG_REG_R23;
1204 }
1205 tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
1206 tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
1207 break;
1208 default:
1209 tcg_abort();
1210 }
1211
1212 }
1213
1214 static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
1215 {
1216 int datalo_reg = *args++;
1217 /* Note that datahi_reg is only used for 64-bit stores. */
1218 int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
1219 int addrlo_reg = *args++;
1220
1221 #if defined(CONFIG_SOFTMMU)
1222 /* Note that addrhi_reg is only used for 64-bit guests. */
1223 int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
1224 int mem_index = *args;
1225 int lab1, lab2, argno, next, offset;
1226
1227 lab1 = gen_new_label();
1228 lab2 = gen_new_label();
1229
1230 offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
1231 offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
1232 addrhi_reg, opc, lab1, offset);
1233
1234 /* TLB Hit. */
1235 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
1236 (offset ? TCG_REG_R1 : TCG_REG_R25),
1237 offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
1238
1239 /* There are no indexed stores, so we must do this addition explicitly.
1240 Careful to avoid R20, which is used for the bswaps to follow. */
1241 tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
1242 tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
1243 tcg_out_branch(s, lab2, 1);
1244
1245 /* TLB Miss. */
1246 /* label1: */
1247 tcg_out_label(s, lab1, s->code_ptr);
1248
1249 argno = 0;
1250 argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
1251 if (TARGET_LONG_BITS == 64) {
1252 argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
1253 } else {
1254 argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
1255 }
1256
1257 next = (argno < 4 ? tcg_target_call_iarg_regs[argno] : TCG_REG_R20);
1258 switch(opc) {
1259 case 0:
1260 tcg_out_andi(s, next, datalo_reg, 0xff);
1261 argno = tcg_out_arg_reg32(s, argno, next, false);
1262 break;
1263 case 1:
1264 tcg_out_andi(s, next, datalo_reg, 0xffff);
1265 argno = tcg_out_arg_reg32(s, argno, next, false);
1266 break;
1267 case 2:
1268 argno = tcg_out_arg_reg32(s, argno, datalo_reg, false);
1269 break;
1270 case 3:
1271 argno = tcg_out_arg_reg64(s, argno, datalo_reg, datahi_reg);
1272 break;
1273 default:
1274 tcg_abort();
1275 }
1276 argno = tcg_out_arg_reg32(s, argno, mem_index, true);
1277
1278 tcg_out_call(s, qemu_st_helpers[opc]);
1279
1280 /* label2: */
1281 tcg_out_label(s, lab2, s->code_ptr);
1282 #else
1283 /* There are no indexed stores, so if GUEST_BASE is set we must do
1284 the add explicitly. Careful to avoid R20, which is used for the
1285 bswaps to follow. */
1286 if (GUEST_BASE != 0) {
1287 tcg_out_arith(s, TCG_REG_R31, addrlo_reg,
1288 TCG_GUEST_BASE_REG, INSN_ADDL);
1289 addrlo_reg = TCG_REG_R31;
1290 }
1291 tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
1292 #endif
1293 }
1294
1295 static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
1296 {
1297 if (!check_fit_tl(arg, 14)) {
1298 uint32_t hi, lo;
1299 hi = arg & ~0x7ff;
1300 lo = arg & 0x7ff;
1301 if (lo) {
1302 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
1303 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
1304 tcg_out_addi(s, TCG_REG_RET0, lo);
1305 return;
1306 }
1307 arg = hi;
1308 }
1309 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
1310 tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
1311 }
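/* A note on the ordering above: BV without the ,n completer has a delay
   slot, so the instruction emitted after it still executes before the
   branch target is reached.  The hi/lo split guarantees that whatever
   lands in the delay slot (the LDO of the addi, or the movi of a value
   with the low 11 bits clear) is exactly one instruction.  */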
1312
1313 static void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
1314 {
1315 if (s->tb_jmp_offset) {
1316 /* direct jump method */
1317 fprintf(stderr, "goto_tb direct\n");
1318 tcg_abort();
1319 } else {
1320 /* indirect jump method */
1321 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0,
1322 (tcg_target_long)(s->tb_next + arg));
1323 tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20));
1324 }
1325 s->tb_next_offset[arg] = s->code_ptr - s->code_buf;
1326 }
1327
1328 static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
1329 const int *const_args)
1330 {
1331 switch (opc) {
1332 case INDEX_op_exit_tb:
1333 tcg_out_exit_tb(s, args[0]);
1334 break;
1335 case INDEX_op_goto_tb:
1336 tcg_out_goto_tb(s, args[0]);
1337 break;
1338
1339 case INDEX_op_call:
1340 if (const_args[0]) {
1341 tcg_out_call(s, (void *)args[0]);
1342 } else {
1343 /* ??? FIXME: the value in the register in args[0] is almost
1344 certainly a procedure descriptor, not a code address. We
1345 probably need to use the millicode $$dyncall routine. */
1346 tcg_abort();
1347 }
1348 break;
1349
1350 case INDEX_op_jmp:
1351 fprintf(stderr, "unimplemented jmp\n");
1352 tcg_abort();
1353 break;
1354
1355 case INDEX_op_br:
1356 tcg_out_branch(s, args[0], 1);
1357 break;
1358
1359 case INDEX_op_movi_i32:
1360 tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
1361 break;
1362
1363 case INDEX_op_ld8u_i32:
1364 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
1365 break;
1366 case INDEX_op_ld8s_i32:
1367 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB);
1368 tcg_out_ext8s(s, args[0], args[0]);
1369 break;
1370 case INDEX_op_ld16u_i32:
1371 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
1372 break;
1373 case INDEX_op_ld16s_i32:
1374 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH);
1375 tcg_out_ext16s(s, args[0], args[0]);
1376 break;
1377 case INDEX_op_ld_i32:
1378 tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW);
1379 break;
1380
1381 case INDEX_op_st8_i32:
1382 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB);
1383 break;
1384 case INDEX_op_st16_i32:
1385 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH);
1386 break;
1387 case INDEX_op_st_i32:
1388 tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW);
1389 break;
1390
1391 case INDEX_op_add_i32:
1392 if (const_args[2]) {
1393 tcg_out_addi2(s, args[0], args[1], args[2]);
1394 } else {
1395 tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL);
1396 }
1397 break;
1398
1399 case INDEX_op_sub_i32:
1400 if (const_args[1]) {
1401 if (const_args[2]) {
1402 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]);
1403 } else {
1404 /* Recall that SUBI is a reversed subtract. */
1405 tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI);
1406 }
1407 } else if (const_args[2]) {
1408 tcg_out_addi2(s, args[0], args[1], -args[2]);
1409 } else {
1410 tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB);
1411 }
1412 break;
1413
1414 case INDEX_op_and_i32:
1415 if (const_args[2]) {
1416 tcg_out_andi(s, args[0], args[1], args[2]);
1417 } else {
1418 tcg_out_arith(s, args[0], args[1], args[2], INSN_AND);
1419 }
1420 break;
1421
1422 case INDEX_op_or_i32:
1423 if (const_args[2]) {
1424 tcg_out_ori(s, args[0], args[1], args[2]);
1425 } else {
1426 tcg_out_arith(s, args[0], args[1], args[2], INSN_OR);
1427 }
1428 break;
1429
1430 case INDEX_op_xor_i32:
1431 tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR);
1432 break;
1433
1434 case INDEX_op_andc_i32:
1435 if (const_args[2]) {
1436 tcg_out_andi(s, args[0], args[1], ~args[2]);
1437 } else {
1438 tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM);
1439 }
1440 break;
1441
1442 case INDEX_op_shl_i32:
1443 if (const_args[2]) {
1444 tcg_out_shli(s, args[0], args[1], args[2]);
1445 } else {
1446 tcg_out_shl(s, args[0], args[1], args[2]);
1447 }
1448 break;
1449
1450 case INDEX_op_shr_i32:
1451 if (const_args[2]) {
1452 tcg_out_shri(s, args[0], args[1], args[2]);
1453 } else {
1454 tcg_out_shr(s, args[0], args[1], args[2]);
1455 }
1456 break;
1457
1458 case INDEX_op_sar_i32:
1459 if (const_args[2]) {
1460 tcg_out_sari(s, args[0], args[1], args[2]);
1461 } else {
1462 tcg_out_sar(s, args[0], args[1], args[2]);
1463 }
1464 break;
1465
1466 case INDEX_op_rotl_i32:
1467 if (const_args[2]) {
1468 tcg_out_rotli(s, args[0], args[1], args[2]);
1469 } else {
1470 tcg_out_rotl(s, args[0], args[1], args[2]);
1471 }
1472 break;
1473
1474 case INDEX_op_rotr_i32:
1475 if (const_args[2]) {
1476 tcg_out_rotri(s, args[0], args[1], args[2]);
1477 } else {
1478 tcg_out_rotr(s, args[0], args[1], args[2]);
1479 }
1480 break;
1481
1482 case INDEX_op_mul_i32:
1483 tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]);
1484 break;
1485 case INDEX_op_mulu2_i32:
1486 tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]);
1487 break;
1488
1489 case INDEX_op_bswap16_i32:
1490 tcg_out_bswap16(s, args[0], args[1], 0);
1491 break;
1492 case INDEX_op_bswap32_i32:
1493 tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20);
1494 break;
1495
1496 case INDEX_op_not_i32:
1497 tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI);
1498 break;
1499 case INDEX_op_ext8s_i32:
1500 tcg_out_ext8s(s, args[0], args[1]);
1501 break;
1502 case INDEX_op_ext16s_i32:
1503 tcg_out_ext16s(s, args[0], args[1]);
1504 break;
1505
1506 case INDEX_op_brcond_i32:
1507 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]);
1508 break;
1509 case INDEX_op_brcond2_i32:
1510 tcg_out_brcond2(s, args[4], args[0], args[1],
1511 args[2], const_args[2],
1512 args[3], const_args[3], args[5]);
1513 break;
1514
1515 case INDEX_op_setcond_i32:
1516 tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]);
1517 break;
1518 case INDEX_op_setcond2_i32:
1519 tcg_out_setcond2(s, args[5], args[0], args[1], args[2],
1520 args[3], const_args[3], args[4], const_args[4]);
1521 break;
1522
1523 case INDEX_op_add2_i32:
1524 tcg_out_add2(s, args[0], args[1], args[2], args[3],
1525 args[4], args[5], const_args[4]);
1526 break;
1527
1528 case INDEX_op_sub2_i32:
1529 tcg_out_sub2(s, args[0], args[1], args[2], args[3],
1530 args[4], args[5], const_args[2], const_args[4]);
1531 break;
1532
1533 case INDEX_op_deposit_i32:
1534 if (const_args[2]) {
1535 tcg_out_depi(s, args[0], args[2], args[3], args[4]);
1536 } else {
1537 tcg_out_dep(s, args[0], args[2], args[3], args[4]);
1538 }
1539 break;
1540
1541 case INDEX_op_qemu_ld8u:
1542 tcg_out_qemu_ld(s, args, 0);
1543 break;
1544 case INDEX_op_qemu_ld8s:
1545 tcg_out_qemu_ld(s, args, 0 | 4);
1546 break;
1547 case INDEX_op_qemu_ld16u:
1548 tcg_out_qemu_ld(s, args, 1);
1549 break;
1550 case INDEX_op_qemu_ld16s:
1551 tcg_out_qemu_ld(s, args, 1 | 4);
1552 break;
1553 case INDEX_op_qemu_ld32:
1554 tcg_out_qemu_ld(s, args, 2);
1555 break;
1556 case INDEX_op_qemu_ld64:
1557 tcg_out_qemu_ld(s, args, 3);
1558 break;
1559
1560 case INDEX_op_qemu_st8:
1561 tcg_out_qemu_st(s, args, 0);
1562 break;
1563 case INDEX_op_qemu_st16:
1564 tcg_out_qemu_st(s, args, 1);
1565 break;
1566 case INDEX_op_qemu_st32:
1567 tcg_out_qemu_st(s, args, 2);
1568 break;
1569 case INDEX_op_qemu_st64:
1570 tcg_out_qemu_st(s, args, 3);
1571 break;
1572
1573 default:
1574 fprintf(stderr, "unknown opcode 0x%x\n", opc);
1575 tcg_abort();
1576 }
1577 }
1578
1579 static const TCGTargetOpDef hppa_op_defs[] = {
1580 { INDEX_op_exit_tb, { } },
1581 { INDEX_op_goto_tb, { } },
1582
1583 { INDEX_op_call, { "ri" } },
1584 { INDEX_op_jmp, { "r" } },
1585 { INDEX_op_br, { } },
1586
1587 { INDEX_op_mov_i32, { "r", "r" } },
1588 { INDEX_op_movi_i32, { "r" } },
1589
1590 { INDEX_op_ld8u_i32, { "r", "r" } },
1591 { INDEX_op_ld8s_i32, { "r", "r" } },
1592 { INDEX_op_ld16u_i32, { "r", "r" } },
1593 { INDEX_op_ld16s_i32, { "r", "r" } },
1594 { INDEX_op_ld_i32, { "r", "r" } },
1595 { INDEX_op_st8_i32, { "rZ", "r" } },
1596 { INDEX_op_st16_i32, { "rZ", "r" } },
1597 { INDEX_op_st_i32, { "rZ", "r" } },
1598
1599 { INDEX_op_add_i32, { "r", "rZ", "ri" } },
1600 { INDEX_op_sub_i32, { "r", "rI", "ri" } },
1601 { INDEX_op_and_i32, { "r", "rZ", "rM" } },
1602 { INDEX_op_or_i32, { "r", "rZ", "rO" } },
1603 { INDEX_op_xor_i32, { "r", "rZ", "rZ" } },
1604 /* Note that the second argument will be inverted, which means
1605 we want a constant whose inversion matches M, and that O = ~M.
1606 See the implementation of and_mask_p. */
1607 { INDEX_op_andc_i32, { "r", "rZ", "rO" } },
1608
1609 { INDEX_op_mul_i32, { "r", "r", "r" } },
1610 { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } },
1611
1612 { INDEX_op_shl_i32, { "r", "r", "ri" } },
1613 { INDEX_op_shr_i32, { "r", "r", "ri" } },
1614 { INDEX_op_sar_i32, { "r", "r", "ri" } },
1615 { INDEX_op_rotl_i32, { "r", "r", "ri" } },
1616 { INDEX_op_rotr_i32, { "r", "r", "ri" } },
1617
1618 { INDEX_op_bswap16_i32, { "r", "r" } },
1619 { INDEX_op_bswap32_i32, { "r", "r" } },
1620 { INDEX_op_not_i32, { "r", "r" } },
1621
1622 { INDEX_op_ext8s_i32, { "r", "r" } },
1623 { INDEX_op_ext16s_i32, { "r", "r" } },
1624
1625 { INDEX_op_brcond_i32, { "rZ", "rJ" } },
1626 { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } },
1627
1628 { INDEX_op_setcond_i32, { "r", "rZ", "rI" } },
1629 { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } },
1630
1631 { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } },
1632 { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } },
1633
1634 { INDEX_op_deposit_i32, { "r", "0", "rJ" } },
1635
1636 #if TARGET_LONG_BITS == 32
1637 { INDEX_op_qemu_ld8u, { "r", "L" } },
1638 { INDEX_op_qemu_ld8s, { "r", "L" } },
1639 { INDEX_op_qemu_ld16u, { "r", "L" } },
1640 { INDEX_op_qemu_ld16s, { "r", "L" } },
1641 { INDEX_op_qemu_ld32, { "r", "L" } },
1642 { INDEX_op_qemu_ld64, { "r", "r", "L" } },
1643
1644 { INDEX_op_qemu_st8, { "LZ", "L" } },
1645 { INDEX_op_qemu_st16, { "LZ", "L" } },
1646 { INDEX_op_qemu_st32, { "LZ", "L" } },
1647 { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } },
1648 #else
1649 { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
1650 { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
1651 { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
1652 { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
1653 { INDEX_op_qemu_ld32, { "r", "L", "L" } },
1654 { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },
1655
1656 { INDEX_op_qemu_st8, { "LZ", "L", "L" } },
1657 { INDEX_op_qemu_st16, { "LZ", "L", "L" } },
1658 { INDEX_op_qemu_st32, { "LZ", "L", "L" } },
1659 { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } },
1660 #endif
1661 { -1 },
1662 };
1663
1664 static int tcg_target_callee_save_regs[] = {
1665 /* R2, the return address register, is saved specially
1666 in the caller's frame. */
1667 /* R3, the frame pointer, is not currently modified. */
1668 TCG_REG_R4,
1669 TCG_REG_R5,
1670 TCG_REG_R6,
1671 TCG_REG_R7,
1672 TCG_REG_R8,
1673 TCG_REG_R9,
1674 TCG_REG_R10,
1675 TCG_REG_R11,
1676 TCG_REG_R12,
1677 TCG_REG_R13,
1678 TCG_REG_R14,
1679 TCG_REG_R15,
1680 TCG_REG_R16,
1681 TCG_REG_R17, /* R17 is the global env. */
1682 TCG_REG_R18
1683 };
1684
1685 #define FRAME_SIZE ((-TCG_TARGET_CALL_STACK_OFFSET \
1686 + TCG_TARGET_STATIC_CALL_ARGS_SIZE \
1687 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4 \
1688 + CPU_TEMP_BUF_NLONGS * sizeof(long) \
1689 + TCG_TARGET_STACK_ALIGN - 1) \
1690 & -TCG_TARGET_STACK_ALIGN)
1691
1692 static void tcg_target_qemu_prologue(TCGContext *s)
1693 {
1694 int frame_size, i;
1695
1696 frame_size = FRAME_SIZE;
1697
1698 /* The return address is stored in the caller's frame. */
1699 tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, -20);
1700
1701 /* Allocate stack frame, saving the first register at the same time. */
1702 tcg_out_ldst(s, tcg_target_callee_save_regs[0],
1703 TCG_REG_CALL_STACK, frame_size, INSN_STWM);
1704
1705 /* Save all callee saved registers. */
1706 for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1707 tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
1708 TCG_REG_CALL_STACK, -frame_size + i * 4);
1709 }
1710
1711 /* Record the location of the TCG temps. */
1712 tcg_set_frame(s, TCG_REG_CALL_STACK, -frame_size + i * 4,
1713 CPU_TEMP_BUF_NLONGS * sizeof(long));
1714
1715 #ifdef CONFIG_USE_GUEST_BASE
1716 if (GUEST_BASE != 0) {
1717 tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE);
1718 tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
1719 }
1720 #endif
1721
1722 tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
1723
1724 /* Jump to TB, and adjust R18 to be the return address. */
1725 tcg_out32(s, INSN_BLE_SR4 | INSN_R2(tcg_target_call_iarg_regs[1]));
1726 tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31);
1727
1728 /* Restore callee saved registers. */
1729 tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK,
1730 -frame_size - 20);
1731 for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
1732 tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i],
1733 TCG_REG_CALL_STACK, -frame_size + i * 4);
1734 }
1735
1736 /* Deallocate stack frame and return. */
1737 tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP));
1738 tcg_out_ldst(s, tcg_target_callee_save_regs[0],
1739 TCG_REG_CALL_STACK, -frame_size, INSN_LDWM);
1740 }
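/* A note on the frame handling above: the PA-RISC stack grows upward,
   which is why the prologue's STWM uses a positive displacement (store
   the first callee-saved register at the old %sp, then advance %sp by
   frame_size) while the epilogue's LDWM uses a negative one (back %sp
   off first, then reload from the new %sp).  The saved registers
   therefore sit at negative offsets from the adjusted %sp.  */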
1741
1742 static void tcg_target_init(TCGContext *s)
1743 {
1744 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
1745
1746 tcg_regset_clear(tcg_target_call_clobber_regs);
1747 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20);
1748 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21);
1749 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22);
1750 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23);
1751 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24);
1752 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25);
1753 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26);
1754 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0);
1755 tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1);
1756
1757 tcg_regset_clear(s->reserved_regs);
1758 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* hardwired to zero */
1759 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* addil target */
1760 tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP); /* link register */
1761 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* frame pointer */
1762 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
1763 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
1764 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
1765 tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP); /* data pointer */
1766 tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); /* stack pointer */
1767 tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */
1768
1769 tcg_add_target_add_op_defs(hppa_op_defs);
1770 }
1771
1772 typedef struct {
1773 uint32_t len __attribute__((aligned((sizeof(void *)))));
1774 uint32_t id;
1775 uint8_t version;
1776 char augmentation[1];
1777 uint8_t code_align;
1778 uint8_t data_align;
1779 uint8_t return_column;
1780 } DebugFrameCIE;
1781
1782 typedef struct {
1783 uint32_t len __attribute__((aligned((sizeof(void *)))));
1784 uint32_t cie_offset;
1785 tcg_target_long func_start __attribute__((packed));
1786 tcg_target_long func_len __attribute__((packed));
1787 uint8_t def_cfa[4];
1788 uint8_t ret_ofs[3];
1789 uint8_t reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
1790 } DebugFrameFDE;
1791
1792 typedef struct {
1793 DebugFrameCIE cie;
1794 DebugFrameFDE fde;
1795 } DebugFrame;
1796
1797 #define ELF_HOST_MACHINE EM_PARISC
1798 #define ELF_HOST_FLAGS EFA_PARISC_1_1
1799
1800 /* ??? BFD (and thus GDB) wants very much to distinguish between HPUX
1801 and other extensions. We don't really care, but if we don't set this
1802 to *something* then the object file won't be properly matched. */
1803 #define ELF_OSABI ELFOSABI_LINUX
1804
1805 static DebugFrame debug_frame = {
1806 .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */
1807 .cie.id = -1,
1808 .cie.version = 1,
1809 .cie.code_align = 1,
1810 .cie.data_align = 1,
1811 .cie.return_column = 2,
1812
1813 .fde.len = sizeof(DebugFrameFDE)-4, /* length after .len member */
1814 .fde.def_cfa = {
1815 0x12, 30, /* DW_CFA_def_cfa_sf sp, ... */
1816 (-FRAME_SIZE & 0x7f) | 0x80, /* ... sleb128 -FRAME_SIZE */
1817 (-FRAME_SIZE >> 7) & 0x7f
1818 },
1819 .fde.ret_ofs = {
1820 0x11, 2, (-20 / 4) & 0x7f /* DW_CFA_offset_extended_sf r2, 20 */
1821 },
1822 .fde.reg_ofs = {
1823 /* This must match the ordering in tcg_target_callee_save_regs. */
1824 0x80 + 4, 0, /* DW_CFA_offset r4, 0 */
1825 0x80 + 5, 4, /* DW_CFA_offset r5, 4 */
1826 0x80 + 6, 8, /* DW_CFA_offset r6, 8 */
1827 0x80 + 7, 12, /* ... */
1828 0x80 + 8, 16,
1829 0x80 + 9, 20,
1830 0x80 + 10, 24,
1831 0x80 + 11, 28,
1832 0x80 + 12, 32,
1833 0x80 + 13, 36,
1834 0x80 + 14, 40,
1835 0x80 + 15, 44,
1836 0x80 + 16, 48,
1837 0x80 + 17, 52,
1838 0x80 + 18, 56,
1839 }
1840 };
1841
1842 void tcg_register_jit(void *buf, size_t buf_size)
1843 {
1844 debug_frame.fde.func_start = (tcg_target_long) buf;
1845 debug_frame.fde.func_len = buf_size;
1846
1847 tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
1848 }