/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#ifndef NDEBUG
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0", "%r1", "%rp", "%r3", "%r4", "%r5", "%r6", "%r7",
    "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
    "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
    "%r24", "%r25", "%r26", "%dp", "%ret0", "%ret1", "%sp", "%r31",
};
#endif

/* This is an 8 byte temp slot in the stack frame. */
#define STACK_TEMP_OFS -16

#ifdef CONFIG_USE_GUEST_BASE
#define TCG_GUEST_BASE_REG TCG_REG_R16
#else
#define TCG_GUEST_BASE_REG TCG_REG_R0
#endif

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,

    TCG_REG_R17,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,

    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,

    TCG_REG_RET0,
    TCG_REG_RET1,
};

static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,
};

static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RET0,
    TCG_REG_RET1,
};

/* True iff val fits a signed field of width BITS. */
static inline int check_fit_tl(tcg_target_long val, unsigned int bits)
{
    return (val << ((sizeof(tcg_target_long) * 8 - bits))
            >> (sizeof(tcg_target_long) * 8 - bits)) == val;
}
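
/* For example, check_fit_tl(-7, 5) shifts -7 into the top 5 bits and back
   down with an arithmetic shift; the value survives the round trip, so -7
   fits a signed 5-bit field.  0x400 does not survive for bits = 11, since
   the signed 11-bit range tops out at 0x3ff. */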

/* True iff depi can be used to compute (reg | MASK).
   Accept a bit pattern like:
      0....01....1
      1....10....0
      0..01..10..0
   Copied from gcc sources. */
static inline int or_mask_p(tcg_target_ulong mask)
{
    if (mask == 0 || mask == -1) {
        return 0;
    }
    mask += mask & -mask;
    return (mask & (mask - 1)) == 0;
}
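
/* The trick above: mask & -mask isolates the lowest set bit, and adding it
   carries through the lowest run of consecutive ones, clearing that run.
   If what remains has at most one bit set (mask & (mask - 1) == 0), the
   original mask was a single contiguous run of ones, which a DEPI of -1
   into that field can produce. */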

/* True iff depi or extru can be used to compute (reg & mask).
   Accept a bit pattern like these:
      0....01....1
      1....10....0
      1..10..01..1
   Copied from gcc sources. */
static inline int and_mask_p(tcg_target_ulong mask)
{
    return or_mask_p(~mask);
}

static int low_sign_ext(int val, int len)
{
    return (((val << 1) & ~(-1u << len)) | ((val >> (len - 1)) & 1));
}
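
/* PA-RISC "low sign extension": immediate fields such as im5, im11 and im14
   store the sign bit in the least significant bit of the field, with the
   remaining value bits above it.  low_sign_ext converts an ordinary two's
   complement value into that rotated form for insertion into an insn. */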

static int reassemble_12(int as12)
{
    return (((as12 & 0x800) >> 11) |
            ((as12 & 0x400) >> 8) |
            ((as12 & 0x3ff) << 3));
}

static int reassemble_17(int as17)
{
    return (((as17 & 0x10000) >> 16) |
            ((as17 & 0x0f800) << 5) |
            ((as17 & 0x00400) >> 8) |
            ((as17 & 0x003ff) << 3));
}

static int reassemble_21(int as21)
{
    return (((as21 & 0x100000) >> 20) |
            ((as21 & 0x0ffe00) >> 8) |
            ((as21 & 0x000180) << 7) |
            ((as21 & 0x00007c) << 14) |
            ((as21 & 0x000003) << 12));
}
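
/* The reassemble_* helpers permute a 12-, 17- or 21-bit immediate into the
   scattered bit positions that the PA-RISC branch (COMB*, BL) and LDIL/ADDIL
   instruction formats use for those fields; they do not sign-extend. */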

/* ??? Bizarrely, there is no PCREL12F relocation type.  I guess all
   such relocations are simply fully handled by the assembler. */
#define R_PARISC_PCREL12F R_PARISC_NONE

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    uint32_t *insn_ptr = (uint32_t *)code_ptr;
    uint32_t insn = *insn_ptr;
    tcg_target_long pcrel;

    value += addend;
    pcrel = (value - ((tcg_target_long)code_ptr + 8)) >> 2;

    switch (type) {
    case R_PARISC_PCREL12F:
        assert(check_fit_tl(pcrel, 12));
        /* ??? We assume all patches are forward.  See tcg_out_brcond
           re setting the NUL bit on the branch and eliding the nop. */
        assert(pcrel >= 0);
        insn &= ~0x1ffdu;
        insn |= reassemble_12(pcrel);
        break;
    case R_PARISC_PCREL17F:
        assert(check_fit_tl(pcrel, 17));
        insn &= ~0x1f1ffdu;
        insn |= reassemble_17(pcrel);
        break;
    default:
        tcg_abort();
    }

    *insn_ptr = insn;
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
        break;
    case 'Z':
        ct->ct |= TCG_CT_CONST_0;
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S11;
        break;
    case 'J':
        ct->ct |= TCG_CT_CONST_S5;
        break;
    case 'K':
        ct->ct |= TCG_CT_CONST_MS11;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_AND;
        break;
    case 'O':
        ct->ct |= TCG_CT_CONST_OR;
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    } else if (ct & TCG_CT_CONST_0) {
        return val == 0;
    } else if (ct & TCG_CT_CONST_S5) {
        return check_fit_tl(val, 5);
    } else if (ct & TCG_CT_CONST_S11) {
        return check_fit_tl(val, 11);
    } else if (ct & TCG_CT_CONST_MS11) {
        return check_fit_tl(-val, 11);
    } else if (ct & TCG_CT_CONST_AND) {
        return and_mask_p(val);
    } else if (ct & TCG_CT_CONST_OR) {
        return or_mask_p(val);
    }
    return 0;
}

#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)
#define INSN_IM11(x)     low_sign_ext(x, 11)
#define INSN_IM14(x)     low_sign_ext(x, 14)
#define INSN_IM5(x)      (low_sign_ext(x, 5) << 16)

#define COND_NEVER   0
#define COND_EQ      1
#define COND_LT      2
#define COND_LE      3
#define COND_LTU     4
#define COND_LEU     5
#define COND_SV      6
#define COND_OD      7
#define COND_FALSE   8

#define INSN_ADD     (INSN_OP(0x02) | INSN_EXT6(0x18))
#define INSN_ADDC    (INSN_OP(0x02) | INSN_EXT6(0x1c))
#define INSN_ADDI    (INSN_OP(0x2d))
#define INSN_ADDIL   (INSN_OP(0x0a))
#define INSN_ADDL    (INSN_OP(0x02) | INSN_EXT6(0x28))
#define INSN_AND     (INSN_OP(0x02) | INSN_EXT6(0x08))
#define INSN_ANDCM   (INSN_OP(0x02) | INSN_EXT6(0x00))
#define INSN_COMCLR  (INSN_OP(0x02) | INSN_EXT6(0x22))
#define INSN_COMICLR (INSN_OP(0x24))
#define INSN_DEP     (INSN_OP(0x35) | INSN_EXT3SH(3))
#define INSN_DEPI    (INSN_OP(0x35) | INSN_EXT3SH(7))
#define INSN_EXTRS   (INSN_OP(0x34) | INSN_EXT3SH(7))
#define INSN_EXTRU   (INSN_OP(0x34) | INSN_EXT3SH(6))
#define INSN_LDIL    (INSN_OP(0x08))
#define INSN_LDO     (INSN_OP(0x0d))
#define INSN_MTCTL   (INSN_OP(0x00) | INSN_EXT8B(0xc2))
#define INSN_OR      (INSN_OP(0x02) | INSN_EXT6(0x09))
#define INSN_SHD     (INSN_OP(0x34) | INSN_EXT3SH(2))
#define INSN_SUB     (INSN_OP(0x02) | INSN_EXT6(0x10))
#define INSN_SUBB    (INSN_OP(0x02) | INSN_EXT6(0x14))
#define INSN_SUBI    (INSN_OP(0x25))
#define INSN_VEXTRS  (INSN_OP(0x34) | INSN_EXT3SH(5))
#define INSN_VEXTRU  (INSN_OP(0x34) | INSN_EXT3SH(4))
#define INSN_VSHD    (INSN_OP(0x34) | INSN_EXT3SH(0))
#define INSN_XOR     (INSN_OP(0x02) | INSN_EXT6(0x0a))
#define INSN_ZDEP    (INSN_OP(0x35) | INSN_EXT3SH(2))
#define INSN_ZVDEP   (INSN_OP(0x35) | INSN_EXT3SH(0))

#define INSN_BL      (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define INSN_BL_N    (INSN_OP(0x3a) | INSN_EXT3BR(0) | 2)
#define INSN_BLR     (INSN_OP(0x3a) | INSN_EXT3BR(2))
#define INSN_BV      (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define INSN_BV_N    (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)
#define INSN_BLE_SR4 (INSN_OP(0x39) | (1 << 13))

#define INSN_LDB     (INSN_OP(0x10))
#define INSN_LDH     (INSN_OP(0x11))
#define INSN_LDW     (INSN_OP(0x12))
#define INSN_LDWM    (INSN_OP(0x13))
#define INSN_FLDDS   (INSN_OP(0x0b) | INSN_EXT4(0) | (1 << 12))

#define INSN_LDBX    (INSN_OP(0x03) | INSN_EXT4(0))
#define INSN_LDHX    (INSN_OP(0x03) | INSN_EXT4(1))
#define INSN_LDWX    (INSN_OP(0x03) | INSN_EXT4(2))

#define INSN_STB     (INSN_OP(0x18))
#define INSN_STH     (INSN_OP(0x19))
#define INSN_STW     (INSN_OP(0x1a))
#define INSN_STWM    (INSN_OP(0x1b))
#define INSN_FSTDS   (INSN_OP(0x0b) | INSN_EXT4(8) | (1 << 12))

#define INSN_COMBT   (INSN_OP(0x20))
#define INSN_COMBF   (INSN_OP(0x22))
#define INSN_COMIBT  (INSN_OP(0x21))
#define INSN_COMIBF  (INSN_OP(0x23))

/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(const void *);

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    /* PA1.1 defines COPY as OR r,0,t; PA2.0 defines COPY as LDO 0(r),t
       but hppa-dis.c is unaware of this definition */
    if (ret != arg) {
        tcg_out32(s, INSN_OR | INSN_T(ret) | INSN_R1(arg)
                  | INSN_R2(TCG_REG_R0));
    }
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg ret, tcg_target_long arg)
{
    if (check_fit_tl(arg, 14)) {
        tcg_out32(s, INSN_LDO | INSN_R1(ret)
                  | INSN_R2(TCG_REG_R0) | INSN_IM14(arg));
    } else {
        uint32_t hi, lo;
        hi = arg >> 11;
        lo = arg & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(ret) | reassemble_21(hi));
        if (lo) {
            tcg_out32(s, INSN_LDO | INSN_R1(ret)
                      | INSN_R2(ret) | INSN_IM14(lo));
        }
    }
}
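
/* For a value that does not fit in 14 bits, e.g. 0x12345678, the code above
   emits LDIL to set the high 21 bits (0x12345678 >> 11) and then LDO to add
   the low 11 bits (0x678).  Splitting at bit 11 keeps the LDO immediate
   non-negative, so the LDIL part never needs a carry correction. */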

static void tcg_out_ldst(TCGContext *s, int ret, int addr,
                         tcg_target_long offset, int op)
{
    if (!check_fit_tl(offset, 14)) {
        uint32_t hi, lo, op;

        hi = offset >> 11;
        lo = offset & 0x7ff;

        if (addr == TCG_REG_R0) {
            op = INSN_LDIL | INSN_R2(TCG_REG_R1);
        } else {
            op = INSN_ADDIL | INSN_R2(addr);
        }
        tcg_out32(s, op | reassemble_21(hi));

        addr = TCG_REG_R1;
        offset = lo;
    }

    if (ret != addr || offset != 0 || op != INSN_LDO) {
        tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) | INSN_IM14(offset));
    }
}
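
/* For an offset outside the signed 14-bit range, the code above first adds
   the upper 21 bits of the offset to the base with ADDIL (or loads them with
   LDIL when the base is %r0), leaving the result in %r1, and then uses the
   remaining low 11 bits as the immediate of the actual memory insn. */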

/* This function is required by tcg.c. */
static inline void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_LDW);
}

/* This function is required by tcg.c. */
static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg ret,
                              TCGReg arg1, tcg_target_long arg2)
{
    tcg_out_ldst(s, ret, arg1, arg2, INSN_STW);
}

static void tcg_out_ldst_index(TCGContext *s, int data,
                               int base, int index, int op)
{
    tcg_out32(s, op | INSN_T(data) | INSN_R1(index) | INSN_R2(base));
}

static inline void tcg_out_addi2(TCGContext *s, int ret, int arg1,
                                 tcg_target_long val)
{
    tcg_out_ldst(s, ret, arg1, val, INSN_LDO);
}

/* This function is required by tcg.c. */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    tcg_out_addi2(s, reg, reg, val);
}

static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
{
    tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
}

static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
                                  tcg_target_long val, int op)
{
    assert(check_fit_tl(val, 11));
    tcg_out32(s, op | INSN_R1(t) | INSN_R2(r1) | INSN_IM11(val));
}

static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out_arith(s, TCG_REG_R0, TCG_REG_R0, TCG_REG_R0, INSN_OR);
}

static inline void tcg_out_mtctl_sar(TCGContext *s, int arg)
{
    tcg_out32(s, INSN_MTCTL | INSN_R2(11) | INSN_R1(arg));
}

/* Extract LEN bits at position OFS from ARG and place in RET.
   Note that here the bit ordering is reversed from the PA-RISC
   standard, such that the right-most bit is 0. */
static inline void tcg_out_extr(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len, int sign)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, (sign ? INSN_EXTRS : INSN_EXTRU)
              | INSN_R1(ret) | INSN_R2(arg)
              | INSN_SHDEP_P(31 - ofs) | INSN_DEP_LEN(len));
}

/* Likewise with OFS interpreted little-endian. */
static inline void tcg_out_dep(TCGContext *s, int ret, int arg,
                               unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}

static inline void tcg_out_depi(TCGContext *s, int ret, int arg,
                                unsigned ofs, unsigned len)
{
    assert(ofs < 32 && len <= 32 - ofs);
    tcg_out32(s, INSN_DEPI | INSN_R2(ret) | INSN_IM5(arg)
              | INSN_SHDEP_CP(31 - ofs) | INSN_DEP_LEN(len));
}

static inline void tcg_out_shd(TCGContext *s, int ret, int hi, int lo,
                               unsigned count)
{
    assert(count < 32);
    tcg_out32(s, INSN_SHD | INSN_R1(hi) | INSN_R2(lo) | INSN_T(ret)
              | INSN_SHDEP_CP(count));
}

static void tcg_out_vshd(TCGContext *s, int ret, int hi, int lo, int creg)
{
    tcg_out_mtctl_sar(s, creg);
    tcg_out32(s, INSN_VSHD | INSN_T(ret) | INSN_R1(hi) | INSN_R2(lo));
}

static void tcg_out_ori(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int bs0, bs1;

    /* Note that the argument is constrained to match or_mask_p. */
    for (bs0 = 0; bs0 < 32; bs0++) {
        if ((m & (1u << bs0)) != 0) {
            break;
        }
    }
    for (bs1 = bs0; bs1 < 32; bs1++) {
        if ((m & (1u << bs1)) == 0) {
            break;
        }
    }
    assert(bs1 == 32 || (1ul << bs1) > m);

    tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
    tcg_out_depi(s, ret, -1, bs0, bs1 - bs0);
}

static void tcg_out_andi(TCGContext *s, int ret, int arg, tcg_target_ulong m)
{
    int ls0, ls1, ms0;

    /* Note that the argument is constrained to match and_mask_p. */
    for (ls0 = 0; ls0 < 32; ls0++) {
        if ((m & (1u << ls0)) == 0) {
            break;
        }
    }
    for (ls1 = ls0; ls1 < 32; ls1++) {
        if ((m & (1u << ls1)) != 0) {
            break;
        }
    }
    for (ms0 = ls1; ms0 < 32; ms0++) {
        if ((m & (1u << ms0)) == 0) {
            break;
        }
    }
    assert(ms0 == 32);

    if (ls1 == 32) {
        tcg_out_extr(s, ret, arg, 0, ls0, 0);
    } else {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg);
        tcg_out_depi(s, ret, 0, ls0, ls1 - ls0);
    }
}

static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 8, 1);
}

static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg)
{
    tcg_out_extr(s, ret, arg, 0, 16, 1);
}

static void tcg_out_shli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out32(s, INSN_ZDEP | INSN_R2(ret) | INSN_R1(arg)
              | INSN_SHDEP_CP(31 - count) | INSN_DEP_LEN(32 - count));
}

static void tcg_out_shl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_ZVDEP | INSN_R2(ret) | INSN_R1(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_shri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 0);
}

static void tcg_out_shr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, TCG_REG_R0, arg, creg);
}

static void tcg_out_sari(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_extr(s, ret, arg, count, 32 - count, 1);
}

static void tcg_out_sar(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 31, INSN_SUBI);
    tcg_out_mtctl_sar(s, TCG_REG_R20);
    tcg_out32(s, INSN_VEXTRS | INSN_R1(ret) | INSN_R2(arg) | INSN_DEP_LEN(32));
}

static void tcg_out_rotli(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, 32 - count);
}

static void tcg_out_rotl(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_arithi(s, TCG_REG_R20, creg, 32, INSN_SUBI);
    tcg_out_vshd(s, ret, arg, arg, TCG_REG_R20);
}

static void tcg_out_rotri(TCGContext *s, int ret, int arg, int count)
{
    count &= 31;
    tcg_out_shd(s, ret, arg, arg, count);
}

static void tcg_out_rotr(TCGContext *s, int ret, int arg, int creg)
{
    tcg_out_vshd(s, ret, arg, arg, creg);
}

static void tcg_out_bswap16(TCGContext *s, int ret, int arg, int sign)
{
    if (ret != arg) {
        tcg_out_mov(s, TCG_TYPE_I32, ret, arg);  /* arg = xxAB */
    }
    tcg_out_dep(s, ret, ret, 16, 8);             /* ret = xBAB */
    tcg_out_extr(s, ret, ret, 8, 16, sign);      /* ret = ..BA */
}

static void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp)
{
                                         /* arg =  ABCD */
    tcg_out_rotri(s, temp, arg, 16);     /* temp = CDAB */
    tcg_out_dep(s, temp, temp, 16, 8);   /* temp = CBAB */
    tcg_out_shd(s, ret, arg, temp, 8);   /* ret =  DCBA */
}

static void tcg_out_call(TCGContext *s, const void *func)
{
    tcg_target_long val, hi, lo, disp;

    val = (uint32_t)__canonicalize_funcptr_for_compare(func);
    disp = (val - ((tcg_target_long)s->code_ptr + 8)) >> 2;

    if (check_fit_tl(disp, 17)) {
        tcg_out32(s, INSN_BL_N | INSN_R2(TCG_REG_RP) | reassemble_17(disp));
    } else {
        hi = val >> 11;
        lo = val & 0x7ff;

        tcg_out32(s, INSN_LDIL | INSN_R2(TCG_REG_R20) | reassemble_21(hi));
        tcg_out32(s, INSN_BLE_SR4 | INSN_R2(TCG_REG_R20)
                  | reassemble_17(lo >> 2));
        tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_RP, TCG_REG_R31);
    }
}

static void tcg_out_xmpyu(TCGContext *s, int retl, int reth,
                          int arg1, int arg2)
{
    /* Store both words into the stack for copy to the FPU. */
    tcg_out_ldst(s, arg1, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_STW);
    tcg_out_ldst(s, arg2, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4, INSN_STW);

    /* Load both words into the FPU at the same time.  We get away
       with this because we can address the left and right half of the
       FPU registers individually once loaded. */
    /* fldds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FLDDS | INSN_R2(TCG_REG_CALL_STACK)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* xmpyu fr22r,fr22,fr22 */
    tcg_out32(s, 0x3ad64796);

    /* Store the 64-bit result back into the stack. */
    /* fstds stack_temp(sp),fr22 */
    tcg_out32(s, INSN_FSTDS | INSN_R2(TCG_REG_CALL_STACK)
              | INSN_IM5(STACK_TEMP_OFS) | INSN_T(22));

    /* Load the pieces of the result that the caller requested. */
    if (reth) {
        tcg_out_ldst(s, reth, TCG_REG_CALL_STACK, STACK_TEMP_OFS, INSN_LDW);
    }
    if (retl) {
        tcg_out_ldst(s, retl, TCG_REG_CALL_STACK, STACK_TEMP_OFS + 4,
                     INSN_LDW);
    }
}

static void tcg_out_add2(TCGContext *s, int destl, int desth,
                         int al, int ah, int bl, int bh, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (blconst) {
        tcg_out_arithi(s, tmp, al, bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_ADD);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_ADDC);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}

static void tcg_out_sub2(TCGContext *s, int destl, int desth, int al, int ah,
                         int bl, int bh, int alconst, int blconst)
{
    int tmp = (destl == ah || destl == bh ? TCG_REG_R20 : destl);

    if (alconst) {
        if (blconst) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, bl);
            bl = TCG_REG_R20;
        }
        tcg_out_arithi(s, tmp, bl, al, INSN_SUBI);
    } else if (blconst) {
        tcg_out_arithi(s, tmp, al, -bl, INSN_ADDI);
    } else {
        tcg_out_arith(s, tmp, al, bl, INSN_SUB);
    }
    tcg_out_arith(s, desth, ah, bh, INSN_SUBB);

    tcg_out_mov(s, TCG_TYPE_I32, destl, tmp);
}

static void tcg_out_branch(TCGContext *s, int label_index, int nul)
{
    TCGLabel *l = &s->labels[label_index];
    uint32_t op = nul ? INSN_BL_N : INSN_BL;

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 17));

        tcg_out32(s, op | reassemble_17(val));
    } else {
        /* We need to keep the offset unchanged for retranslation. */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL17F, label_index, 0);
        tcg_out32(s, op | (old_insn & 0x1f1ffdu));
    }
}

static const uint8_t tcg_cond_to_cmp_cond[] =
{
    [TCG_COND_EQ] = COND_EQ,
    [TCG_COND_NE] = COND_EQ | COND_FALSE,
    [TCG_COND_LT] = COND_LT,
    [TCG_COND_GE] = COND_LT | COND_FALSE,
    [TCG_COND_LE] = COND_LE,
    [TCG_COND_GT] = COND_LE | COND_FALSE,
    [TCG_COND_LTU] = COND_LTU,
    [TCG_COND_GEU] = COND_LTU | COND_FALSE,
    [TCG_COND_LEU] = COND_LEU,
    [TCG_COND_GTU] = COND_LEU | COND_FALSE,
};
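
/* Each entry above holds a PA-RISC condition code in the low 3 bits; an
   ORed-in COND_FALSE selects the negated sense, which below picks the
   COMBF/COMIBF forms of the compare-and-branch insns or sets the negation
   bit in COMCLR/COMICLR. */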

static void tcg_out_brcond(TCGContext *s, int cond, TCGArg c1,
                           TCGArg c2, int c2const, int label_index)
{
    TCGLabel *l = &s->labels[label_index];
    int op, pacond;

    /* Note that COMIB operates as if the immediate is the first
       operand.  We model brcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMB with reversed operands as well. */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = (pacond & COND_FALSE ? INSN_COMIBF : INSN_COMIBT);
        op |= INSN_IM5(c2);
    } else {
        op = (pacond & COND_FALSE ? INSN_COMBF : INSN_COMBT);
        op |= INSN_R1(c2);
    }
    op |= INSN_R2(c1);
    op |= INSN_COND(pacond & 7);

    if (l->has_value) {
        tcg_target_long val = l->u.value;

        val -= (tcg_target_long)s->code_ptr + 8;
        val >>= 2;
        assert(check_fit_tl(val, 12));

        /* ??? Assume that all branches to defined labels are backward.
           Which means that if the nul bit is set, the delay slot is
           executed if the branch is taken, and not executed in fallthru. */
        tcg_out32(s, op | reassemble_12(val));
        tcg_out_nop(s);
    } else {
        /* We need to keep the offset unchanged for retranslation. */
        uint32_t old_insn = *(uint32_t *)s->code_ptr;

        tcg_out_reloc(s, s->code_ptr, R_PARISC_PCREL12F, label_index, 0);
        /* ??? Assume that all branches to undefined labels are forward.
           Which means that if the nul bit is set, the delay slot is
           not executed if the branch is taken, which is what we want. */
        tcg_out32(s, op | 2 | (old_insn & 0x1ffdu));
    }
}

static void tcg_out_comclr(TCGContext *s, int cond, TCGArg ret,
                           TCGArg c1, TCGArg c2, int c2const)
{
    int op, pacond;

    /* Note that COMICLR operates as if the immediate is the first
       operand.  We model setcond with the immediate in the second
       to better match what targets are likely to give us.  For
       consistency, model COMCLR with reversed operands as well. */
    pacond = tcg_cond_to_cmp_cond[tcg_swap_cond(cond)];

    if (c2const) {
        op = INSN_COMICLR | INSN_R2(c1) | INSN_R1(ret) | INSN_IM11(c2);
    } else {
        op = INSN_COMCLR | INSN_R2(c1) | INSN_R1(c2) | INSN_T(ret);
    }
    op |= INSN_COND(pacond & 7);
    op |= pacond & COND_FALSE ? 1 << 12 : 0;

    tcg_out32(s, op);
}

static void tcg_out_brcond2(TCGContext *s, int cond, TCGArg al, TCGArg ah,
                            TCGArg bl, int blconst, TCGArg bh, int bhconst,
                            int label_index)
{
    switch (cond) {
    case TCG_COND_EQ:
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, al, bl, blconst);
        tcg_out_brcond(s, TCG_COND_EQ, ah, bh, bhconst, label_index);
        break;
    case TCG_COND_NE:
        tcg_out_brcond(s, TCG_COND_NE, al, bl, blconst, label_index);
        tcg_out_brcond(s, TCG_COND_NE, ah, bh, bhconst, label_index);
        break;
    default:
        tcg_out_brcond(s, tcg_high_cond(cond), ah, bh, bhconst, label_index);
        tcg_out_comclr(s, TCG_COND_NE, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_brcond(s, tcg_unsigned_cond(cond),
                       al, bl, blconst, label_index);
        break;
    }
}

static void tcg_out_setcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const)
{
    tcg_out_comclr(s, tcg_invert_cond(cond), ret, c1, c2, c2const);
    tcg_out_movi(s, TCG_TYPE_I32, ret, 1);
}

static void tcg_out_setcond2(TCGContext *s, int cond, TCGArg ret,
                             TCGArg al, TCGArg ah, TCGArg bl, int blconst,
                             TCGArg bh, int bhconst)
{
    int scratch = TCG_REG_R20;

    /* Note that the low parts are fully consumed before scratch is set. */
    if (ret != ah && (bhconst || ret != bh)) {
        scratch = ret;
    }

    switch (cond) {
    case TCG_COND_EQ:
    case TCG_COND_NE:
        tcg_out_setcond(s, cond, scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, cond == TCG_COND_NE);
        break;

    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LT:
    case TCG_COND_LTU:
        /* Optimize compares with low part zero. */
        if (bl == 0) {
            tcg_out_setcond(s, cond, ret, ah, bh, bhconst);
            return;
        }
        /* FALLTHRU */

    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_GT:
    case TCG_COND_GTU:
        /* <= : ah < bh | (ah == bh && al <= bl) */
        tcg_out_setcond(s, tcg_unsigned_cond(cond), scratch, al, bl, blconst);
        tcg_out_comclr(s, TCG_COND_EQ, TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 0);
        tcg_out_comclr(s, tcg_invert_cond(tcg_high_cond(cond)),
                       TCG_REG_R0, ah, bh, bhconst);
        tcg_out_movi(s, TCG_TYPE_I32, scratch, 1);
        break;

    default:
        tcg_abort();
    }

    tcg_out_mov(s, TCG_TYPE_I32, ret, scratch);
}

static void tcg_out_movcond(TCGContext *s, int cond, TCGArg ret,
                            TCGArg c1, TCGArg c2, int c2const,
                            TCGArg v1, int v1const)
{
    tcg_out_comclr(s, tcg_invert_cond(cond), TCG_REG_R0, c1, c2, c2const);
    if (v1const) {
        tcg_out_movi(s, TCG_TYPE_I32, ret, v1);
    } else {
        tcg_out_mov(s, TCG_TYPE_I32, ret, v1);
    }
}

#if defined(CONFIG_SOFTMMU)
#include "exec/softmmu_defs.h"

/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
   int mmu_idx) */
static const void * const qemu_ld_helpers[4] = {
    helper_ldb_mmu,
    helper_ldw_mmu,
    helper_ldl_mmu,
    helper_ldq_mmu,
};

/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
   uintxx_t val, int mmu_idx) */
static const void * const qemu_st_helpers[4] = {
    helper_stb_mmu,
    helper_stw_mmu,
    helper_stl_mmu,
    helper_stq_mmu,
};

/* Load and compare a TLB entry, and branch if TLB miss.  OFFSET is set to
   the offset of the first ADDR_READ or ADDR_WRITE member of the appropriate
   TLB for the memory index.  The return value is the offset from ENV
   contained in R1 afterward (to be used when loading ADDEND); if the
   return value is 0, R1 is not used. */

static int tcg_out_tlb_read(TCGContext *s, int r0, int r1, int addrlo,
                            int addrhi, int s_bits, int lab_miss, int offset)
{
    int ret;

    /* Extracting the index into the TLB.  The "normal C operation" is
         r1 = addr_reg >> TARGET_PAGE_BITS;
         r1 &= CPU_TLB_SIZE - 1;
         r1 <<= CPU_TLB_ENTRY_BITS;
       What this does is extract CPU_TLB_BITS beginning at TARGET_PAGE_BITS
       and place them at CPU_TLB_ENTRY_BITS.  We can combine the first two
       operations with an EXTRU.  Unfortunately, the current value of
       CPU_TLB_ENTRY_BITS is > 3, so we can't merge that shift with the
       add that follows. */
    tcg_out_extr(s, r1, addrlo, TARGET_PAGE_BITS, CPU_TLB_BITS, 0);
    tcg_out_shli(s, r1, r1, CPU_TLB_ENTRY_BITS);
    tcg_out_arith(s, r1, r1, TCG_AREG0, INSN_ADDL);

    /* Make sure that both the addr_{read,write} and addend can be
       read with a 14-bit offset from the same base register. */
    if (check_fit_tl(offset + CPU_TLB_SIZE, 14)) {
        ret = 0;
    } else {
        ret = (offset + 0x400) & ~0x7ff;
        offset = ret - offset;
        tcg_out_addi2(s, TCG_REG_R1, r1, ret);
        r1 = TCG_REG_R1;
    }

    /* Load the entry from the computed slot. */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R23, r1, offset);
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset + 4);
    } else {
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, r1, offset);
    }

    /* Compute the value that ought to appear in the TLB for a hit, namely,
       the page of the address.  We include the low N bits of the address
       to catch unaligned accesses and force them onto the slow path.  Do
       this computation after having issued the load from the TLB slot to
       give the load time to complete. */
    tcg_out_andi(s, r0, addrlo, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    /* If not equal, jump to lab_miss. */
    if (TARGET_LONG_BITS == 64) {
        tcg_out_brcond2(s, TCG_COND_NE, TCG_REG_R20, TCG_REG_R23,
                        r0, 0, addrhi, 0, lab_miss);
    } else {
        tcg_out_brcond(s, TCG_COND_NE, TCG_REG_R20, r0, 0, lab_miss);
    }

    return ret;
}

static int tcg_out_arg_reg32(TCGContext *s, int argno, TCGArg v, bool vconst)
{
    if (argno < 4) {
        if (vconst) {
            tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
        } else {
            tcg_out_mov(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[argno], v);
        }
    } else {
        if (vconst && v != 0) {
            tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R20, v);
            v = TCG_REG_R20;
        }
        tcg_out_st(s, TCG_TYPE_I32, v, TCG_REG_CALL_STACK,
                   TCG_TARGET_CALL_STACK_OFFSET - ((argno - 3) * 4));
    }
    return argno + 1;
}

static int tcg_out_arg_reg64(TCGContext *s, int argno, TCGArg vl, TCGArg vh)
{
    /* 64-bit arguments must go in even reg pairs and stack slots. */
    if (argno & 1) {
        argno++;
    }
    argno = tcg_out_arg_reg32(s, argno, vl, false);
    argno = tcg_out_arg_reg32(s, argno, vh, false);
    return argno;
}
#endif

static void tcg_out_qemu_ld_direct(TCGContext *s, int datalo_reg, int datahi_reg,
                                   int addr_reg, int addend_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        break;
    case 0 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDBX);
        tcg_out_ext8s(s, datalo_reg, datalo_reg);
        break;
    case 1:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 0);
        }
        break;
    case 1 | 4:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDHX);
        if (bswap) {
            tcg_out_bswap16(s, datalo_reg, datalo_reg, 1);
        } else {
            tcg_out_ext16s(s, datalo_reg, datalo_reg);
        }
        break;
    case 2:
        tcg_out_ldst_index(s, datalo_reg, addr_reg, addend_reg, INSN_LDWX);
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
        }
        break;
    case 3:
        if (bswap) {
            int t = datahi_reg;
            datahi_reg = datalo_reg;
            datalo_reg = t;
        }
        /* We can't access the low-part with a reg+reg addressing mode,
           so perform the addition now and use reg_ofs addressing mode. */
        if (addend_reg != TCG_REG_R0) {
            tcg_out_arith(s, TCG_REG_R20, addr_reg, addend_reg, INSN_ADD);
            addr_reg = TCG_REG_R20;
        }
        /* Make sure not to clobber the base register. */
        if (datahi_reg == addr_reg) {
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
        } else {
            tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_LDW);
            tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_LDW);
        }
        if (bswap) {
            tcg_out_bswap32(s, datalo_reg, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, datahi_reg, datahi_reg, TCG_REG_R20);
        }
        break;
    default:
        tcg_abort();
    }
}

static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit loads. */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests. */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argno, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_read);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
                              addrhi_reg, opc & 3, lab1, offset);

    /* TLB Hit. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
               (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           TCG_REG_R20, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss. */
    /* label1: */
    tcg_out_label(s, lab1, s->code_ptr);

    argno = 0;
    argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
    if (TARGET_LONG_BITS == 64) {
        argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
    } else {
        argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
    }
    argno = tcg_out_arg_reg32(s, argno, mem_index, true);

    tcg_out_call(s, qemu_ld_helpers[opc & 3]);

    switch (opc) {
    case 0:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xff);
        break;
    case 0 | 4:
        tcg_out_ext8s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 1:
        tcg_out_andi(s, datalo_reg, TCG_REG_RET0, 0xffff);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, datalo_reg, TCG_REG_RET0);
        break;
    case 2:
    case 2 | 4:
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_out_mov(s, TCG_TYPE_I32, datahi_reg, TCG_REG_RET0);
        tcg_out_mov(s, TCG_TYPE_I32, datalo_reg, TCG_REG_RET1);
        break;
    default:
        tcg_abort();
    }

    /* label2: */
    tcg_out_label(s, lab2, s->code_ptr);
#else
    tcg_out_qemu_ld_direct(s, datalo_reg, datahi_reg, addrlo_reg,
                           (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_R0), opc);
#endif
}

static void tcg_out_qemu_st_direct(TCGContext *s, int datalo_reg,
                                   int datahi_reg, int addr_reg, int opc)
{
#ifdef TARGET_WORDS_BIGENDIAN
    const int bswap = 0;
#else
    const int bswap = 1;
#endif

    switch (opc) {
    case 0:
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, datalo_reg, 0);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            datalo_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, datalo_reg, addr_reg, 0, INSN_STW);
        break;
    case 3:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, datalo_reg, TCG_REG_R20);
            tcg_out_bswap32(s, TCG_REG_R23, datahi_reg, TCG_REG_R23);
            datahi_reg = TCG_REG_R20;
            datalo_reg = TCG_REG_R23;
        }
        tcg_out_ldst(s, datahi_reg, addr_reg, 0, INSN_STW);
        tcg_out_ldst(s, datalo_reg, addr_reg, 4, INSN_STW);
        break;
    default:
        tcg_abort();
    }

}

static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int datalo_reg = *args++;
    /* Note that datahi_reg is only used for 64-bit stores. */
    int datahi_reg = (opc == 3 ? *args++ : TCG_REG_R0);
    int addrlo_reg = *args++;

#if defined(CONFIG_SOFTMMU)
    /* Note that addrhi_reg is only used for 64-bit guests. */
    int addrhi_reg = (TARGET_LONG_BITS == 64 ? *args++ : TCG_REG_R0);
    int mem_index = *args;
    int lab1, lab2, argno, next, offset;

    lab1 = gen_new_label();
    lab2 = gen_new_label();

    offset = offsetof(CPUArchState, tlb_table[mem_index][0].addr_write);
    offset = tcg_out_tlb_read(s, TCG_REG_R26, TCG_REG_R25, addrlo_reg,
                              addrhi_reg, opc, lab1, offset);

    /* TLB Hit. */
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20,
               (offset ? TCG_REG_R1 : TCG_REG_R25),
               offsetof(CPUArchState, tlb_table[mem_index][0].addend) - offset);

    /* There are no indexed stores, so we must do this addition explicitly.
       Careful to avoid R20, which is used for the bswaps to follow. */
    tcg_out_arith(s, TCG_REG_R31, addrlo_reg, TCG_REG_R20, INSN_ADDL);
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, TCG_REG_R31, opc);
    tcg_out_branch(s, lab2, 1);

    /* TLB Miss. */
    /* label1: */
    tcg_out_label(s, lab1, s->code_ptr);

    argno = 0;
    argno = tcg_out_arg_reg32(s, argno, TCG_AREG0, false);
    if (TARGET_LONG_BITS == 64) {
        argno = tcg_out_arg_reg64(s, argno, addrlo_reg, addrhi_reg);
    } else {
        argno = tcg_out_arg_reg32(s, argno, addrlo_reg, false);
    }

    next = (argno < 4 ? tcg_target_call_iarg_regs[argno] : TCG_REG_R20);
    switch (opc) {
    case 0:
        tcg_out_andi(s, next, datalo_reg, 0xff);
        argno = tcg_out_arg_reg32(s, argno, next, false);
        break;
    case 1:
        tcg_out_andi(s, next, datalo_reg, 0xffff);
        argno = tcg_out_arg_reg32(s, argno, next, false);
        break;
    case 2:
        argno = tcg_out_arg_reg32(s, argno, datalo_reg, false);
        break;
    case 3:
        argno = tcg_out_arg_reg64(s, argno, datalo_reg, datahi_reg);
        break;
    default:
        tcg_abort();
    }
    argno = tcg_out_arg_reg32(s, argno, mem_index, true);

    tcg_out_call(s, qemu_st_helpers[opc]);

    /* label2: */
    tcg_out_label(s, lab2, s->code_ptr);
#else
    /* There are no indexed stores, so if GUEST_BASE is set we must do
       the add explicitly.  Careful to avoid R20, which is used for the
       bswaps to follow. */
    if (GUEST_BASE != 0) {
        tcg_out_arith(s, TCG_REG_R31, addrlo_reg,
                      TCG_GUEST_BASE_REG, INSN_ADDL);
        addrlo_reg = TCG_REG_R31;
    }
    tcg_out_qemu_st_direct(s, datalo_reg, datahi_reg, addrlo_reg, opc);
#endif
}

static void tcg_out_exit_tb(TCGContext *s, TCGArg arg)
{
    if (!check_fit_tl(arg, 14)) {
        uint32_t hi, lo;
        hi = arg & ~0x7ff;
        lo = arg & 0x7ff;
        if (lo) {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, hi);
            tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
            tcg_out_addi(s, TCG_REG_RET0, lo);
            return;
        }
        arg = hi;
    }
    tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_R18));
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, arg);
}
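
/* Note that BV above is emitted without the ,n completer, so the following
   instruction (the addi or movi that finishes the return value in %ret0)
   still executes in the branch delay slot before the branch target is
   reached. */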
1305 | ||
1306 | static void tcg_out_goto_tb(TCGContext *s, TCGArg arg) | |
1307 | { | |
1308 | if (s->tb_jmp_offset) { | |
1309 | /* direct jump method */ | |
1310 | fprintf(stderr, "goto_tb direct\n"); | |
1311 | tcg_abort(); | |
1312 | } else { | |
1313 | /* indirect jump method */ | |
1314 | tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_R20, TCG_REG_R0, | |
1315 | (tcg_target_long)(s->tb_next + arg)); | |
1316 | tcg_out32(s, INSN_BV_N | INSN_R2(TCG_REG_R20)); | |
1317 | } | |
1318 | s->tb_next_offset[arg] = s->code_ptr - s->code_buf; | |
1319 | } | |
1320 | ||
1321 | static inline void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | |
1322 | const int *const_args) | |
1323 | { | |
1324 | switch (opc) { | |
1325 | case INDEX_op_exit_tb: | |
1326 | tcg_out_exit_tb(s, args[0]); | |
1327 | break; | |
1328 | case INDEX_op_goto_tb: | |
1329 | tcg_out_goto_tb(s, args[0]); | |
1330 | break; | |
1331 | ||
1332 | case INDEX_op_call: | |
1333 | if (const_args[0]) { | |
1334 | tcg_out_call(s, (void *)args[0]); | |
1335 | } else { | |
1336 | /* ??? FIXME: the value in the register in args[0] is almost | |
1337 | certainly a procedure descriptor, not a code address. We | |
1338 | probably need to use the millicode $$dyncall routine. */ | |
1339 | tcg_abort(); | |
1340 | } | |
1341 | break; | |
1342 | ||
1343 | case INDEX_op_br: | |
1344 | tcg_out_branch(s, args[0], 1); | |
1345 | break; | |
1346 | ||
1347 | case INDEX_op_movi_i32: | |
1348 | tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]); | |
1349 | break; | |
1350 | ||
1351 | case INDEX_op_ld8u_i32: | |
1352 | tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB); | |
1353 | break; | |
1354 | case INDEX_op_ld8s_i32: | |
1355 | tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDB); | |
1356 | tcg_out_ext8s(s, args[0], args[0]); | |
1357 | break; | |
1358 | case INDEX_op_ld16u_i32: | |
1359 | tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH); | |
1360 | break; | |
1361 | case INDEX_op_ld16s_i32: | |
1362 | tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDH); | |
1363 | tcg_out_ext16s(s, args[0], args[0]); | |
1364 | break; | |
1365 | case INDEX_op_ld_i32: | |
1366 | tcg_out_ldst(s, args[0], args[1], args[2], INSN_LDW); | |
1367 | break; | |
1368 | ||
1369 | case INDEX_op_st8_i32: | |
1370 | tcg_out_ldst(s, args[0], args[1], args[2], INSN_STB); | |
1371 | break; | |
1372 | case INDEX_op_st16_i32: | |
1373 | tcg_out_ldst(s, args[0], args[1], args[2], INSN_STH); | |
1374 | break; | |
1375 | case INDEX_op_st_i32: | |
1376 | tcg_out_ldst(s, args[0], args[1], args[2], INSN_STW); | |
1377 | break; | |
1378 | ||
1379 | case INDEX_op_add_i32: | |
1380 | if (const_args[2]) { | |
1381 | tcg_out_addi2(s, args[0], args[1], args[2]); | |
1382 | } else { | |
1383 | tcg_out_arith(s, args[0], args[1], args[2], INSN_ADDL); | |
1384 | } | |
1385 | break; | |
1386 | ||
1387 | case INDEX_op_sub_i32: | |
1388 | if (const_args[1]) { | |
1389 | if (const_args[2]) { | |
1390 | tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1] - args[2]); | |
1391 | } else { | |
1392 | /* Recall that SUBI is a reversed subtract. */ | |
1393 | tcg_out_arithi(s, args[0], args[2], args[1], INSN_SUBI); | |
1394 | } | |
1395 | } else if (const_args[2]) { | |
1396 | tcg_out_addi2(s, args[0], args[1], -args[2]); | |
1397 | } else { | |
1398 | tcg_out_arith(s, args[0], args[1], args[2], INSN_SUB); | |
1399 | } | |
1400 | break; | |
1401 | ||
1402 | case INDEX_op_and_i32: | |
1403 | if (const_args[2]) { | |
1404 | tcg_out_andi(s, args[0], args[1], args[2]); | |
1405 | } else { | |
1406 | tcg_out_arith(s, args[0], args[1], args[2], INSN_AND); | |
1407 | } | |
1408 | break; | |
1409 | ||
1410 | case INDEX_op_or_i32: | |
1411 | if (const_args[2]) { | |
1412 | tcg_out_ori(s, args[0], args[1], args[2]); | |
1413 | } else { | |
1414 | tcg_out_arith(s, args[0], args[1], args[2], INSN_OR); | |
1415 | } | |
1416 | break; | |
1417 | ||
1418 | case INDEX_op_xor_i32: | |
1419 | tcg_out_arith(s, args[0], args[1], args[2], INSN_XOR); | |
1420 | break; | |
1421 | ||
1422 | case INDEX_op_andc_i32: | |
1423 | if (const_args[2]) { | |
1424 | tcg_out_andi(s, args[0], args[1], ~args[2]); | |
1425 | } else { | |
1426 | tcg_out_arith(s, args[0], args[1], args[2], INSN_ANDCM); | |
1427 | } | |
1428 | break; | |
1429 | ||
1430 | case INDEX_op_shl_i32: | |
1431 | if (const_args[2]) { | |
1432 | tcg_out_shli(s, args[0], args[1], args[2]); | |
1433 | } else { | |
1434 | tcg_out_shl(s, args[0], args[1], args[2]); | |
1435 | } | |
1436 | break; | |
1437 | ||
1438 | case INDEX_op_shr_i32: | |
1439 | if (const_args[2]) { | |
1440 | tcg_out_shri(s, args[0], args[1], args[2]); | |
1441 | } else { | |
1442 | tcg_out_shr(s, args[0], args[1], args[2]); | |
1443 | } | |
1444 | break; | |
1445 | ||
1446 | case INDEX_op_sar_i32: | |
1447 | if (const_args[2]) { | |
1448 | tcg_out_sari(s, args[0], args[1], args[2]); | |
1449 | } else { | |
1450 | tcg_out_sar(s, args[0], args[1], args[2]); | |
1451 | } | |
1452 | break; | |
1453 | ||
1454 | case INDEX_op_rotl_i32: | |
1455 | if (const_args[2]) { | |
1456 | tcg_out_rotli(s, args[0], args[1], args[2]); | |
1457 | } else { | |
1458 | tcg_out_rotl(s, args[0], args[1], args[2]); | |
1459 | } | |
1460 | break; | |
1461 | ||
1462 | case INDEX_op_rotr_i32: | |
1463 | if (const_args[2]) { | |
1464 | tcg_out_rotri(s, args[0], args[1], args[2]); | |
1465 | } else { | |
1466 | tcg_out_rotr(s, args[0], args[1], args[2]); | |
1467 | } | |
1468 | break; | |
1469 | ||
1470 | case INDEX_op_mul_i32: | |
1471 | tcg_out_xmpyu(s, args[0], TCG_REG_R0, args[1], args[2]); | |
1472 | break; | |
1473 | case INDEX_op_mulu2_i32: | |
1474 | tcg_out_xmpyu(s, args[0], args[1], args[2], args[3]); | |
1475 | break; | |
1476 | ||
1477 | case INDEX_op_bswap16_i32: | |
1478 | tcg_out_bswap16(s, args[0], args[1], 0); | |
1479 | break; | |
1480 | case INDEX_op_bswap32_i32: | |
1481 | tcg_out_bswap32(s, args[0], args[1], TCG_REG_R20); | |
1482 | break; | |
1483 | ||
1484 | case INDEX_op_not_i32: | |
1485 | tcg_out_arithi(s, args[0], args[1], -1, INSN_SUBI); | |
1486 | break; | |
1487 | case INDEX_op_ext8s_i32: | |
1488 | tcg_out_ext8s(s, args[0], args[1]); | |
1489 | break; | |
1490 | case INDEX_op_ext16s_i32: | |
1491 | tcg_out_ext16s(s, args[0], args[1]); | |
1492 | break; | |
1493 | ||
1494 | case INDEX_op_brcond_i32: | |
1495 | tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]); | |
1496 | break; | |
1497 | case INDEX_op_brcond2_i32: | |
1498 | tcg_out_brcond2(s, args[4], args[0], args[1], | |
1499 | args[2], const_args[2], | |
1500 | args[3], const_args[3], args[5]); | |
1501 | break; | |
1502 | ||
1503 | case INDEX_op_setcond_i32: | |
1504 | tcg_out_setcond(s, args[3], args[0], args[1], args[2], const_args[2]); | |
1505 | break; | |
1506 | case INDEX_op_setcond2_i32: | |
1507 | tcg_out_setcond2(s, args[5], args[0], args[1], args[2], | |
1508 | args[3], const_args[3], args[4], const_args[4]); | |
1509 | break; | |
1510 | ||
1511 | case INDEX_op_movcond_i32: | |
1512 | tcg_out_movcond(s, args[5], args[0], args[1], args[2], const_args[2], | |
1513 | args[3], const_args[3]); | |
1514 | break; | |
1515 | ||
1516 | case INDEX_op_add2_i32: | |
1517 | tcg_out_add2(s, args[0], args[1], args[2], args[3], | |
1518 | args[4], args[5], const_args[4]); | |
1519 | break; | |
1520 | ||
1521 | case INDEX_op_sub2_i32: | |
1522 | tcg_out_sub2(s, args[0], args[1], args[2], args[3], | |
1523 | args[4], args[5], const_args[2], const_args[4]); | |
1524 | break; | |
1525 | ||
1526 | case INDEX_op_deposit_i32: | |
1527 | if (const_args[2]) { | |
1528 | tcg_out_depi(s, args[0], args[2], args[3], args[4]); | |
1529 | } else { | |
1530 | tcg_out_dep(s, args[0], args[2], args[3], args[4]); | |
1531 | } | |
1532 | break; | |
1533 | ||
1534 | case INDEX_op_qemu_ld8u: | |
1535 | tcg_out_qemu_ld(s, args, 0); | |
1536 | break; | |
1537 | case INDEX_op_qemu_ld8s: | |
1538 | tcg_out_qemu_ld(s, args, 0 | 4); | |
1539 | break; | |
1540 | case INDEX_op_qemu_ld16u: | |
1541 | tcg_out_qemu_ld(s, args, 1); | |
1542 | break; | |
1543 | case INDEX_op_qemu_ld16s: | |
1544 | tcg_out_qemu_ld(s, args, 1 | 4); | |
1545 | break; | |
1546 | case INDEX_op_qemu_ld32: | |
1547 | tcg_out_qemu_ld(s, args, 2); | |
1548 | break; | |
1549 | case INDEX_op_qemu_ld64: | |
1550 | tcg_out_qemu_ld(s, args, 3); | |
1551 | break; | |
1552 | ||
1553 | case INDEX_op_qemu_st8: | |
1554 | tcg_out_qemu_st(s, args, 0); | |
1555 | break; | |
1556 | case INDEX_op_qemu_st16: | |
1557 | tcg_out_qemu_st(s, args, 1); | |
1558 | break; | |
1559 | case INDEX_op_qemu_st32: | |
1560 | tcg_out_qemu_st(s, args, 2); | |
1561 | break; | |
1562 | case INDEX_op_qemu_st64: | |
1563 | tcg_out_qemu_st(s, args, 3); | |
1564 | break; | |
1565 | ||
1566 | default: | |
1567 | fprintf(stderr, "unknown opcode 0x%x\n", opc); | |
1568 | tcg_abort(); | |
1569 | } | |
1570 | } | |
1571 | ||
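/* Informal (non-authoritative) summary of the constraint letters used in the
   table below, as parsed by this target's constraint handler earlier in the
   file: "r" any general register, "i" any constant, "Z" the constant zero
   (materialized as %r0), "I"/"J"/"K" small signed immediates accepted by the
   corresponding instruction forms, "M"/"O" constants usable as and/or masks
   via the extract/deposit instructions, "L" a register excluding the call
   argument registers (for the qemu_ld/st slow path), and "0" an operand that
   must match operand 0. */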
1572 | static const TCGTargetOpDef hppa_op_defs[] = { | |
1573 | { INDEX_op_exit_tb, { } }, | |
1574 | { INDEX_op_goto_tb, { } }, | |
1575 | ||
1576 | { INDEX_op_call, { "ri" } }, | |
1577 | { INDEX_op_br, { } }, | |
1578 | ||
1579 | { INDEX_op_mov_i32, { "r", "r" } }, | |
1580 | { INDEX_op_movi_i32, { "r" } }, | |
1581 | ||
1582 | { INDEX_op_ld8u_i32, { "r", "r" } }, | |
1583 | { INDEX_op_ld8s_i32, { "r", "r" } }, | |
1584 | { INDEX_op_ld16u_i32, { "r", "r" } }, | |
1585 | { INDEX_op_ld16s_i32, { "r", "r" } }, | |
1586 | { INDEX_op_ld_i32, { "r", "r" } }, | |
1587 | { INDEX_op_st8_i32, { "rZ", "r" } }, | |
1588 | { INDEX_op_st16_i32, { "rZ", "r" } }, | |
1589 | { INDEX_op_st_i32, { "rZ", "r" } }, | |
1590 | ||
1591 | { INDEX_op_add_i32, { "r", "rZ", "ri" } }, | |
1592 | { INDEX_op_sub_i32, { "r", "rI", "ri" } }, | |
1593 | { INDEX_op_and_i32, { "r", "rZ", "rM" } }, | |
1594 | { INDEX_op_or_i32, { "r", "rZ", "rO" } }, | |
1595 | { INDEX_op_xor_i32, { "r", "rZ", "rZ" } }, | |
1596 | /* Note that the second argument will be inverted, which means | |
1597 | we want a constant whose inversion matches M, and that O = ~M. | |
1598 | See the implementation of and_mask_p. */ | |
1599 | { INDEX_op_andc_i32, { "r", "rZ", "rO" } }, | |
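/* Worked example for the note above: an andc with constant 0x000000ff is
   emitted as an AND with ~0x000000ff = 0xffffff00, a 1...10...0 pattern the
   AND-immediate path accepts; the "O" constraint admits exactly those
   constants whose complement is such a mask. */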
1600 | ||
1601 | { INDEX_op_mul_i32, { "r", "r", "r" } }, | |
1602 | { INDEX_op_mulu2_i32, { "r", "r", "r", "r" } }, | |
1603 | ||
1604 | { INDEX_op_shl_i32, { "r", "r", "ri" } }, | |
1605 | { INDEX_op_shr_i32, { "r", "r", "ri" } }, | |
1606 | { INDEX_op_sar_i32, { "r", "r", "ri" } }, | |
1607 | { INDEX_op_rotl_i32, { "r", "r", "ri" } }, | |
1608 | { INDEX_op_rotr_i32, { "r", "r", "ri" } }, | |
1609 | ||
1610 | { INDEX_op_bswap16_i32, { "r", "r" } }, | |
1611 | { INDEX_op_bswap32_i32, { "r", "r" } }, | |
1612 | { INDEX_op_not_i32, { "r", "r" } }, | |
1613 | ||
1614 | { INDEX_op_ext8s_i32, { "r", "r" } }, | |
1615 | { INDEX_op_ext16s_i32, { "r", "r" } }, | |
1616 | ||
1617 | { INDEX_op_brcond_i32, { "rZ", "rJ" } }, | |
1618 | { INDEX_op_brcond2_i32, { "rZ", "rZ", "rJ", "rJ" } }, | |
1619 | ||
1620 | { INDEX_op_setcond_i32, { "r", "rZ", "rI" } }, | |
1621 | { INDEX_op_setcond2_i32, { "r", "rZ", "rZ", "rI", "rI" } }, | |
1622 | ||
1623 | /* ??? We can actually support a signed 14-bit arg3, but we | |
1624 | only have existing constraints for a signed 11-bit. */ | |
1625 | { INDEX_op_movcond_i32, { "r", "rZ", "rI", "rI", "0" } }, | |
1626 | ||
1627 | { INDEX_op_add2_i32, { "r", "r", "rZ", "rZ", "rI", "rZ" } }, | |
1628 | { INDEX_op_sub2_i32, { "r", "r", "rI", "rZ", "rK", "rZ" } }, | |
1629 | ||
1630 | { INDEX_op_deposit_i32, { "r", "0", "rJ" } }, | |
1631 | ||
1632 | #if TARGET_LONG_BITS == 32 | |
1633 | { INDEX_op_qemu_ld8u, { "r", "L" } }, | |
1634 | { INDEX_op_qemu_ld8s, { "r", "L" } }, | |
1635 | { INDEX_op_qemu_ld16u, { "r", "L" } }, | |
1636 | { INDEX_op_qemu_ld16s, { "r", "L" } }, | |
1637 | { INDEX_op_qemu_ld32, { "r", "L" } }, | |
1638 | { INDEX_op_qemu_ld64, { "r", "r", "L" } }, | |
1639 | ||
1640 | { INDEX_op_qemu_st8, { "LZ", "L" } }, | |
1641 | { INDEX_op_qemu_st16, { "LZ", "L" } }, | |
1642 | { INDEX_op_qemu_st32, { "LZ", "L" } }, | |
1643 | { INDEX_op_qemu_st64, { "LZ", "LZ", "L" } }, | |
1644 | #else | |
1645 | { INDEX_op_qemu_ld8u, { "r", "L", "L" } }, | |
1646 | { INDEX_op_qemu_ld8s, { "r", "L", "L" } }, | |
1647 | { INDEX_op_qemu_ld16u, { "r", "L", "L" } }, | |
1648 | { INDEX_op_qemu_ld16s, { "r", "L", "L" } }, | |
1649 | { INDEX_op_qemu_ld32, { "r", "L", "L" } }, | |
1650 | { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } }, | |
1651 | ||
1652 | { INDEX_op_qemu_st8, { "LZ", "L", "L" } }, | |
1653 | { INDEX_op_qemu_st16, { "LZ", "L", "L" } }, | |
1654 | { INDEX_op_qemu_st32, { "LZ", "L", "L" } }, | |
1655 | { INDEX_op_qemu_st64, { "LZ", "LZ", "L", "L" } }, | |
1656 | #endif | |
1657 | { -1 }, | |
1658 | }; | |
1659 | ||
1660 | static int tcg_target_callee_save_regs[] = { | |
1661 | /* R2, the return address register, is saved specially | |
1662 | in the caller's frame. */ | |
1663 | /* R3, the frame pointer, is not currently modified. */ | |
1664 | TCG_REG_R4, | |
1665 | TCG_REG_R5, | |
1666 | TCG_REG_R6, | |
1667 | TCG_REG_R7, | |
1668 | TCG_REG_R8, | |
1669 | TCG_REG_R9, | |
1670 | TCG_REG_R10, | |
1671 | TCG_REG_R11, | |
1672 | TCG_REG_R12, | |
1673 | TCG_REG_R13, | |
1674 | TCG_REG_R14, | |
1675 | TCG_REG_R15, | |
1676 | TCG_REG_R16, | |
1677 | TCG_REG_R17, /* R17 is the global env. */ | |
1678 | TCG_REG_R18 | |
1679 | }; | |
1680 | ||
1681 | #define FRAME_SIZE ((-TCG_TARGET_CALL_STACK_OFFSET \ | |
1682 | + TCG_TARGET_STATIC_CALL_ARGS_SIZE \ | |
1683 | + ARRAY_SIZE(tcg_target_callee_save_regs) * 4 \ | |
1684 | + CPU_TEMP_BUF_NLONGS * sizeof(long) \ | |
1685 | + TCG_TARGET_STACK_ALIGN - 1) \ | |
1686 | & -TCG_TARGET_STACK_ALIGN) | |
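/* FRAME_SIZE rounds the raw frame requirement up to a multiple of
   TCG_TARGET_STACK_ALIGN using the usual (x + align - 1) & -align idiom.
   Purely for illustration (the real constants live in tcg-target.h): a raw
   requirement of 452 bytes with 64-byte alignment becomes
   (452 + 63) & -64 = 512. */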
1687 | ||
1688 | static void tcg_target_qemu_prologue(TCGContext *s) | |
1689 | { | |
1690 | int frame_size, i; | |
1691 | ||
1692 | frame_size = FRAME_SIZE; | |
1693 | ||
1694 | /* The return address is stored in the caller's frame. */ | |
1695 | tcg_out_st(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, -20); | |
1696 | ||
1697 | /* Allocate stack frame, saving the first register at the same time; the PA-RISC stack grows upward, so this advances %sp by the whole frame size. */ | |
1698 | tcg_out_ldst(s, tcg_target_callee_save_regs[0], | |
1699 | TCG_REG_CALL_STACK, frame_size, INSN_STWM); | |
1700 | ||
1701 | /* Save all callee saved registers. */ | |
1702 | for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { | |
1703 | tcg_out_st(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i], | |
1704 | TCG_REG_CALL_STACK, -frame_size + i * 4); | |
1705 | } | |
1706 | ||
1707 | /* Record the location of the TCG temps. */ | |
1708 | tcg_set_frame(s, TCG_REG_CALL_STACK, -frame_size + i * 4, | |
1709 | CPU_TEMP_BUF_NLONGS * sizeof(long)); | |
1710 | ||
1711 | #ifdef CONFIG_USE_GUEST_BASE | |
1712 | if (GUEST_BASE != 0) { | |
1713 | tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, GUEST_BASE); | |
1714 | tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); | |
1715 | } | |
1716 | #endif | |
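/* When user-only emulation relocates the guest address space by a nonzero
   GUEST_BASE, the base is loaded once into TCG_GUEST_BASE_REG and that
   register is reserved so the allocator never clobbers it; the qemu_ld/st
   paths can then use it as a base for guest accesses. */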
1717 | ||
1718 | tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); | |
1719 | ||
1720 | /* Jump to TB, and adjust R18 to be the return address; the copy from %r31 (the BLE link register) executes in the branch delay slot. */ | |
1721 | tcg_out32(s, INSN_BLE_SR4 | INSN_R2(tcg_target_call_iarg_regs[1])); | |
1722 | tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_R18, TCG_REG_R31); | |
1723 | ||
1724 | /* Restore callee saved registers. */ | |
1725 | tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_RP, TCG_REG_CALL_STACK, | |
1726 | -frame_size - 20); | |
1727 | for (i = 1; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { | |
1728 | tcg_out_ld(s, TCG_TYPE_PTR, tcg_target_callee_save_regs[i], | |
1729 | TCG_REG_CALL_STACK, -frame_size + i * 4); | |
1730 | } | |
1731 | ||
1732 | /* Deallocate stack frame and return; the LDWM below pops the frame from the BV delay slot. */ | |
1733 | tcg_out32(s, INSN_BV | INSN_R2(TCG_REG_RP)); | |
1734 | tcg_out_ldst(s, tcg_target_callee_save_regs[0], | |
1735 | TCG_REG_CALL_STACK, -frame_size, INSN_LDWM); | |
1736 | } | |
1737 | ||
1738 | static void tcg_target_init(TCGContext *s) | |
1739 | { | |
1740 | tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff); | |
1741 | ||
1742 | tcg_regset_clear(tcg_target_call_clobber_regs); | |
1743 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R20); | |
1744 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R21); | |
1745 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R22); | |
1746 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R23); | |
1747 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R24); | |
1748 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R25); | |
1749 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_R26); | |
1750 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET0); | |
1751 | tcg_regset_set_reg(tcg_target_call_clobber_regs, TCG_REG_RET1); | |
1752 | ||
1753 | tcg_regset_clear(s->reserved_regs); | |
1754 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0); /* hardwired to zero */ | |
1755 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1); /* addil target */ | |
1756 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP); /* link register */ | |
1757 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3); /* frame pointer */ | |
1758 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */ | |
1759 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */ | |
1760 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */ | |
1761 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP); /* data pointer */ | |
1762 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK); /* stack pointer */ | |
1763 | tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */ | |
1764 | ||
1765 | tcg_add_target_add_op_defs(hppa_op_defs); | |
1766 | } | |
1767 | ||
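/* What follows is a minimal DWARF CIE/FDE describing the prologue above, so
   that tcg_register_jit() below can hand unwind information for the
   generated code to consumers such as GDB's JIT interface. */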
1768 | typedef struct { | |
1769 | DebugFrameCIE cie; | |
1770 | DebugFrameFDEHeader fde; | |
1771 | uint8_t fde_def_cfa[4]; | |
1772 | uint8_t fde_ret_ofs[3]; | |
1773 | uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; | |
1774 | } DebugFrame; | |
1775 | ||
1776 | #define ELF_HOST_MACHINE EM_PARISC | |
1777 | #define ELF_HOST_FLAGS EFA_PARISC_1_1 | |
1778 | ||
1779 | /* ??? BFD (and thus GDB) wants very much to distinguish between HPUX | |
1780 | and other extensions. We don't really care, but if we don't set this | |
1781 | to *something* then the object file won't be properly matched. */ | |
1782 | #define ELF_OSABI ELFOSABI_LINUX | |
1783 | ||
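/* Sketch of the fde_def_cfa encoding below, assuming FRAME_SIZE fits in 14
   bits: DW_CFA_def_cfa_sf takes a register (30 = %sp) and a signed LEB128
   offset.  The two data bytes are (-FRAME_SIZE & 0x7f) | 0x80 (low seven
   bits, continuation set) followed by (-FRAME_SIZE >> 7) & 0x7f.  E.g. if
   FRAME_SIZE were 704 (an illustrative value only), -704 would encode as
   0xc0 0x7a. */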
1784 | static DebugFrame debug_frame = { | |
1785 | .cie.len = sizeof(DebugFrameCIE)-4, /* length after .len member */ | |
1786 | .cie.id = -1, | |
1787 | .cie.version = 1, | |
1788 | .cie.code_align = 1, | |
1789 | .cie.data_align = 1, | |
1790 | .cie.return_column = 2, | |
1791 | ||
1792 | /* Total FDE size does not include the "len" member. */ | |
1793 | .fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, fde.cie_offset), | |
1794 | ||
1795 | .fde_def_cfa = { | |
1796 | 0x12, 30, /* DW_CFA_def_cfa_sf sp, ... */ | |
1797 | (-FRAME_SIZE & 0x7f) | 0x80, /* ... sleb128 -FRAME_SIZE */ | |
1798 | (-FRAME_SIZE >> 7) & 0x7f | |
1799 | }, | |
1800 | .fde_ret_ofs = { | |
1801 | 0x11, 2, (-20 / 4) & 0x7f /* DW_CFA_offset_extended_sf r2, 20 */ | |
1802 | }, | |
1803 | .fde_reg_ofs = { | |
1804 | /* This must match the ordering in tcg_target_callee_save_regs. */ | |
1805 | 0x80 + 4, 0, /* DW_CFA_offset r4, 0 */ | |
1806 | 0x80 + 5, 4, /* DW_CFA_offset r5, 4 */ | |
1807 | 0x80 + 6, 8, /* DW_CFA_offset r6, 8 */ | |
1808 | 0x80 + 7, 12, /* ... */ | |
1809 | 0x80 + 8, 16, | |
1810 | 0x80 + 9, 20, | |
1811 | 0x80 + 10, 24, | |
1812 | 0x80 + 11, 28, | |
1813 | 0x80 + 12, 32, | |
1814 | 0x80 + 13, 36, | |
1815 | 0x80 + 14, 40, | |
1816 | 0x80 + 15, 44, | |
1817 | 0x80 + 16, 48, | |
1818 | 0x80 + 17, 52, | |
1819 | 0x80 + 18, 56, | |
1820 | } | |
1821 | }; | |
1822 | ||
1823 | void tcg_register_jit(void *buf, size_t buf_size) | |
1824 | { | |
1825 | debug_frame.fde.func_start = (tcg_target_long) buf; | |
1826 | debug_frame.fde.func_len = buf_size; | |
1827 | ||
1828 | tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); | |
1829 | } |