/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2009, 2011 Stefan Weil
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/* TODO list:
 * - See TODO comments in code.
 */

/* Marker for missing code. */
#define TODO() \
    do { \
        fprintf(stderr, "TODO %s:%u: %s()\n", \
                __FILE__, __LINE__, __func__); \
        tcg_abort(); \
    } while (0)

/* Single bit n. */
#define BIT(n) (1 << (n))

/* Bitfield n...m (in 32 bit value). */
#define BITS(n, m) (((0xffffffffU << (31 - (n))) >> (31 - (n) + (m))) << (m))
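/* For illustration, BITS(7, 4) selects bits 7..4 and evaluates to 0xf0:
 *   0xffffffffU << (31 - 7)      == 0xff000000
 *   0xff000000  >> (31 - 7 + 4)  == 0x0000000f
 *   0x0000000f  << 4             == 0x000000f0
 */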

/* Used for function call generation. */
#define TCG_REG_CALL_STACK TCG_REG_R4
#define TCG_TARGET_STACK_ALIGN 16
#define TCG_TARGET_CALL_STACK_OFFSET 0

/* TODO: documentation. */
static uint8_t *tb_ret_addr;

/* Macros used in tcg_target_op_defs. */
#define R "r"
#define RI "ri"
#if TCG_TARGET_REG_BITS == 32
# define R64 "r", "r"
#else
# define R64 "r"
#endif
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
# define L "L", "L"
# define S "S", "S"
#else
# define L "L"
# define S "S"
#endif

/* TODO: documentation. */
static const TCGTargetOpDef tcg_target_op_defs[] = {
    { INDEX_op_exit_tb, { NULL } },
    { INDEX_op_goto_tb, { NULL } },
    { INDEX_op_call, { RI } },
    { INDEX_op_jmp, { RI } },
    { INDEX_op_br, { NULL } },

    { INDEX_op_mov_i32, { R, R } },
    { INDEX_op_movi_i32, { R } },

    { INDEX_op_ld8u_i32, { R, R } },
    { INDEX_op_ld8s_i32, { R, R } },
    { INDEX_op_ld16u_i32, { R, R } },
    { INDEX_op_ld16s_i32, { R, R } },
    { INDEX_op_ld_i32, { R, R } },
    { INDEX_op_st8_i32, { R, R } },
    { INDEX_op_st16_i32, { R, R } },
    { INDEX_op_st_i32, { R, R } },

    { INDEX_op_add_i32, { R, RI, RI } },
    { INDEX_op_sub_i32, { R, RI, RI } },
    { INDEX_op_mul_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_div_i32
    { INDEX_op_div_i32, { R, R, R } },
    { INDEX_op_divu_i32, { R, R, R } },
    { INDEX_op_rem_i32, { R, R, R } },
    { INDEX_op_remu_i32, { R, R, R } },
#elif TCG_TARGET_HAS_div2_i32
    { INDEX_op_div2_i32, { R, R, "0", "1", R } },
    { INDEX_op_divu2_i32, { R, R, "0", "1", R } },
#endif
    /* TODO: Does R, RI, RI result in faster code than R, R, RI?
       If both operands are constants, we can optimize. */
    { INDEX_op_and_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_andc_i32
    { INDEX_op_andc_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_eqv_i32
    { INDEX_op_eqv_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nand_i32
    { INDEX_op_nand_i32, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nor_i32
    { INDEX_op_nor_i32, { R, RI, RI } },
#endif
    { INDEX_op_or_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_orc_i32
    { INDEX_op_orc_i32, { R, RI, RI } },
#endif
    { INDEX_op_xor_i32, { R, RI, RI } },
    { INDEX_op_shl_i32, { R, RI, RI } },
    { INDEX_op_shr_i32, { R, RI, RI } },
    { INDEX_op_sar_i32, { R, RI, RI } },
#if TCG_TARGET_HAS_rot_i32
    { INDEX_op_rotl_i32, { R, RI, RI } },
    { INDEX_op_rotr_i32, { R, RI, RI } },
#endif

    { INDEX_op_brcond_i32, { R, RI } },

    { INDEX_op_setcond_i32, { R, R, RI } },
#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_setcond_i64, { R, R, RI } },
#endif /* TCG_TARGET_REG_BITS == 64 */

#if TCG_TARGET_REG_BITS == 32
    /* TODO: Support R, R, R, R, RI, RI? Will it be faster? */
    { INDEX_op_add2_i32, { R, R, R, R, R, R } },
    { INDEX_op_sub2_i32, { R, R, R, R, R, R } },
    { INDEX_op_brcond2_i32, { R, R, RI, RI } },
    { INDEX_op_mulu2_i32, { R, R, R, R } },
    { INDEX_op_setcond2_i32, { R, R, R, RI, RI } },
#endif

#if TCG_TARGET_HAS_not_i32
    { INDEX_op_not_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_neg_i32
    { INDEX_op_neg_i32, { R, R } },
#endif

#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_mov_i64, { R, R } },
    { INDEX_op_movi_i64, { R } },

    { INDEX_op_ld8u_i64, { R, R } },
    { INDEX_op_ld8s_i64, { R, R } },
    { INDEX_op_ld16u_i64, { R, R } },
    { INDEX_op_ld16s_i64, { R, R } },
    { INDEX_op_ld32u_i64, { R, R } },
    { INDEX_op_ld32s_i64, { R, R } },
    { INDEX_op_ld_i64, { R, R } },

    { INDEX_op_st8_i64, { R, R } },
    { INDEX_op_st16_i64, { R, R } },
    { INDEX_op_st32_i64, { R, R } },
    { INDEX_op_st_i64, { R, R } },

    { INDEX_op_add_i64, { R, RI, RI } },
    { INDEX_op_sub_i64, { R, RI, RI } },
    { INDEX_op_mul_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_div_i64
    { INDEX_op_div_i64, { R, R, R } },
    { INDEX_op_divu_i64, { R, R, R } },
    { INDEX_op_rem_i64, { R, R, R } },
    { INDEX_op_remu_i64, { R, R, R } },
#elif TCG_TARGET_HAS_div2_i64
    { INDEX_op_div2_i64, { R, R, "0", "1", R } },
    { INDEX_op_divu2_i64, { R, R, "0", "1", R } },
#endif
    { INDEX_op_and_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_andc_i64
    { INDEX_op_andc_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_eqv_i64
    { INDEX_op_eqv_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nand_i64
    { INDEX_op_nand_i64, { R, RI, RI } },
#endif
#if TCG_TARGET_HAS_nor_i64
    { INDEX_op_nor_i64, { R, RI, RI } },
#endif
    { INDEX_op_or_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_orc_i64
    { INDEX_op_orc_i64, { R, RI, RI } },
#endif
    { INDEX_op_xor_i64, { R, RI, RI } },
    { INDEX_op_shl_i64, { R, RI, RI } },
    { INDEX_op_shr_i64, { R, RI, RI } },
    { INDEX_op_sar_i64, { R, RI, RI } },
#if TCG_TARGET_HAS_rot_i64
    { INDEX_op_rotl_i64, { R, RI, RI } },
    { INDEX_op_rotr_i64, { R, RI, RI } },
#endif
    { INDEX_op_brcond_i64, { R, RI } },

#if TCG_TARGET_HAS_ext8s_i64
    { INDEX_op_ext8s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16s_i64
    { INDEX_op_ext16s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext32s_i64
    { INDEX_op_ext32s_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext8u_i64
    { INDEX_op_ext8u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16u_i64
    { INDEX_op_ext16u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_ext32u_i64
    { INDEX_op_ext32u_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap16_i64
    { INDEX_op_bswap16_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap32_i64
    { INDEX_op_bswap32_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap64_i64
    { INDEX_op_bswap64_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_not_i64
    { INDEX_op_not_i64, { R, R } },
#endif
#if TCG_TARGET_HAS_neg_i64
    { INDEX_op_neg_i64, { R, R } },
#endif
#endif /* TCG_TARGET_REG_BITS == 64 */

    { INDEX_op_qemu_ld8u, { R, L } },
    { INDEX_op_qemu_ld8s, { R, L } },
    { INDEX_op_qemu_ld16u, { R, L } },
    { INDEX_op_qemu_ld16s, { R, L } },
    { INDEX_op_qemu_ld32, { R, L } },
#if TCG_TARGET_REG_BITS == 64
    { INDEX_op_qemu_ld32u, { R, L } },
    { INDEX_op_qemu_ld32s, { R, L } },
#endif
    { INDEX_op_qemu_ld64, { R64, L } },

    { INDEX_op_qemu_st8, { R, S } },
    { INDEX_op_qemu_st16, { R, S } },
    { INDEX_op_qemu_st32, { R, S } },
    { INDEX_op_qemu_st64, { R64, S } },

#if TCG_TARGET_HAS_ext8s_i32
    { INDEX_op_ext8s_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16s_i32
    { INDEX_op_ext16s_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext8u_i32
    { INDEX_op_ext8u_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_ext16u_i32
    { INDEX_op_ext16u_i32, { R, R } },
#endif

#if TCG_TARGET_HAS_bswap16_i32
    { INDEX_op_bswap16_i32, { R, R } },
#endif
#if TCG_TARGET_HAS_bswap32_i32
    { INDEX_op_bswap32_i32, { R, R } },
#endif

    { -1 },
};
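
/* Note: the single character macros above expand to TCG constraint strings:
 * "r" accepts any register, "ri" a register or immediate, and "L"/"S" are
 * the qemu_ld/qemu_st constraints parsed by target_parse_constraint()
 * below.  On 32 bit hosts, R64 expands to two register operands that hold
 * one 64 bit value. */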

static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
#if 0 /* used for TCG_REG_CALL_STACK */
    TCG_REG_R4,
#endif
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
#if TCG_TARGET_NB_REGS >= 16
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
#endif
};

#if MAX_OPC_PARAM_IARGS != 4
# error Fix needed, number of supported input arguments changed!
#endif

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_R0,
    TCG_REG_R1,
    TCG_REG_R2,
    TCG_REG_R3,
#if TCG_TARGET_REG_BITS == 32
    /* 32 bit hosts need 2 * MAX_OPC_PARAM_IARGS registers. */
#if 0 /* used for TCG_REG_CALL_STACK */
    TCG_REG_R4,
#endif
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
#if TCG_TARGET_NB_REGS >= 16
    TCG_REG_R8,
#else
# error Too few input registers available
#endif
#endif
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_R0,
#if TCG_TARGET_REG_BITS == 32
    TCG_REG_R1
#endif
};

#ifndef NDEBUG
static const char *const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "r00",
    "r01",
    "r02",
    "r03",
    "r04",
    "r05",
    "r06",
    "r07",
#if TCG_TARGET_NB_REGS >= 16
    "r08",
    "r09",
    "r10",
    "r11",
    "r12",
    "r13",
    "r14",
    "r15",
#if TCG_TARGET_NB_REGS >= 32
    "r16",
    "r17",
    "r18",
    "r19",
    "r20",
    "r21",
    "r22",
    "r23",
    "r24",
    "r25",
    "r26",
    "r27",
    "r28",
    "r29",
    "r30",
    "r31"
#endif
#endif
};
#endif

static void flush_icache_range(unsigned long start, unsigned long stop)
{
}

static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    /* tcg_out_reloc always uses the same type, addend. */
    assert(type == sizeof(tcg_target_long));
    assert(addend == 0);
    assert(value != 0);
    *(tcg_target_long *)code_ptr = value;
}

/* Parse target specific constraints. */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
    case 'L':                   /* qemu_ld constraint */
    case 'S':                   /* qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, BIT(TCG_TARGET_NB_REGS) - 1);
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
/* Show current bytecode. Used by tcg interpreter. */
void tci_disas(uint8_t opc)
{
    const TCGOpDef *def = &tcg_op_defs[opc];
    fprintf(stderr, "TCG %s %u, %u, %u\n",
            def->name, def->nb_oargs, def->nb_iargs, def->nb_cargs);
}
#endif

/* Write value (native size). */
static void tcg_out_i(TCGContext *s, tcg_target_ulong v)
{
    *(tcg_target_ulong *)s->code_ptr = v;
    s->code_ptr += sizeof(tcg_target_ulong);
}

/* Write 64 bit value. */
static void tcg_out64(TCGContext *s, uint64_t v)
{
    *(uint64_t *)s->code_ptr = v;
    s->code_ptr += sizeof(v);
}

/* Write opcode. */
static void tcg_out_op_t(TCGContext *s, TCGOpcode op)
{
    tcg_out8(s, op);
    tcg_out8(s, 0);
}
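
/* Note: every bytecode instruction thus starts with a two byte header:
 * the opcode and a length byte.  The length byte is written as 0 here and
 * patched with the final instruction size once all operands have been
 * emitted (see the assignments to old_code_ptr[1] below). */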

/* Write register. */
static void tcg_out_r(TCGContext *s, TCGArg t0)
{
    assert(t0 < TCG_TARGET_NB_REGS);
    tcg_out8(s, t0);
}

/* Write register or constant (native size). */
static void tcg_out_ri(TCGContext *s, int const_arg, TCGArg arg)
{
    if (const_arg) {
        assert(const_arg == 1);
        tcg_out8(s, TCG_CONST);
        tcg_out_i(s, arg);
    } else {
        tcg_out_r(s, arg);
    }
}

/* Write register or constant (32 bit). */
static void tcg_out_ri32(TCGContext *s, int const_arg, TCGArg arg)
{
    if (const_arg) {
        assert(const_arg == 1);
        tcg_out8(s, TCG_CONST);
        tcg_out32(s, arg);
    } else {
        tcg_out_r(s, arg);
    }
}

#if TCG_TARGET_REG_BITS == 64
/* Write register or constant (64 bit). */
static void tcg_out_ri64(TCGContext *s, int const_arg, TCGArg arg)
{
    if (const_arg) {
        assert(const_arg == 1);
        tcg_out8(s, TCG_CONST);
        tcg_out64(s, arg);
    } else {
        tcg_out_r(s, arg);
    }
}
#endif

/* Write label. */
static void tci_out_label(TCGContext *s, TCGArg arg)
{
    TCGLabel *label = &s->labels[arg];
    if (label->has_value) {
        tcg_out_i(s, label->u.value);
        assert(label->u.value);
    } else {
        tcg_out_reloc(s, s->code_ptr, sizeof(tcg_target_ulong), arg, 0);
        tcg_out_i(s, 0);
    }
}
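
/* Note: a forward reference emits a zero placeholder and records a
 * relocation via tcg_out_reloc(); patch_reloc() above later overwrites
 * the placeholder with the label's absolute address. */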

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
                       tcg_target_long arg2)
{
    uint8_t *old_code_ptr = s->code_ptr;
    if (type == TCG_TYPE_I32) {
        tcg_out_op_t(s, INDEX_op_ld_i32);
        tcg_out_r(s, ret);
        tcg_out_r(s, arg1);
        tcg_out32(s, arg2);
    } else {
        assert(type == TCG_TYPE_I64);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_op_t(s, INDEX_op_ld_i64);
        tcg_out_r(s, ret);
        tcg_out_r(s, arg1);
        assert(arg2 == (uint32_t)arg2);
        tcg_out32(s, arg2);
#else
        TODO();
#endif
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    uint8_t *old_code_ptr = s->code_ptr;
    assert(ret != arg);
#if TCG_TARGET_REG_BITS == 32
    tcg_out_op_t(s, INDEX_op_mov_i32);
#else
    tcg_out_op_t(s, INDEX_op_mov_i64);
#endif
    tcg_out_r(s, ret);
    tcg_out_r(s, arg);
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static void tcg_out_movi(TCGContext *s, TCGType type,
                         TCGReg t0, tcg_target_long arg)
{
    uint8_t *old_code_ptr = s->code_ptr;
    uint32_t arg32 = arg;
    if (type == TCG_TYPE_I32 || arg == arg32) {
        tcg_out_op_t(s, INDEX_op_movi_i32);
        tcg_out_r(s, t0);
        tcg_out32(s, arg32);
    } else {
        assert(type == TCG_TYPE_I64);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_op_t(s, INDEX_op_movi_i64);
        tcg_out_r(s, t0);
        tcg_out64(s, arg);
#else
        TODO();
#endif
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}
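
/* For illustration: tcg_out_movi(s, TCG_TYPE_I32, t0, 0x12345678) emits
 * seven bytes (opcode, length byte patched to 7, one register byte, and
 * the 32 bit immediate), a small example of the variable length encoding. */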

static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
                       const int *const_args)
{
    uint8_t *old_code_ptr = s->code_ptr;

    tcg_out_op_t(s, opc);

    switch (opc) {
    case INDEX_op_exit_tb:
        tcg_out64(s, args[0]);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* Direct jump method. */
            assert(args[0] < ARRAY_SIZE(s->tb_jmp_offset));
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* Indirect jump method. */
            TODO();
        }
        assert(args[0] < ARRAY_SIZE(s->tb_next_offset));
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_br:
        tci_out_label(s, args[0]);
        break;
    case INDEX_op_call:
        tcg_out_ri(s, const_args[0], args[0]);
        break;
    case INDEX_op_jmp:
        TODO();
        break;
    case INDEX_op_setcond_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        tcg_out8(s, args[3]);   /* condition */
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_setcond2_i32:
        /* setcond2_i32 cond, t0, t1_low, t1_high, t2_low, t2_high */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_ri32(s, const_args[3], args[3]);
        tcg_out_ri32(s, const_args[4], args[4]);
        tcg_out8(s, args[5]);   /* condition */
        break;
#elif TCG_TARGET_REG_BITS == 64
    case INDEX_op_setcond_i64:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri64(s, const_args[2], args[2]);
        tcg_out8(s, args[3]);   /* condition */
        break;
#endif
    case INDEX_op_movi_i32:
        TODO(); /* Handled by tcg_out_movi? */
        break;
    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        assert(args[2] == (uint32_t)args[2]);
        tcg_out32(s, args[2]);
        break;
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_andc_i32:     /* Optional (TCG_TARGET_HAS_andc_i32). */
    case INDEX_op_eqv_i32:      /* Optional (TCG_TARGET_HAS_eqv_i32). */
    case INDEX_op_nand_i32:     /* Optional (TCG_TARGET_HAS_nand_i32). */
    case INDEX_op_nor_i32:      /* Optional (TCG_TARGET_HAS_nor_i32). */
    case INDEX_op_or_i32:
    case INDEX_op_orc_i32:      /* Optional (TCG_TARGET_HAS_orc_i32). */
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_rotl_i32:     /* Optional (TCG_TARGET_HAS_rot_i32). */
    case INDEX_op_rotr_i32:     /* Optional (TCG_TARGET_HAS_rot_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        break;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i64:
        TODO();
        break;
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_andc_i64:     /* Optional (TCG_TARGET_HAS_andc_i64). */
    case INDEX_op_eqv_i64:      /* Optional (TCG_TARGET_HAS_eqv_i64). */
    case INDEX_op_nand_i64:     /* Optional (TCG_TARGET_HAS_nand_i64). */
    case INDEX_op_nor_i64:      /* Optional (TCG_TARGET_HAS_nor_i64). */
    case INDEX_op_or_i64:
    case INDEX_op_orc_i64:      /* Optional (TCG_TARGET_HAS_orc_i64). */
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        /* TODO: Implementation of rotl_i64, rotr_i64 missing in tci.c. */
    case INDEX_op_rotl_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
    case INDEX_op_rotr_i64:     /* Optional (TCG_TARGET_HAS_rot_i64). */
        tcg_out_r(s, args[0]);
        tcg_out_ri64(s, const_args[1], args[1]);
        tcg_out_ri64(s, const_args[2], args[2]);
        break;
    case INDEX_op_div_i64:      /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_divu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_rem_i64:      /* Optional (TCG_TARGET_HAS_div_i64). */
    case INDEX_op_remu_i64:     /* Optional (TCG_TARGET_HAS_div_i64). */
        TODO();
        break;
    case INDEX_op_div2_i64:     /* Optional (TCG_TARGET_HAS_div2_i64). */
    case INDEX_op_divu2_i64:    /* Optional (TCG_TARGET_HAS_div2_i64). */
        TODO();
        break;
    case INDEX_op_brcond_i64:
        tcg_out_r(s, args[0]);
        tcg_out_ri64(s, const_args[1], args[1]);
        tcg_out8(s, args[2]);   /* condition */
        tci_out_label(s, args[3]);
        break;
    case INDEX_op_bswap16_i64:  /* Optional (TCG_TARGET_HAS_bswap16_i64). */
    case INDEX_op_bswap32_i64:  /* Optional (TCG_TARGET_HAS_bswap32_i64). */
    case INDEX_op_bswap64_i64:  /* Optional (TCG_TARGET_HAS_bswap64_i64). */
    case INDEX_op_not_i64:      /* Optional (TCG_TARGET_HAS_not_i64). */
    case INDEX_op_neg_i64:      /* Optional (TCG_TARGET_HAS_neg_i64). */
    case INDEX_op_ext8s_i64:    /* Optional (TCG_TARGET_HAS_ext8s_i64). */
    case INDEX_op_ext8u_i64:    /* Optional (TCG_TARGET_HAS_ext8u_i64). */
    case INDEX_op_ext16s_i64:   /* Optional (TCG_TARGET_HAS_ext16s_i64). */
    case INDEX_op_ext16u_i64:   /* Optional (TCG_TARGET_HAS_ext16u_i64). */
    case INDEX_op_ext32s_i64:   /* Optional (TCG_TARGET_HAS_ext32s_i64). */
    case INDEX_op_ext32u_i64:   /* Optional (TCG_TARGET_HAS_ext32u_i64). */
#endif /* TCG_TARGET_REG_BITS == 64 */
    case INDEX_op_neg_i32:      /* Optional (TCG_TARGET_HAS_neg_i32). */
    case INDEX_op_not_i32:      /* Optional (TCG_TARGET_HAS_not_i32). */
    case INDEX_op_ext8s_i32:    /* Optional (TCG_TARGET_HAS_ext8s_i32). */
    case INDEX_op_ext16s_i32:   /* Optional (TCG_TARGET_HAS_ext16s_i32). */
    case INDEX_op_ext8u_i32:    /* Optional (TCG_TARGET_HAS_ext8u_i32). */
    case INDEX_op_ext16u_i32:   /* Optional (TCG_TARGET_HAS_ext16u_i32). */
    case INDEX_op_bswap16_i32:  /* Optional (TCG_TARGET_HAS_bswap16_i32). */
    case INDEX_op_bswap32_i32:  /* Optional (TCG_TARGET_HAS_bswap32_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        break;
    case INDEX_op_div_i32:      /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_divu_i32:     /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_rem_i32:      /* Optional (TCG_TARGET_HAS_div_i32). */
    case INDEX_op_remu_i32:     /* Optional (TCG_TARGET_HAS_div_i32). */
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        break;
    case INDEX_op_div2_i32:     /* Optional (TCG_TARGET_HAS_div2_i32). */
    case INDEX_op_divu2_i32:    /* Optional (TCG_TARGET_HAS_div2_i32). */
        TODO();
        break;
#if TCG_TARGET_REG_BITS == 32
    case INDEX_op_add2_i32:
    case INDEX_op_sub2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_r(s, args[3]);
        tcg_out_r(s, args[4]);
        tcg_out_r(s, args[5]);
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_ri32(s, const_args[2], args[2]);
        tcg_out_ri32(s, const_args[3], args[3]);
        tcg_out8(s, args[4]);   /* condition */
        tci_out_label(s, args[5]);
        break;
    case INDEX_op_mulu2_i32:
        tcg_out_r(s, args[0]);
        tcg_out_r(s, args[1]);
        tcg_out_r(s, args[2]);
        tcg_out_r(s, args[3]);
        break;
#endif
    case INDEX_op_brcond_i32:
        tcg_out_r(s, args[0]);
        tcg_out_ri32(s, const_args[1], args[1]);
        tcg_out8(s, args[2]);   /* condition */
        tci_out_label(s, args[3]);
        break;
    case INDEX_op_qemu_ld8u:
    case INDEX_op_qemu_ld8s:
    case INDEX_op_qemu_ld16u:
    case INDEX_op_qemu_ld16s:
    case INDEX_op_qemu_ld32:
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_qemu_ld32s:
    case INDEX_op_qemu_ld32u:
#endif
        tcg_out_r(s, *args++);
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_qemu_ld64:
        tcg_out_r(s, *args++);
#if TCG_TARGET_REG_BITS == 32
        tcg_out_r(s, *args++);
#endif
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_qemu_st8:
    case INDEX_op_qemu_st16:
    case INDEX_op_qemu_st32:
        tcg_out_r(s, *args++);
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_qemu_st64:
        tcg_out_r(s, *args++);
#if TCG_TARGET_REG_BITS == 32
        tcg_out_r(s, *args++);
#endif
        tcg_out_r(s, *args++);
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
        tcg_out_r(s, *args++);
#endif
#ifdef CONFIG_SOFTMMU
        tcg_out_i(s, *args);
#endif
        break;
    case INDEX_op_end:
        TODO();
        break;
    default:
        fprintf(stderr, "Missing: %s\n", tcg_op_defs[opc].name);
        tcg_abort();
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1,
                       tcg_target_long arg2)
{
    uint8_t *old_code_ptr = s->code_ptr;
    if (type == TCG_TYPE_I32) {
        tcg_out_op_t(s, INDEX_op_st_i32);
        tcg_out_r(s, arg);
        tcg_out_r(s, arg1);
        tcg_out32(s, arg2);
    } else {
        assert(type == TCG_TYPE_I64);
#if TCG_TARGET_REG_BITS == 64
        tcg_out_op_t(s, INDEX_op_st_i64);
        tcg_out_r(s, arg);
        tcg_out_r(s, arg1);
        tcg_out32(s, arg2);
#else
        TODO();
#endif
    }
    old_code_ptr[1] = s->code_ptr - old_code_ptr;
}

/* Test if a constant matches the constraint. */
static int tcg_target_const_match(tcg_target_long val,
                                  const TCGArgConstraint *arg_ct)
{
    /* No need to return 0 or 1; zero vs. non-zero is good enough. */
    return arg_ct->ct & TCG_CT_CONST;
}
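
/* Note: any constant is accepted here, presumably because tcg_out_ri()
 * and friends encode immediates inline at full width, so no range check
 * is required. */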

/* Maximum number of registers used for input function arguments. */
static int tcg_target_get_call_iarg_regs_count(int flags)
{
    return ARRAY_SIZE(tcg_target_call_iarg_regs);
}

static void tcg_target_init(TCGContext *s)
{
#if defined(CONFIG_DEBUG_TCG_INTERPRETER)
    const char *envval = getenv("DEBUG_TCG");
    if (envval) {
        loglevel = strtol(envval, NULL, 0);
    }
#endif

    /* The current code uses uint8_t for tcg operations. */
    assert(ARRAY_SIZE(tcg_op_defs) <= UINT8_MAX);

    /* Registers available for 32 bit operations. */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0,
                     BIT(TCG_TARGET_NB_REGS) - 1);
    /* Registers available for 64 bit operations. */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0,
                     BIT(TCG_TARGET_NB_REGS) - 1);
    /* TODO: Which registers should be set here? */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     BIT(TCG_TARGET_NB_REGS) - 1);
    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
    tcg_add_target_add_op_defs(tcg_target_op_defs);
    tcg_set_frame(s, TCG_AREG0, offsetof(CPUState, temp_buf),
                  CPU_TEMP_BUF_NLONGS * sizeof(long));
}

/* Generate global QEMU prologue and epilogue code. */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    tb_ret_addr = s->code_ptr;
}
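
/* Note: no host instructions are emitted here; for the bytecode
 * interpreter, recording the return address used by INDEX_op_exit_tb
 * appears to be sufficient. */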