/* tcg.c — QEMU Tiny Code Generator core (table-export artifacts removed) */
1 | /* | |
2 | * Tiny Code Generator for QEMU | |
3 | * | |
4 | * Copyright (c) 2008 Fabrice Bellard | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a copy | |
7 | * of this software and associated documentation files (the "Software"), to deal | |
8 | * in the Software without restriction, including without limitation the rights | |
9 | * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | |
10 | * copies of the Software, and to permit persons to whom the Software is | |
11 | * furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
20 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | |
21 | * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | |
22 | * THE SOFTWARE. | |
23 | */ | |
24 | ||
25 | /* define it to use liveness analysis (better code) */ | |
26 | #define USE_LIVENESS_ANALYSIS | |
27 | #define USE_TCG_OPTIMIZATIONS | |
28 | ||
29 | #include "config.h" | |
30 | ||
31 | /* Define to jump the ELF file used to communicate with GDB. */ | |
32 | #undef DEBUG_JIT | |
33 | ||
34 | #if !defined(CONFIG_DEBUG_TCG) && !defined(NDEBUG) | |
35 | /* define it to suppress various consistency checks (faster) */ | |
36 | #define NDEBUG | |
37 | #endif | |
38 | ||
#include <string.h>

#include "qemu-common.h"
#include "qemu/cache-utils.h"
#include "qemu/host-utils.h"
#include "qemu/timer.h"
43 | ||
44 | /* Note: the long term plan is to reduce the dependancies on the QEMU | |
45 | CPU definitions. Currently they are used for qemu_ld/st | |
46 | instructions */ | |
47 | #define NO_CPU_IO_DEFS | |
48 | #include "cpu.h" | |
49 | ||
50 | #include "tcg-op.h" | |
51 | ||
52 | #if TCG_TARGET_REG_BITS == 64 | |
53 | # define ELF_CLASS ELFCLASS64 | |
54 | #else | |
55 | # define ELF_CLASS ELFCLASS32 | |
56 | #endif | |
57 | #ifdef HOST_WORDS_BIGENDIAN | |
58 | # define ELF_DATA ELFDATA2MSB | |
59 | #else | |
60 | # define ELF_DATA ELFDATA2LSB | |
61 | #endif | |
62 | ||
63 | #include "elf.h" | |
64 | ||
65 | /* Forward declarations for functions declared in tcg-target.c and used here. */ | |
66 | static void tcg_target_init(TCGContext *s); | |
67 | static void tcg_target_qemu_prologue(TCGContext *s); | |
68 | static void patch_reloc(uint8_t *code_ptr, int type, | |
69 | tcg_target_long value, tcg_target_long addend); | |
70 | ||
71 | static void tcg_register_jit_int(void *buf, size_t size, | |
72 | void *debug_frame, size_t debug_frame_size) | |
73 | __attribute__((unused)); | |
74 | ||
75 | /* Forward declarations for functions declared and used in tcg-target.c. */ | |
76 | static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str); | |
77 | static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1, | |
78 | tcg_target_long arg2); | |
79 | static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg); | |
80 | static void tcg_out_movi(TCGContext *s, TCGType type, | |
81 | TCGReg ret, tcg_target_long arg); | |
82 | static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args, | |
83 | const int *const_args); | |
84 | static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, | |
85 | tcg_target_long arg2); | |
86 | static int tcg_target_const_match(tcg_target_long val, | |
87 | const TCGArgConstraint *arg_ct); | |
88 | ||
/* Table of all TCG opcode definitions, generated from tcg-opc.h.  Each
   entry stores the opcode name and its output/input/constant argument
   counts; the fifth field is the total argument count. */
TCGOpDef tcg_op_defs[] = {
#define DEF(s, oargs, iargs, cargs, flags) { #s, oargs, iargs, cargs, iargs + oargs + cargs, flags },
#include "tcg-opc.h"
#undef DEF
};
const size_t tcg_op_defs_max = ARRAY_SIZE(tcg_op_defs);
95 | ||
/* Registers usable by the allocator, indexed by TCGType (I32/I64),
   and the set of registers clobbered across a call; both are filled
   in by tcg_target_init(). */
static TCGRegSet tcg_target_available_regs[2];
static TCGRegSet tcg_target_call_clobber_regs;
98 | ||
99 | static inline void tcg_out8(TCGContext *s, uint8_t v) | |
100 | { | |
101 | *s->code_ptr++ = v; | |
102 | } | |
103 | ||
104 | static inline void tcg_out16(TCGContext *s, uint16_t v) | |
105 | { | |
106 | *(uint16_t *)s->code_ptr = v; | |
107 | s->code_ptr += 2; | |
108 | } | |
109 | ||
110 | static inline void tcg_out32(TCGContext *s, uint32_t v) | |
111 | { | |
112 | *(uint32_t *)s->code_ptr = v; | |
113 | s->code_ptr += 4; | |
114 | } | |
115 | ||
116 | /* label relocation processing */ | |
117 | ||
118 | static void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type, | |
119 | int label_index, long addend) | |
120 | { | |
121 | TCGLabel *l; | |
122 | TCGRelocation *r; | |
123 | ||
124 | l = &s->labels[label_index]; | |
125 | if (l->has_value) { | |
126 | /* FIXME: This may break relocations on RISC targets that | |
127 | modify instruction fields in place. The caller may not have | |
128 | written the initial value. */ | |
129 | patch_reloc(code_ptr, type, l->u.value, addend); | |
130 | } else { | |
131 | /* add a new relocation entry */ | |
132 | r = tcg_malloc(sizeof(TCGRelocation)); | |
133 | r->type = type; | |
134 | r->ptr = code_ptr; | |
135 | r->addend = addend; | |
136 | r->next = l->u.first_reloc; | |
137 | l->u.first_reloc = r; | |
138 | } | |
139 | } | |
140 | ||
141 | static void tcg_out_label(TCGContext *s, int label_index, void *ptr) | |
142 | { | |
143 | TCGLabel *l; | |
144 | TCGRelocation *r; | |
145 | tcg_target_long value = (tcg_target_long)ptr; | |
146 | ||
147 | l = &s->labels[label_index]; | |
148 | if (l->has_value) | |
149 | tcg_abort(); | |
150 | r = l->u.first_reloc; | |
151 | while (r != NULL) { | |
152 | patch_reloc(r->ptr, r->type, value, r->addend); | |
153 | r = r->next; | |
154 | } | |
155 | l->has_value = 1; | |
156 | l->u.value = value; | |
157 | } | |
158 | ||
159 | int gen_new_label(void) | |
160 | { | |
161 | TCGContext *s = &tcg_ctx; | |
162 | int idx; | |
163 | TCGLabel *l; | |
164 | ||
165 | if (s->nb_labels >= TCG_MAX_LABELS) | |
166 | tcg_abort(); | |
167 | idx = s->nb_labels++; | |
168 | l = &s->labels[idx]; | |
169 | l->has_value = 0; | |
170 | l->u.first_reloc = NULL; | |
171 | return idx; | |
172 | } | |
173 | ||
174 | #include "tcg-target.c" | |
175 | ||
176 | /* pool based memory allocation */ | |
/*
 * Slow path of tcg_malloc(): allocate SIZE bytes from the context's
 * memory pool.  Requests larger than TCG_POOL_CHUNK_SIZE get their own
 * dedicated pool on the "large" list (freed at reset); smaller requests
 * come from a chain of fixed-size chunks that is reused across resets.
 * Returned memory lives until the next tcg_pool_reset().
 */
void *tcg_malloc_internal(TCGContext *s, int size)
{
    TCGPool *p;
    int pool_size;

    if (size > TCG_POOL_CHUNK_SIZE) {
        /* big malloc: insert a new pool (XXX: could optimize) */
        p = g_malloc(sizeof(TCGPool) + size);
        p->size = size;
        p->next = s->pool_first_large;
        s->pool_first_large = p;
        return p->data;
    } else {
        p = s->pool_current;
        if (!p) {
            /* nothing in use yet: try reusing the first retained chunk */
            p = s->pool_first;
            if (!p)
                goto new_pool;
        } else {
            if (!p->next) {
            new_pool:
                /* no chunk available: allocate one and append it */
                pool_size = TCG_POOL_CHUNK_SIZE;
                p = g_malloc(sizeof(TCGPool) + pool_size);
                p->size = pool_size;
                p->next = NULL;
                if (s->pool_current)
                    s->pool_current->next = p;
                else
                    s->pool_first = p;
            } else {
                /* advance to the next retained chunk */
                p = p->next;
            }
        }
    }
    /* start carving the new current chunk; cur/end drive the fast path */
    s->pool_current = p;
    s->pool_cur = p->data + size;
    s->pool_end = p->data + p->size;
    return p->data;
}
216 | ||
217 | void tcg_pool_reset(TCGContext *s) | |
218 | { | |
219 | TCGPool *p, *t; | |
220 | for (p = s->pool_first_large; p; p = t) { | |
221 | t = p->next; | |
222 | g_free(p); | |
223 | } | |
224 | s->pool_first_large = NULL; | |
225 | s->pool_cur = s->pool_end = NULL; | |
226 | s->pool_current = NULL; | |
227 | } | |
228 | ||
229 | void tcg_context_init(TCGContext *s) | |
230 | { | |
231 | int op, total_args, n; | |
232 | TCGOpDef *def; | |
233 | TCGArgConstraint *args_ct; | |
234 | int *sorted_args; | |
235 | ||
236 | memset(s, 0, sizeof(*s)); | |
237 | s->nb_globals = 0; | |
238 | ||
239 | /* Count total number of arguments and allocate the corresponding | |
240 | space */ | |
241 | total_args = 0; | |
242 | for(op = 0; op < NB_OPS; op++) { | |
243 | def = &tcg_op_defs[op]; | |
244 | n = def->nb_iargs + def->nb_oargs; | |
245 | total_args += n; | |
246 | } | |
247 | ||
248 | args_ct = g_malloc(sizeof(TCGArgConstraint) * total_args); | |
249 | sorted_args = g_malloc(sizeof(int) * total_args); | |
250 | ||
251 | for(op = 0; op < NB_OPS; op++) { | |
252 | def = &tcg_op_defs[op]; | |
253 | def->args_ct = args_ct; | |
254 | def->sorted_args = sorted_args; | |
255 | n = def->nb_iargs + def->nb_oargs; | |
256 | sorted_args += n; | |
257 | args_ct += n; | |
258 | } | |
259 | ||
260 | tcg_target_init(s); | |
261 | } | |
262 | ||
/* Generate the host prologue/epilogue into the global code_gen_prologue
   buffer via the backend, then flush the icache over the emitted range
   so the host can execute it. */
void tcg_prologue_init(TCGContext *s)
{
    /* init global prologue and epilogue */
    s->code_buf = code_gen_prologue;
    s->code_ptr = s->code_buf;
    tcg_target_qemu_prologue(s);
    flush_icache_range((tcg_target_ulong)s->code_buf,
                       (tcg_target_ulong)s->code_ptr);
}
272 | ||
273 | void tcg_set_frame(TCGContext *s, int reg, | |
274 | tcg_target_long start, tcg_target_long size) | |
275 | { | |
276 | s->frame_start = start; | |
277 | s->frame_end = start + size; | |
278 | s->frame_reg = reg; | |
279 | } | |
280 | ||
/* Reset per-translation-block state: drop pool allocations, clear all
   non-global temps and free lists, rewind the opcode/parameter streams,
   and reallocate the per-TB label array. */
void tcg_func_start(TCGContext *s)
{
    int i;
    tcg_pool_reset(s);
    s->nb_temps = s->nb_globals;
    /* one free list per (type, local?) combination */
    for(i = 0; i < (TCG_TYPE_COUNT * 2); i++)
        s->first_free_temp[i] = -1;
    s->labels = tcg_malloc(sizeof(TCGLabel) * TCG_MAX_LABELS);
    s->nb_labels = 0;
    s->current_frame_offset = s->frame_start;

#ifdef CONFIG_DEBUG_TCG
    s->goto_tb_issue_mask = 0;
#endif

    s->gen_opc_ptr = s->gen_opc_buf;
    s->gen_opparam_ptr = s->gen_opparam_buf;

#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
    /* Initialize qemu_ld/st labels to assist code generation at the end of TB
       for TLB miss cases at the end of TB */
    s->qemu_ldst_labels = tcg_malloc(sizeof(TCGLabelQemuLdst) *
                                     TCG_MAX_QEMU_LDST);
    s->nb_qemu_ldst_labels = 0;
#endif
}
307 | ||
308 | static inline void tcg_temp_alloc(TCGContext *s, int n) | |
309 | { | |
310 | if (n > TCG_MAX_TEMPS) | |
311 | tcg_abort(); | |
312 | } | |
313 | ||
/* Create a global temp that lives permanently in host register REG and
   return its index.  The register is added to reserved_regs so the
   allocator never hands it out.  NAME must outlive the context (it is
   stored, not copied). */
static inline int tcg_global_reg_new_internal(TCGType type, int reg,
                                              const char *name)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *ts;
    int idx;

#if TCG_TARGET_REG_BITS == 32
    /* a 64-bit value cannot live in one 32-bit host register */
    if (type != TCG_TYPE_I32)
        tcg_abort();
#endif
    /* refuse a register that is already reserved */
    if (tcg_regset_test_reg(s->reserved_regs, reg))
        tcg_abort();
    idx = s->nb_globals;
    tcg_temp_alloc(s, s->nb_globals + 1);
    ts = &s->temps[s->nb_globals];
    ts->base_type = type;
    ts->type = type;
    ts->fixed_reg = 1;
    ts->reg = reg;
    ts->name = name;
    s->nb_globals++;
    tcg_regset_set_reg(s->reserved_regs, reg);
    return idx;
}
339 | ||
340 | TCGv_i32 tcg_global_reg_new_i32(int reg, const char *name) | |
341 | { | |
342 | int idx; | |
343 | ||
344 | idx = tcg_global_reg_new_internal(TCG_TYPE_I32, reg, name); | |
345 | return MAKE_TCGV_I32(idx); | |
346 | } | |
347 | ||
348 | TCGv_i64 tcg_global_reg_new_i64(int reg, const char *name) | |
349 | { | |
350 | int idx; | |
351 | ||
352 | idx = tcg_global_reg_new_internal(TCG_TYPE_I64, reg, name); | |
353 | return MAKE_TCGV_I64(idx); | |
354 | } | |
355 | ||
/* Create a global temp backed by memory at OFFSET from base register
   REG and return its index.  On 32-bit hosts a 64-bit global is split
   into two I32 halves ("name_0"/"name_1"), laid out so that "_0" is
   always the low half regardless of host endianness. */
static inline int tcg_global_mem_new_internal(TCGType type, int reg,
                                              tcg_target_long offset,
                                              const char *name)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *ts;
    int idx;

    idx = s->nb_globals;
#if TCG_TARGET_REG_BITS == 32
    if (type == TCG_TYPE_I64) {
        char buf[64];
        tcg_temp_alloc(s, s->nb_globals + 2);
        /* low half */
        ts = &s->temps[s->nb_globals];
        ts->base_type = type;
        ts->type = TCG_TYPE_I32;
        ts->fixed_reg = 0;
        ts->mem_allocated = 1;
        ts->mem_reg = reg;
#ifdef TCG_TARGET_WORDS_BIGENDIAN
        ts->mem_offset = offset + 4;
#else
        ts->mem_offset = offset;
#endif
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_0");
        ts->name = strdup(buf);
        ts++;

        /* high half */
        ts->base_type = type;
        ts->type = TCG_TYPE_I32;
        ts->fixed_reg = 0;
        ts->mem_allocated = 1;
        ts->mem_reg = reg;
#ifdef TCG_TARGET_WORDS_BIGENDIAN
        ts->mem_offset = offset;
#else
        ts->mem_offset = offset + 4;
#endif
        pstrcpy(buf, sizeof(buf), name);
        pstrcat(buf, sizeof(buf), "_1");
        ts->name = strdup(buf);

        s->nb_globals += 2;
    } else
#endif
    {
        tcg_temp_alloc(s, s->nb_globals + 1);
        ts = &s->temps[s->nb_globals];
        ts->base_type = type;
        ts->type = type;
        ts->fixed_reg = 0;
        ts->mem_allocated = 1;
        ts->mem_reg = reg;
        ts->mem_offset = offset;
        ts->name = name;
        s->nb_globals++;
    }
    return idx;
}
416 | ||
417 | TCGv_i32 tcg_global_mem_new_i32(int reg, tcg_target_long offset, | |
418 | const char *name) | |
419 | { | |
420 | int idx; | |
421 | ||
422 | idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name); | |
423 | return MAKE_TCGV_I32(idx); | |
424 | } | |
425 | ||
426 | TCGv_i64 tcg_global_mem_new_i64(int reg, tcg_target_long offset, | |
427 | const char *name) | |
428 | { | |
429 | int idx; | |
430 | ||
431 | idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name); | |
432 | return MAKE_TCGV_I64(idx); | |
433 | } | |
434 | ||
/* Allocate a temp of TYPE (local temps survive branches) and return its
   index.  Reuses an entry from the matching free list when possible;
   otherwise grows the temp array.  On 32-bit hosts a 64-bit temp is
   built from two adjacent I32 entries. */
static inline int tcg_temp_new_internal(TCGType type, int temp_local)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *ts;
    int idx, k;

    /* free lists are keyed by type, with locals offset by TCG_TYPE_COUNT */
    k = type;
    if (temp_local)
        k += TCG_TYPE_COUNT;
    idx = s->first_free_temp[k];
    if (idx != -1) {
        /* There is already an available temp with the
           right type */
        ts = &s->temps[idx];
        s->first_free_temp[k] = ts->next_free_temp;
        ts->temp_allocated = 1;
        assert(ts->temp_local == temp_local);
    } else {
        idx = s->nb_temps;
#if TCG_TARGET_REG_BITS == 32
        if (type == TCG_TYPE_I64) {
            /* two consecutive I32 entries form the 64-bit pair */
            tcg_temp_alloc(s, s->nb_temps + 2);
            ts = &s->temps[s->nb_temps];
            ts->base_type = type;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
            ts->name = NULL;
            ts++;
            ts->base_type = TCG_TYPE_I32;
            ts->type = TCG_TYPE_I32;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
            ts->name = NULL;
            s->nb_temps += 2;
        } else
#endif
        {
            tcg_temp_alloc(s, s->nb_temps + 1);
            ts = &s->temps[s->nb_temps];
            ts->base_type = type;
            ts->type = type;
            ts->temp_allocated = 1;
            ts->temp_local = temp_local;
            ts->name = NULL;
            s->nb_temps++;
        }
    }

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use++;
#endif
    return idx;
}
489 | ||
490 | TCGv_i32 tcg_temp_new_internal_i32(int temp_local) | |
491 | { | |
492 | int idx; | |
493 | ||
494 | idx = tcg_temp_new_internal(TCG_TYPE_I32, temp_local); | |
495 | return MAKE_TCGV_I32(idx); | |
496 | } | |
497 | ||
498 | TCGv_i64 tcg_temp_new_internal_i64(int temp_local) | |
499 | { | |
500 | int idx; | |
501 | ||
502 | idx = tcg_temp_new_internal(TCG_TYPE_I64, temp_local); | |
503 | return MAKE_TCGV_I64(idx); | |
504 | } | |
505 | ||
/* Return temp IDX to the free list matching its (type, local?) class.
   Only non-global temps may be freed; double-free trips the
   temp_allocated assert. */
static inline void tcg_temp_free_internal(int idx)
{
    TCGContext *s = &tcg_ctx;
    TCGTemp *ts;
    int k;

#if defined(CONFIG_DEBUG_TCG)
    s->temps_in_use--;
    if (s->temps_in_use < 0) {
        fprintf(stderr, "More temporaries freed than allocated!\n");
    }
#endif

    assert(idx >= s->nb_globals && idx < s->nb_temps);
    ts = &s->temps[idx];
    assert(ts->temp_allocated != 0);
    ts->temp_allocated = 0;
    /* same free-list keying as tcg_temp_new_internal() */
    k = ts->base_type;
    if (ts->temp_local)
        k += TCG_TYPE_COUNT;
    ts->next_free_temp = s->first_free_temp[k];
    s->first_free_temp[k] = idx;
}
529 | ||
530 | void tcg_temp_free_i32(TCGv_i32 arg) | |
531 | { | |
532 | tcg_temp_free_internal(GET_TCGV_I32(arg)); | |
533 | } | |
534 | ||
535 | void tcg_temp_free_i64(TCGv_i64 arg) | |
536 | { | |
537 | tcg_temp_free_internal(GET_TCGV_I64(arg)); | |
538 | } | |
539 | ||
540 | TCGv_i32 tcg_const_i32(int32_t val) | |
541 | { | |
542 | TCGv_i32 t0; | |
543 | t0 = tcg_temp_new_i32(); | |
544 | tcg_gen_movi_i32(t0, val); | |
545 | return t0; | |
546 | } | |
547 | ||
548 | TCGv_i64 tcg_const_i64(int64_t val) | |
549 | { | |
550 | TCGv_i64 t0; | |
551 | t0 = tcg_temp_new_i64(); | |
552 | tcg_gen_movi_i64(t0, val); | |
553 | return t0; | |
554 | } | |
555 | ||
556 | TCGv_i32 tcg_const_local_i32(int32_t val) | |
557 | { | |
558 | TCGv_i32 t0; | |
559 | t0 = tcg_temp_local_new_i32(); | |
560 | tcg_gen_movi_i32(t0, val); | |
561 | return t0; | |
562 | } | |
563 | ||
564 | TCGv_i64 tcg_const_local_i64(int64_t val) | |
565 | { | |
566 | TCGv_i64 t0; | |
567 | t0 = tcg_temp_local_new_i64(); | |
568 | tcg_gen_movi_i64(t0, val); | |
569 | return t0; | |
570 | } | |
571 | ||
#if defined(CONFIG_DEBUG_TCG)
/* Reset the outstanding-temporary counter (debug builds only). */
void tcg_clear_temp_count(void)
{
    TCGContext *s = &tcg_ctx;
    s->temps_in_use = 0;
}

/* Return 1 if any temps are still allocated (a leak in the front end),
   0 otherwise. */
int tcg_check_temp_count(void)
{
    TCGContext *s = &tcg_ctx;

    if (!s->temps_in_use) {
        return 0;
    }
    /* Clear the count so that we don't give another
     * warning immediately next time around.
     */
    s->temps_in_use = 0;
    return 1;
}
#endif
592 | ||
593 | void tcg_register_helper(void *func, const char *name) | |
594 | { | |
595 | TCGContext *s = &tcg_ctx; | |
596 | int n; | |
597 | if ((s->nb_helpers + 1) > s->allocated_helpers) { | |
598 | n = s->allocated_helpers; | |
599 | if (n == 0) { | |
600 | n = 4; | |
601 | } else { | |
602 | n *= 2; | |
603 | } | |
604 | s->helpers = realloc(s->helpers, n * sizeof(TCGHelperInfo)); | |
605 | s->allocated_helpers = n; | |
606 | } | |
607 | s->helpers[s->nb_helpers].func = (tcg_target_ulong)func; | |
608 | s->helpers[s->nb_helpers].name = name; | |
609 | s->nb_helpers++; | |
610 | } | |
611 | ||
612 | /* Note: we convert the 64 bit args to 32 bit and do some alignment | |
613 | and endian swap. Maybe it would be better to do the alignment | |
614 | and endian swap in tcg_reg_alloc_call(). */ | |
/* Emit an INDEX_op_call opcode calling FUNC with NARGS arguments.
   SIZEMASK bit 0 flags a 64-bit return; bits (2i+2)/(2i+3) flag arg i
   as 64-bit/signed.  On 32-bit hosts 64-bit values are split into two
   TCGArg slots; on 64-bit hosts that extend args, 32-bit values are
   widened into fresh temps first and freed afterwards. */
void tcg_gen_callN(TCGContext *s, TCGv_ptr func, unsigned int flags,
                   int sizemask, TCGArg ret, int nargs, TCGArg *args)
{
    int i;
    int real_args;
    int nb_rets;
    TCGArg *nparam;

#if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    /* widen each 32-bit argument to 64 bits in a scratch temp */
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        int is_signed = sizemask & (2 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = tcg_temp_new_i64();
            TCGv_i64 orig = MAKE_TCGV_I64(args[i]);
            if (is_signed) {
                tcg_gen_ext32s_i64(temp, orig);
            } else {
                tcg_gen_ext32u_i64(temp, orig);
            }
            args[i] = GET_TCGV_I64(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */

    *s->gen_opc_ptr++ = INDEX_op_call;
    /* reserve the slot holding (nb_rets << 16) | nb_params; filled below */
    nparam = s->gen_opparam_ptr++;
    if (ret != TCG_CALL_DUMMY_ARG) {
#if TCG_TARGET_REG_BITS < 64
        if (sizemask & 1) {
            /* 64-bit return on a 32-bit host occupies two slots */
#ifdef TCG_TARGET_WORDS_BIGENDIAN
            *s->gen_opparam_ptr++ = ret + 1;
            *s->gen_opparam_ptr++ = ret;
#else
            *s->gen_opparam_ptr++ = ret;
            *s->gen_opparam_ptr++ = ret + 1;
#endif
            nb_rets = 2;
        } else
#endif
        {
            *s->gen_opparam_ptr++ = ret;
            nb_rets = 1;
        }
    } else {
        nb_rets = 0;
    }
    real_args = 0;
    for (i = 0; i < nargs; i++) {
#if TCG_TARGET_REG_BITS < 64
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (is_64bit) {
#ifdef TCG_TARGET_CALL_ALIGN_ARGS
            /* some targets want aligned 64 bit args */
            if (real_args & 1) {
                *s->gen_opparam_ptr++ = TCG_CALL_DUMMY_ARG;
                real_args++;
            }
#endif
            /* If stack grows up, then we will be placing successive
               arguments at lower addresses, which means we need to
               reverse the order compared to how we would normally
               treat either big or little-endian.  For those arguments
               that will wind up in registers, this still works for
               HPPA (the only current STACK_GROWSUP target) since the
               argument registers are *also* allocated in decreasing
               order.  If another such target is added, this logic may
               have to get more complicated to differentiate between
               stack arguments and register arguments.  */
#if defined(TCG_TARGET_WORDS_BIGENDIAN) != defined(TCG_TARGET_STACK_GROWSUP)
            *s->gen_opparam_ptr++ = args[i] + 1;
            *s->gen_opparam_ptr++ = args[i];
#else
            *s->gen_opparam_ptr++ = args[i];
            *s->gen_opparam_ptr++ = args[i] + 1;
#endif
            real_args += 2;
            continue;
        }
#endif /* TCG_TARGET_REG_BITS < 64 */

        *s->gen_opparam_ptr++ = args[i];
        real_args++;
    }
    *s->gen_opparam_ptr++ = GET_TCGV_PTR(func);

    *s->gen_opparam_ptr++ = flags;

    /* +1 accounts for the function-pointer parameter itself */
    *nparam = (nb_rets << 16) | (real_args + 1);

    /* total parameters, needed to go backward in the instruction stream */
    *s->gen_opparam_ptr++ = 1 + nb_rets + real_args + 3;

#if defined(TCG_TARGET_EXTEND_ARGS) && TCG_TARGET_REG_BITS == 64
    /* release the widening temps allocated above */
    for (i = 0; i < nargs; ++i) {
        int is_64bit = sizemask & (1 << (i+1)*2);
        if (!is_64bit) {
            TCGv_i64 temp = MAKE_TCGV_I64(args[i]);
            tcg_temp_free_i64(temp);
        }
    }
#endif /* TCG_TARGET_EXTEND_ARGS */
}
718 | ||
#if TCG_TARGET_REG_BITS == 32
/* Shift the 64-bit pair ARG1 by immediate C (0..63) into RET on a
   32-bit host.  RIGHT selects direction; ARITH selects arithmetic vs
   logical right shift.  RET may alias ARG1. */
void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1,
                        int c, int right, int arith)
{
    if (c == 0) {
        /* shift by zero: plain copy */
        tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1));
        tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1));
    } else if (c >= 32) {
        /* whole-word shift: one half comes entirely from the other */
        c -= 32;
        if (right) {
            if (arith) {
                tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                /* replicate the sign into the high half */
                tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31);
            } else {
                tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c);
                tcg_gen_movi_i32(TCGV_HIGH(ret), 0);
            }
        } else {
            tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c);
            tcg_gen_movi_i32(TCGV_LOW(ret), 0);
        }
    } else {
        /* 1..31: combine bits carried across the 32-bit boundary */
        TCGv_i32 t0, t1;

        t0 = tcg_temp_new_i32();
        t1 = tcg_temp_new_i32();
        if (right) {
            tcg_gen_shli_i32(t0, TCGV_HIGH(arg1), 32 - c);
            if (arith)
                tcg_gen_sari_i32(t1, TCGV_HIGH(arg1), c);
            else
                tcg_gen_shri_i32(t1, TCGV_HIGH(arg1), c);
            tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c);
            tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t0);
            tcg_gen_mov_i32(TCGV_HIGH(ret), t1);
        } else {
            tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c);
            /* Note: ret can be the same as arg1, so we use t1 */
            tcg_gen_shli_i32(t1, TCGV_LOW(arg1), c);
            tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c);
            tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t0);
            tcg_gen_mov_i32(TCGV_LOW(ret), t1);
        }
        tcg_temp_free_i32(t0);
        tcg_temp_free_i32(t1);
    }
}
#endif
767 | ||
768 | ||
769 | static void tcg_reg_alloc_start(TCGContext *s) | |
770 | { | |
771 | int i; | |
772 | TCGTemp *ts; | |
773 | for(i = 0; i < s->nb_globals; i++) { | |
774 | ts = &s->temps[i]; | |
775 | if (ts->fixed_reg) { | |
776 | ts->val_type = TEMP_VAL_REG; | |
777 | } else { | |
778 | ts->val_type = TEMP_VAL_MEM; | |
779 | } | |
780 | } | |
781 | for(i = s->nb_globals; i < s->nb_temps; i++) { | |
782 | ts = &s->temps[i]; | |
783 | if (ts->temp_local) { | |
784 | ts->val_type = TEMP_VAL_MEM; | |
785 | } else { | |
786 | ts->val_type = TEMP_VAL_DEAD; | |
787 | } | |
788 | ts->mem_allocated = 0; | |
789 | ts->fixed_reg = 0; | |
790 | } | |
791 | for(i = 0; i < TCG_TARGET_NB_REGS; i++) { | |
792 | s->reg_to_temp[i] = -1; | |
793 | } | |
794 | } | |
795 | ||
796 | static char *tcg_get_arg_str_idx(TCGContext *s, char *buf, int buf_size, | |
797 | int idx) | |
798 | { | |
799 | TCGTemp *ts; | |
800 | ||
801 | assert(idx >= 0 && idx < s->nb_temps); | |
802 | ts = &s->temps[idx]; | |
803 | if (idx < s->nb_globals) { | |
804 | pstrcpy(buf, buf_size, ts->name); | |
805 | } else { | |
806 | if (ts->temp_local) | |
807 | snprintf(buf, buf_size, "loc%d", idx - s->nb_globals); | |
808 | else | |
809 | snprintf(buf, buf_size, "tmp%d", idx - s->nb_globals); | |
810 | } | |
811 | return buf; | |
812 | } | |
813 | ||
814 | char *tcg_get_arg_str_i32(TCGContext *s, char *buf, int buf_size, TCGv_i32 arg) | |
815 | { | |
816 | return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I32(arg)); | |
817 | } | |
818 | ||
819 | char *tcg_get_arg_str_i64(TCGContext *s, char *buf, int buf_size, TCGv_i64 arg) | |
820 | { | |
821 | return tcg_get_arg_str_idx(s, buf, buf_size, GET_TCGV_I64(arg)); | |
822 | } | |
823 | ||
824 | static int helper_cmp(const void *p1, const void *p2) | |
825 | { | |
826 | const TCGHelperInfo *th1 = p1; | |
827 | const TCGHelperInfo *th2 = p2; | |
828 | if (th1->func < th2->func) | |
829 | return -1; | |
830 | else if (th1->func == th2->func) | |
831 | return 0; | |
832 | else | |
833 | return 1; | |
834 | } | |
835 | ||
836 | /* find helper definition (Note: A hash table would be better) */ | |
837 | static TCGHelperInfo *tcg_find_helper(TCGContext *s, tcg_target_ulong val) | |
838 | { | |
839 | int m, m_min, m_max; | |
840 | TCGHelperInfo *th; | |
841 | tcg_target_ulong v; | |
842 | ||
843 | if (unlikely(!s->helpers_sorted)) { | |
844 | qsort(s->helpers, s->nb_helpers, sizeof(TCGHelperInfo), | |
845 | helper_cmp); | |
846 | s->helpers_sorted = 1; | |
847 | } | |
848 | ||
849 | /* binary search */ | |
850 | m_min = 0; | |
851 | m_max = s->nb_helpers - 1; | |
852 | while (m_min <= m_max) { | |
853 | m = (m_min + m_max) >> 1; | |
854 | th = &s->helpers[m]; | |
855 | v = th->func; | |
856 | if (v == val) | |
857 | return th; | |
858 | else if (val < v) { | |
859 | m_max = m - 1; | |
860 | } else { | |
861 | m_min = m + 1; | |
862 | } | |
863 | } | |
864 | return NULL; | |
865 | } | |
866 | ||
/* Human-readable names for TCGCond values, indexed by condition code;
   used by tcg_dump_ops() when printing brcond/setcond/movcond. */
static const char * const cond_name[] =
{
    [TCG_COND_NEVER] = "never",
    [TCG_COND_ALWAYS] = "always",
    [TCG_COND_EQ] = "eq",
    [TCG_COND_NE] = "ne",
    [TCG_COND_LT] = "lt",
    [TCG_COND_GE] = "ge",
    [TCG_COND_LE] = "le",
    [TCG_COND_GT] = "gt",
    [TCG_COND_LTU] = "ltu",
    [TCG_COND_GEU] = "geu",
    [TCG_COND_LEU] = "leu",
    [TCG_COND_GTU] = "gtu"
};
882 | ||
/* Pretty-print the whole pending opcode stream (gen_opc_buf up to
   gen_opc_ptr) to the QEMU log.  Call, nopn and debug_insn_start
   opcodes carry variable argument counts and get special decoding;
   movi operands are resolved to helper names when possible. */
void tcg_dump_ops(TCGContext *s)
{
    const uint16_t *opc_ptr;
    const TCGArg *args;
    TCGArg arg;
    TCGOpcode c;
    int i, k, nb_oargs, nb_iargs, nb_cargs, first_insn;
    const TCGOpDef *def;
    char buf[128];

    first_insn = 1;
    opc_ptr = s->gen_opc_buf;
    args = s->gen_opparam_buf;
    while (opc_ptr < s->gen_opc_ptr) {
        c = *opc_ptr++;
        def = &tcg_op_defs[c];
        if (c == INDEX_op_debug_insn_start) {
            uint64_t pc;
#if TARGET_LONG_BITS > TCG_TARGET_REG_BITS
            /* guest PC is split across two host-word args */
            pc = ((uint64_t)args[1] << 32) | args[0];
#else
            pc = args[0];
#endif
            if (!first_insn) {
                qemu_log("\n");
            }
            qemu_log(" ---- 0x%" PRIx64, pc);
            first_insn = 0;
            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;
        } else if (c == INDEX_op_call) {
            TCGArg arg;

            /* variable number of arguments */
            arg = *args++;
            nb_oargs = arg >> 16;
            nb_iargs = arg & 0xffff;
            nb_cargs = def->nb_cargs;

            qemu_log(" %s ", def->name);

            /* function name */
            qemu_log("%s",
                     tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                         args[nb_oargs + nb_iargs - 1]));
            /* flags */
            qemu_log(",$0x%" TCG_PRIlx, args[nb_oargs + nb_iargs]);
            /* nb out args */
            qemu_log(",$%d", nb_oargs);
            for(i = 0; i < nb_oargs; i++) {
                qemu_log(",");
                qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                   args[i]));
            }
            /* last input is the function pointer, already printed above */
            for(i = 0; i < (nb_iargs - 1); i++) {
                qemu_log(",");
                if (args[nb_oargs + i] == TCG_CALL_DUMMY_ARG) {
                    qemu_log("<dummy>");
                } else {
                    qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                       args[nb_oargs + i]));
                }
            }
        } else if (c == INDEX_op_movi_i32 || c == INDEX_op_movi_i64) {
            tcg_target_ulong val;
            TCGHelperInfo *th;

            nb_oargs = def->nb_oargs;
            nb_iargs = def->nb_iargs;
            nb_cargs = def->nb_cargs;
            qemu_log(" %s %s,$", def->name,
                     tcg_get_arg_str_idx(s, buf, sizeof(buf), args[0]));
            val = args[1];
            /* print a helper name instead of its raw address if known */
            th = tcg_find_helper(s, val);
            if (th) {
                qemu_log("%s", th->name);
            } else {
                if (c == INDEX_op_movi_i32) {
                    qemu_log("0x%x", (uint32_t)val);
                } else {
                    qemu_log("0x%" PRIx64 , (uint64_t)val);
                }
            }
        } else {
            qemu_log(" %s ", def->name);
            if (c == INDEX_op_nopn) {
                /* variable number of arguments */
                nb_cargs = *args;
                nb_oargs = 0;
                nb_iargs = 0;
            } else {
                nb_oargs = def->nb_oargs;
                nb_iargs = def->nb_iargs;
                nb_cargs = def->nb_cargs;
            }

            k = 0;
            for(i = 0; i < nb_oargs; i++) {
                if (k != 0) {
                    qemu_log(",");
                }
                qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                   args[k++]));
            }
            for(i = 0; i < nb_iargs; i++) {
                if (k != 0) {
                    qemu_log(",");
                }
                qemu_log("%s", tcg_get_arg_str_idx(s, buf, sizeof(buf),
                                                   args[k++]));
            }
            switch (c) {
            /* first constant arg of these ops is a condition code */
            case INDEX_op_brcond_i32:
            case INDEX_op_setcond_i32:
            case INDEX_op_movcond_i32:
            case INDEX_op_brcond2_i32:
            case INDEX_op_setcond2_i32:
            case INDEX_op_brcond_i64:
            case INDEX_op_setcond_i64:
            case INDEX_op_movcond_i64:
                if (args[k] < ARRAY_SIZE(cond_name) && cond_name[args[k]]) {
                    qemu_log(",%s", cond_name[args[k++]]);
                } else {
                    qemu_log(",$0x%" TCG_PRIlx, args[k++]);
                }
                i = 1;
                break;
            default:
                i = 0;
                break;
            }
            /* remaining constant args print as raw hex */
            for(; i < nb_cargs; i++) {
                if (k != 0) {
                    qemu_log(",");
                }
                arg = args[k++];
                qemu_log("$0x%" TCG_PRIlx, arg);
            }
        }
        qemu_log("\n");
        args += nb_iargs + nb_oargs + nb_cargs;
    }
}
1027 | ||
1028 | /* we give more priority to constraints with less registers */ | |
1029 | static int get_constraint_priority(const TCGOpDef *def, int k) | |
1030 | { | |
1031 | const TCGArgConstraint *arg_ct; | |
1032 | ||
1033 | int i, n; | |
1034 | arg_ct = &def->args_ct[k]; | |
1035 | if (arg_ct->ct & TCG_CT_ALIAS) { | |
1036 | /* an alias is equivalent to a single register */ | |
1037 | n = 1; | |
1038 | } else { | |
1039 | if (!(arg_ct->ct & TCG_CT_REG)) | |
1040 | return 0; | |
1041 | n = 0; | |
1042 | for(i = 0; i < TCG_TARGET_NB_REGS; i++) { | |
1043 | if (tcg_regset_test_reg(arg_ct->u.regs, i)) | |
1044 | n++; | |
1045 | } | |
1046 | } | |
1047 | return TCG_TARGET_NB_REGS - n + 1; | |
1048 | } | |
1049 | ||
1050 | /* sort from highest priority to lowest */ | |
1051 | static void sort_constraints(TCGOpDef *def, int start, int n) | |
1052 | { | |
1053 | int i, j, p1, p2, tmp; | |
1054 | ||
1055 | for(i = 0; i < n; i++) | |
1056 | def->sorted_args[start + i] = start + i; | |
1057 | if (n <= 1) | |
1058 | return; | |
1059 | for(i = 0; i < n - 1; i++) { | |
1060 | for(j = i + 1; j < n; j++) { | |
1061 | p1 = get_constraint_priority(def, def->sorted_args[start + i]); | |
1062 | p2 = get_constraint_priority(def, def->sorted_args[start + j]); | |
1063 | if (p1 < p2) { | |
1064 | tmp = def->sorted_args[start + i]; | |
1065 | def->sorted_args[start + i] = def->sorted_args[start + j]; | |
1066 | def->sorted_args[start + j] = tmp; | |
1067 | } | |
1068 | } | |
1069 | } | |
1070 | } | |
1071 | ||
/* Register the backend's operand-constraint table.  'tdefs' is an array
   terminated by an entry whose op is (TCGOpcode)-1; each entry supplies one
   constraint string per output+input argument of the op.  The parsed
   constraints are stored into tcg_op_defs[] and then sorted by priority
   for the register allocator.  */
void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs)
{
    TCGOpcode op;
    TCGOpDef *def;
    const char *ct_str;
    int i, nb_args;

    for(;;) {
        if (tdefs->op == (TCGOpcode)-1)
            break;
        op = tdefs->op;
        assert((unsigned)op < NB_OPS);
        def = &tcg_op_defs[op];
#if defined(CONFIG_DEBUG_TCG)
        /* Duplicate entry in op definitions? */
        assert(!def->used);
        def->used = 1;
#endif
        nb_args = def->nb_iargs + def->nb_oargs;
        for(i = 0; i < nb_args; i++) {
            ct_str = tdefs->args_ct_str[i];
            /* Incomplete TCGTargetOpDef entry? */
            assert(ct_str != NULL);
            tcg_regset_clear(def->args_ct[i].u.regs);
            def->args_ct[i].ct = 0;
            if (ct_str[0] >= '0' && ct_str[0] <= '9') {
                /* A leading digit aliases this input to output argument
                   'oarg': copy the output's register constraint.  */
                int oarg;
                oarg = ct_str[0] - '0';
                assert(oarg < def->nb_oargs);
                assert(def->args_ct[oarg].ct & TCG_CT_REG);
                /* TCG_CT_ALIAS is for the output arguments. The input
                   argument is tagged with TCG_CT_IALIAS. */
                def->args_ct[i] = def->args_ct[oarg];
                def->args_ct[oarg].ct = TCG_CT_ALIAS;
                def->args_ct[oarg].alias_index = i;
                def->args_ct[i].ct |= TCG_CT_IALIAS;
                def->args_ct[i].alias_index = oarg;
            } else {
                /* Otherwise parse character by character: 'i' permits a
                   constant, anything else is delegated to the backend.  */
                for(;;) {
                    if (*ct_str == '\0')
                        break;
                    switch(*ct_str) {
                    case 'i':
                        def->args_ct[i].ct |= TCG_CT_CONST;
                        ct_str++;
                        break;
                    default:
                        if (target_parse_constraint(&def->args_ct[i], &ct_str) < 0) {
                            fprintf(stderr, "Invalid constraint '%s' for arg %d of operation '%s'\n",
                                    ct_str, i, def->name);
                            exit(1);
                        }
                    }
                }
            }
        }

        /* TCGTargetOpDef entry with too much information? */
        assert(i == TCG_MAX_OP_ARGS || tdefs->args_ct_str[i] == NULL);

        /* sort the constraints (XXX: this is just an heuristic) */
        sort_constraints(def, 0, def->nb_oargs);
        sort_constraints(def, def->nb_oargs, def->nb_iargs);

#if 0
        {
            int i;

            printf("%s: sorted=", def->name);
            for(i = 0; i < def->nb_oargs + def->nb_iargs; i++)
                printf(" %d", def->sorted_args[i]);
            printf("\n");
        }
#endif
        tdefs++;
    }

#if defined(CONFIG_DEBUG_TCG)
    /* Cross-check: ops that must not have definitions got none, and
       every op that needs one was supplied.  'i' doubles as an error
       flag here.  */
    i = 0;
    for (op = 0; op < ARRAY_SIZE(tcg_op_defs); op++) {
        const TCGOpDef *def = &tcg_op_defs[op];
        if (op < INDEX_op_call
            || op == INDEX_op_debug_insn_start
            || (def->flags & TCG_OPF_NOT_PRESENT)) {
            /* Wrong entry in op definitions? */
            if (def->used) {
                fprintf(stderr, "Invalid op definition for %s\n", def->name);
                i = 1;
            }
        } else {
            /* Missing entry in op definitions? */
            if (!def->used) {
                fprintf(stderr, "Missing op definition for %s\n", def->name);
                i = 1;
            }
        }
    }
    if (i == 1) {
        tcg_abort();
    }
#endif
}
1174 | ||
1175 | #ifdef USE_LIVENESS_ANALYSIS | |
1176 | ||
1177 | /* set a nop for an operation using 'nb_args' */ | |
1178 | static inline void tcg_set_nop(TCGContext *s, uint16_t *opc_ptr, | |
1179 | TCGArg *args, int nb_args) | |
1180 | { | |
1181 | if (nb_args == 0) { | |
1182 | *opc_ptr = INDEX_op_nop; | |
1183 | } else { | |
1184 | *opc_ptr = INDEX_op_nopn; | |
1185 | args[0] = nb_args; | |
1186 | args[nb_args - 1] = nb_args; | |
1187 | } | |
1188 | } | |
1189 | ||
/* liveness analysis: end of function: all temps are dead, and globals
   should be in memory. */
static inline void tcg_la_func_end(TCGContext *s, uint8_t *dead_temps,
                                   uint8_t *mem_temps)
{
    memset(dead_temps, 1, s->nb_temps);
    /* Globals need to be synced back to memory ... */
    memset(mem_temps, 1, s->nb_globals);
    /* ... while non-global temps need no memory copy.  */
    memset(mem_temps + s->nb_globals, 0, s->nb_temps - s->nb_globals);
}
1199 | ||
/* liveness analysis: end of basic block: all temps are dead, globals
   and local temps should be in memory. */
static inline void tcg_la_bb_end(TCGContext *s, uint8_t *dead_temps,
                                 uint8_t *mem_temps)
{
    int i;

    memset(dead_temps, 1, s->nb_temps);
    memset(mem_temps, 1, s->nb_globals);
    /* Of the non-globals, only temps marked 'temp_local' survive a
       basic-block boundary in memory.  */
    for(i = s->nb_globals; i < s->nb_temps; i++) {
        mem_temps[i] = s->temps[i].temp_local;
    }
}
1213 | ||
/* Liveness analysis : update the opc_dead_args array to tell if a
   given input arguments is dead. Instructions updating dead
   temporaries are removed. */
static void tcg_liveness_analysis(TCGContext *s)
{
    int i, op_index, nb_args, nb_iargs, nb_oargs, arg, nb_ops;
    TCGOpcode op;
    TCGArg *args;
    const TCGOpDef *def;
    uint8_t *dead_temps, *mem_temps;
    uint16_t dead_args;
    uint8_t sync_args;

    s->gen_opc_ptr++; /* skip end */

    nb_ops = s->gen_opc_ptr - s->gen_opc_buf;

    /* One dead-args bitmask and one sync-args bitmask per op.  */
    s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t));
    s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t));

    dead_temps = tcg_malloc(s->nb_temps);
    mem_temps = tcg_malloc(s->nb_temps);
    /* Seed with end-of-function state; the walk below proceeds
       backwards from the last op, so 'args' starts past the end of the
       parameter buffer and is decremented as ops are visited.  */
    tcg_la_func_end(s, dead_temps, mem_temps);

    args = s->gen_opparam_ptr;
    op_index = nb_ops - 1;
    while (op_index >= 0) {
        op = s->gen_opc_buf[op_index];
        def = &tcg_op_defs[op];
        switch(op) {
        case INDEX_op_call:
            {
                int call_flags;

                /* Calls encode their own argument count after the args;
                   decode nb_iargs/nb_oargs from the packed first word.  */
                nb_args = args[-1];
                args -= nb_args;
                nb_iargs = args[0] & 0xffff;
                nb_oargs = args[0] >> 16;
                args++;
                call_flags = args[nb_oargs + nb_iargs];

                /* pure functions can be removed if their result is not
                   used */
                if (call_flags & TCG_CALL_NO_SIDE_EFFECTS) {
                    for(i = 0; i < nb_oargs; i++) {
                        arg = args[i];
                        if (!dead_temps[arg] || mem_temps[arg]) {
                            goto do_not_remove_call;
                        }
                    }
                    tcg_set_nop(s, s->gen_opc_buf + op_index,
                                args - 1, nb_args);
                } else {
                do_not_remove_call:

                    /* output args are dead */
                    dead_args = 0;
                    sync_args = 0;
                    for(i = 0; i < nb_oargs; i++) {
                        arg = args[i];
                        if (dead_temps[arg]) {
                            dead_args |= (1 << i);
                        }
                        if (mem_temps[arg]) {
                            sync_args |= (1 << i);
                        }
                        dead_temps[arg] = 1;
                        mem_temps[arg] = 0;
                    }

                    if (!(call_flags & TCG_CALL_NO_READ_GLOBALS)) {
                        /* globals should be synced to memory */
                        memset(mem_temps, 1, s->nb_globals);
                    }
                    if (!(call_flags & (TCG_CALL_NO_WRITE_GLOBALS |
                                        TCG_CALL_NO_READ_GLOBALS))) {
                        /* globals should go back to memory */
                        memset(dead_temps, 1, s->nb_globals);
                    }

                    /* input args are live */
                    for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                        arg = args[i];
                        if (arg != TCG_CALL_DUMMY_ARG) {
                            if (dead_temps[arg]) {
                                dead_args |= (1 << i);
                            }
                            dead_temps[arg] = 0;
                        }
                    }
                    s->op_dead_args[op_index] = dead_args;
                    s->op_sync_args[op_index] = sync_args;
                }
                args--;
            }
            break;
        case INDEX_op_debug_insn_start:
            args -= def->nb_args;
            break;
        case INDEX_op_nopn:
            nb_args = args[-1];
            args -= nb_args;
            break;
        case INDEX_op_discard:
            args--;
            /* mark the temporary as dead */
            dead_temps[args[0]] = 1;
            mem_temps[args[0]] = 0;
            break;
        case INDEX_op_end:
            break;

        case INDEX_op_add2_i32:
        case INDEX_op_sub2_i32:
            args -= 6;
            nb_iargs = 4;
            nb_oargs = 2;
            /* Test if the high part of the operation is dead, but not
               the low part. The result can be optimized to a simple
               add or sub. This happens often for x86_64 guest when the
               cpu mode is set to 32 bit. */
            if (dead_temps[args[1]] && !mem_temps[args[1]]) {
                if (dead_temps[args[0]] && !mem_temps[args[0]]) {
                    goto do_remove;
                }
                /* Create the single operation plus nop. */
                if (op == INDEX_op_add2_i32) {
                    op = INDEX_op_add_i32;
                } else {
                    op = INDEX_op_sub_i32;
                }
                s->gen_opc_buf[op_index] = op;
                /* Rewrite the args in place: low-part inputs become the
                   two inputs of the narrowed op.  */
                args[1] = args[2];
                args[2] = args[4];
                assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
                tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 3);
                /* Fall through and mark the single-word operation live. */
                nb_iargs = 2;
                nb_oargs = 1;
            }
            goto do_not_remove;

        case INDEX_op_mulu2_i32:
            args -= 4;
            nb_iargs = 2;
            nb_oargs = 2;
            /* Likewise, test for the high part of the operation dead. */
            if (dead_temps[args[1]] && !mem_temps[args[1]]) {
                if (dead_temps[args[0]] && !mem_temps[args[0]]) {
                    goto do_remove;
                }
                s->gen_opc_buf[op_index] = op = INDEX_op_mul_i32;
                args[1] = args[2];
                args[2] = args[3];
                assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);
                tcg_set_nop(s, s->gen_opc_buf + op_index + 1, args + 3, 1);
                /* Fall through and mark the single-word operation live. */
                nb_oargs = 1;
            }
            goto do_not_remove;

        default:
            /* XXX: optimize by hardcoding common cases (e.g. triadic ops) */
            args -= def->nb_args;
            nb_iargs = def->nb_iargs;
            nb_oargs = def->nb_oargs;

            /* Test if the operation can be removed because all
               its outputs are dead. We assume that nb_oargs == 0
               implies side effects */
            if (!(def->flags & TCG_OPF_SIDE_EFFECTS) && nb_oargs != 0) {
                for(i = 0; i < nb_oargs; i++) {
                    arg = args[i];
                    if (!dead_temps[arg] || mem_temps[arg]) {
                        goto do_not_remove;
                    }
                }
            do_remove:
                tcg_set_nop(s, s->gen_opc_buf + op_index, args, def->nb_args);
#ifdef CONFIG_PROFILER
                s->del_op_count++;
#endif
            } else {
            do_not_remove:

                /* output args are dead */
                dead_args = 0;
                sync_args = 0;
                for(i = 0; i < nb_oargs; i++) {
                    arg = args[i];
                    if (dead_temps[arg]) {
                        dead_args |= (1 << i);
                    }
                    if (mem_temps[arg]) {
                        sync_args |= (1 << i);
                    }
                    dead_temps[arg] = 1;
                    mem_temps[arg] = 0;
                }

                /* if end of basic block, update */
                if (def->flags & TCG_OPF_BB_END) {
                    tcg_la_bb_end(s, dead_temps, mem_temps);
                } else if (def->flags & TCG_OPF_SIDE_EFFECTS) {
                    /* globals should be synced to memory */
                    memset(mem_temps, 1, s->nb_globals);
                }

                /* input args are live */
                for(i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                    arg = args[i];
                    if (dead_temps[arg]) {
                        dead_args |= (1 << i);
                    }
                    dead_temps[arg] = 0;
                }
                s->op_dead_args[op_index] = dead_args;
                s->op_sync_args[op_index] = sync_args;
            }
            break;
        }
        op_index--;
    }

    /* The backward walk must land exactly on the start of the
       parameter buffer, else the arg accounting above is broken.  */
    if (args != s->gen_opparam_buf) {
        tcg_abort();
    }
}
1442 | #else | |
1443 | /* dummy liveness analysis */ | |
1444 | static void tcg_liveness_analysis(TCGContext *s) | |
1445 | { | |
1446 | int nb_ops; | |
1447 | nb_ops = s->gen_opc_ptr - s->gen_opc_buf; | |
1448 | ||
1449 | s->op_dead_args = tcg_malloc(nb_ops * sizeof(uint16_t)); | |
1450 | memset(s->op_dead_args, 0, nb_ops * sizeof(uint16_t)); | |
1451 | s->op_sync_args = tcg_malloc(nb_ops * sizeof(uint8_t)); | |
1452 | memset(s->op_sync_args, 0, nb_ops * sizeof(uint8_t)); | |
1453 | } | |
1454 | #endif | |
1455 | ||
1456 | #ifndef NDEBUG | |
/* Debug helper: print the location (register, memory slot, constant or
   dead) of every temporary, then the temp bound to each host register.  */
static void dump_regs(TCGContext *s)
{
    TCGTemp *ts;
    int i;
    char buf[64];

    for(i = 0; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        printf("  %10s: ", tcg_get_arg_str_idx(s, buf, sizeof(buf), i));
        switch(ts->val_type) {
        case TEMP_VAL_REG:
            printf("%s", tcg_target_reg_names[ts->reg]);
            break;
        case TEMP_VAL_MEM:
            printf("%d(%s)", (int)ts->mem_offset, tcg_target_reg_names[ts->mem_reg]);
            break;
        case TEMP_VAL_CONST:
            printf("$0x%" TCG_PRIlx, ts->val);
            break;
        case TEMP_VAL_DEAD:
            printf("D");
            break;
        default:
            printf("???");
            break;
        }
        printf("\n");
    }

    /* Reverse map: which temp, if any, currently lives in each
       host register.  */
    for(i = 0; i < TCG_TARGET_NB_REGS; i++) {
        if (s->reg_to_temp[i] >= 0) {
            printf("%s: %s\n",
                   tcg_target_reg_names[i],
                   tcg_get_arg_str_idx(s, buf, sizeof(buf), s->reg_to_temp[i]));
        }
    }
}
1494 | ||
/* Debug helper: verify that the reg_to_temp[] reverse map and the
   per-temp state agree in both directions; dump state and abort on any
   inconsistency.  */
static void check_regs(TCGContext *s)
{
    int reg, k;
    TCGTemp *ts;
    char buf[64];

    /* Direction 1: every register claiming to hold a temp must be the
       register that temp records.  */
    for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        k = s->reg_to_temp[reg];
        if (k >= 0) {
            ts = &s->temps[k];
            if (ts->val_type != TEMP_VAL_REG ||
                ts->reg != reg) {
                printf("Inconsistency for register %s:\n",
                       tcg_target_reg_names[reg]);
                goto fail;
            }
        }
    }
    /* Direction 2: every (non-fixed) temp that claims to be in a
       register must be recorded there in the reverse map.  Note the
       'fail' label above jumps into this loop body.  */
    for(k = 0; k < s->nb_temps; k++) {
        ts = &s->temps[k];
        if (ts->val_type == TEMP_VAL_REG &&
            !ts->fixed_reg &&
            s->reg_to_temp[ts->reg] != k) {
            printf("Inconsistency for temp %s:\n",
                   tcg_get_arg_str_idx(s, buf, sizeof(buf), k));
        fail:
            printf("reg state:\n");
            dump_regs(s);
            tcg_abort();
        }
    }
}
1527 | #endif | |
1528 | ||
/* Assign a stack-frame slot to temporary 'temp' and mark it allocated.
   Aborts if the frame area is exhausted.  */
static void temp_allocate_frame(TCGContext *s, int temp)
{
    TCGTemp *ts;
    ts = &s->temps[temp];
#if !(defined(__sparc__) && TCG_TARGET_REG_BITS == 64)
    /* Sparc64 stack is accessed with offset of 2047 */
    /* Round the current offset up to a tcg_target_long boundary.  */
    s->current_frame_offset = (s->current_frame_offset +
                               (tcg_target_long)sizeof(tcg_target_long) - 1) &
        ~(sizeof(tcg_target_long) - 1);
#endif
    if (s->current_frame_offset + (tcg_target_long)sizeof(tcg_target_long) >
        s->frame_end) {
        tcg_abort();
    }
    ts->mem_offset = s->current_frame_offset;
    ts->mem_reg = s->frame_reg;
    ts->mem_allocated = 1;
    s->current_frame_offset += (tcg_target_long)sizeof(tcg_target_long);
}
1548 | ||
/* sync register 'reg' by saving it to the corresponding temporary */
static inline void tcg_reg_sync(TCGContext *s, int reg)
{
    TCGTemp *ts;
    int temp;

    temp = s->reg_to_temp[reg];
    ts = &s->temps[temp];
    assert(ts->val_type == TEMP_VAL_REG);
    /* Only store if the memory copy is stale; fixed registers never
       need a backing store.  */
    if (!ts->mem_coherent && !ts->fixed_reg) {
        if (!ts->mem_allocated) {
            /* Lazily allocate a frame slot on first spill.  */
            temp_allocate_frame(s, temp);
        }
        tcg_out_st(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
    }
    ts->mem_coherent = 1;
}
1566 | ||
1567 | /* free register 'reg' by spilling the corresponding temporary if necessary */ | |
1568 | static void tcg_reg_free(TCGContext *s, int reg) | |
1569 | { | |
1570 | int temp; | |
1571 | ||
1572 | temp = s->reg_to_temp[reg]; | |
1573 | if (temp != -1) { | |
1574 | tcg_reg_sync(s, reg); | |
1575 | s->temps[temp].val_type = TEMP_VAL_MEM; | |
1576 | s->reg_to_temp[reg] = -1; | |
1577 | } | |
1578 | } | |
1579 | ||
1580 | /* Allocate a register belonging to reg1 & ~reg2 */ | |
1581 | static int tcg_reg_alloc(TCGContext *s, TCGRegSet reg1, TCGRegSet reg2) | |
1582 | { | |
1583 | int i, reg; | |
1584 | TCGRegSet reg_ct; | |
1585 | ||
1586 | tcg_regset_andnot(reg_ct, reg1, reg2); | |
1587 | ||
1588 | /* first try free registers */ | |
1589 | for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) { | |
1590 | reg = tcg_target_reg_alloc_order[i]; | |
1591 | if (tcg_regset_test_reg(reg_ct, reg) && s->reg_to_temp[reg] == -1) | |
1592 | return reg; | |
1593 | } | |
1594 | ||
1595 | /* XXX: do better spill choice */ | |
1596 | for(i = 0; i < ARRAY_SIZE(tcg_target_reg_alloc_order); i++) { | |
1597 | reg = tcg_target_reg_alloc_order[i]; | |
1598 | if (tcg_regset_test_reg(reg_ct, reg)) { | |
1599 | tcg_reg_free(s, reg); | |
1600 | return reg; | |
1601 | } | |
1602 | } | |
1603 | ||
1604 | tcg_abort(); | |
1605 | } | |
1606 | ||
1607 | /* mark a temporary as dead. */ | |
1608 | static inline void temp_dead(TCGContext *s, int temp) | |
1609 | { | |
1610 | TCGTemp *ts; | |
1611 | ||
1612 | ts = &s->temps[temp]; | |
1613 | if (!ts->fixed_reg) { | |
1614 | if (ts->val_type == TEMP_VAL_REG) { | |
1615 | s->reg_to_temp[ts->reg] = -1; | |
1616 | } | |
1617 | if (temp < s->nb_globals || ts->temp_local) { | |
1618 | ts->val_type = TEMP_VAL_MEM; | |
1619 | } else { | |
1620 | ts->val_type = TEMP_VAL_DEAD; | |
1621 | } | |
1622 | } | |
1623 | } | |
1624 | ||
/* sync a temporary to memory. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant. */
static inline void temp_sync(TCGContext *s, int temp, TCGRegSet allocated_regs)
{
    TCGTemp *ts;

    ts = &s->temps[temp];
    if (!ts->fixed_reg) {
        switch(ts->val_type) {
        case TEMP_VAL_CONST:
            /* A constant must first be materialized in a register,
               then the register path below stores it.  */
            ts->reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
                                    allocated_regs);
            ts->val_type = TEMP_VAL_REG;
            s->reg_to_temp[ts->reg] = temp;
            ts->mem_coherent = 0;
            tcg_out_movi(s, ts->type, ts->reg, ts->val);
            /* fallthrough*/
        case TEMP_VAL_REG:
            tcg_reg_sync(s, ts->reg);
            break;
        case TEMP_VAL_DEAD:
        case TEMP_VAL_MEM:
            /* Nothing to store: dead, or memory already holds it.  */
            break;
        default:
            tcg_abort();
        }
    }
}
1653 | ||
/* save a temporary to memory. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant. */
static inline void temp_save(TCGContext *s, int temp, TCGRegSet allocated_regs)
{
#ifdef USE_LIVENESS_ANALYSIS
    /* The liveness analysis already ensures that globals are back
       in memory. Keep an assert for safety. */
    assert(s->temps[temp].val_type == TEMP_VAL_MEM || s->temps[temp].fixed_reg);
#else
    /* Without liveness analysis, do the store + kill explicitly.  */
    temp_sync(s, temp, allocated_regs);
    temp_dead(s, temp);
#endif
}
1667 | ||
1668 | /* save globals to their canonical location and assume they can be | |
1669 | modified be the following code. 'allocated_regs' is used in case a | |
1670 | temporary registers needs to be allocated to store a constant. */ | |
1671 | static void save_globals(TCGContext *s, TCGRegSet allocated_regs) | |
1672 | { | |
1673 | int i; | |
1674 | ||
1675 | for(i = 0; i < s->nb_globals; i++) { | |
1676 | temp_save(s, i, allocated_regs); | |
1677 | } | |
1678 | } | |
1679 | ||
/* sync globals to their canonical location and assume they can be
   read by the following code. 'allocated_regs' is used in case a
   temporary registers needs to be allocated to store a constant. */
static void sync_globals(TCGContext *s, TCGRegSet allocated_regs)
{
    int i;

    for (i = 0; i < s->nb_globals; i++) {
#ifdef USE_LIVENESS_ANALYSIS
        /* Liveness analysis guarantees a global in a register already
           has a coherent memory copy; just assert it.  */
        assert(s->temps[i].val_type != TEMP_VAL_REG || s->temps[i].fixed_reg ||
               s->temps[i].mem_coherent);
#else
        temp_sync(s, i, allocated_regs);
#endif
    }
}
1696 | ||
/* at the end of a basic block, we assume all temporaries are dead and
   all globals are stored at their canonical location. */
static void tcg_reg_alloc_bb_end(TCGContext *s, TCGRegSet allocated_regs)
{
    TCGTemp *ts;
    int i;

    /* Handle the non-global temps first ...  */
    for(i = s->nb_globals; i < s->nb_temps; i++) {
        ts = &s->temps[i];
        if (ts->temp_local) {
            /* Local temps survive the block boundary in memory.  */
            temp_save(s, i, allocated_regs);
        } else {
#ifdef USE_LIVENESS_ANALYSIS
            /* The liveness analysis already ensures that temps are dead.
               Keep an assert for safety. */
            assert(ts->val_type == TEMP_VAL_DEAD);
#else
            temp_dead(s, i);
#endif
        }
    }

    /* ... then force all globals back to memory.  */
    save_globals(s, allocated_regs);
}
1721 | ||
1722 | #define IS_DEAD_ARG(n) ((dead_args >> (n)) & 1) | |
1723 | #define NEED_SYNC_ARG(n) ((sync_args >> (n)) & 1) | |
1724 | ||
/* Allocate for a movi: args[0] = destination temp, args[1] = constant.
   The dead_args/sync_args masks come from liveness analysis and
   describe args[0].  */
static void tcg_reg_alloc_movi(TCGContext *s, const TCGArg *args,
                               uint16_t dead_args, uint8_t sync_args)
{
    TCGTemp *ots;
    tcg_target_ulong val;

    ots = &s->temps[args[0]];
    val = args[1];

    if (ots->fixed_reg) {
        /* for fixed registers, we do not do any constant
           propagation */
        tcg_out_movi(s, ots->type, ots->reg, val);
    } else {
        /* The movi is not explicitly generated here */
        if (ots->val_type == TEMP_VAL_REG)
            s->reg_to_temp[ots->reg] = -1;
        /* Record the constant; it is materialized lazily by temp_sync
           or a later use.  */
        ots->val_type = TEMP_VAL_CONST;
        ots->val = val;
    }
    if (NEED_SYNC_ARG(0)) {
        temp_sync(s, args[0], s->reserved_regs);
    }
    if (IS_DEAD_ARG(0)) {
        temp_dead(s, args[0]);
    }
}
1752 | ||
/* Allocate for a mov: args[0] = destination temp, args[1] = source
   temp.  Depending on the source location and liveness, the mov may be
   turned into a store, a constant propagation, a register rename, or a
   real register-to-register copy.  */
static void tcg_reg_alloc_mov(TCGContext *s, const TCGOpDef *def,
                              const TCGArg *args, uint16_t dead_args,
                              uint8_t sync_args)
{
    TCGRegSet allocated_regs;
    TCGTemp *ts, *ots;
    const TCGArgConstraint *arg_ct, *oarg_ct;

    tcg_regset_set(allocated_regs, s->reserved_regs);
    ots = &s->temps[args[0]];
    ts = &s->temps[args[1]];
    oarg_ct = &def->args_ct[0];
    arg_ct = &def->args_ct[1];

    /* If the source value is not in a register, and we're going to be
       forced to have it in a register in order to perform the copy,
       then copy the SOURCE value into its own register first.  That way
       we don't have to reload SOURCE the next time it is used. */
    if (((NEED_SYNC_ARG(0) || ots->fixed_reg) && ts->val_type != TEMP_VAL_REG)
        || ts->val_type == TEMP_VAL_MEM) {
        ts->reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
        if (ts->val_type == TEMP_VAL_MEM) {
            tcg_out_ld(s, ts->type, ts->reg, ts->mem_reg, ts->mem_offset);
            ts->mem_coherent = 1;
        } else if (ts->val_type == TEMP_VAL_CONST) {
            tcg_out_movi(s, ts->type, ts->reg, ts->val);
        }
        s->reg_to_temp[ts->reg] = args[1];
        ts->val_type = TEMP_VAL_REG;
    }

    if (IS_DEAD_ARG(0) && !ots->fixed_reg) {
        /* mov to a non-saved dead register makes no sense (even with
           liveness analysis disabled). */
        assert(NEED_SYNC_ARG(0));
        /* The code above should have moved the temp to a register. */
        assert(ts->val_type == TEMP_VAL_REG);
        if (!ots->mem_allocated) {
            temp_allocate_frame(s, args[0]);
        }
        /* Store straight from the source register into the destination's
           memory slot and kill both temps as liveness dictates.  */
        tcg_out_st(s, ots->type, ts->reg, ots->mem_reg, ots->mem_offset);
        if (IS_DEAD_ARG(1)) {
            temp_dead(s, args[1]);
        }
        temp_dead(s, args[0]);
    } else if (ts->val_type == TEMP_VAL_CONST) {
        /* propagate constant */
        if (ots->val_type == TEMP_VAL_REG) {
            s->reg_to_temp[ots->reg] = -1;
        }
        ots->val_type = TEMP_VAL_CONST;
        ots->val = ts->val;
    } else {
        /* The code in the first if block should have moved the
           temp to a register. */
        assert(ts->val_type == TEMP_VAL_REG);
        if (IS_DEAD_ARG(1) && !ts->fixed_reg && !ots->fixed_reg) {
            /* the mov can be suppressed */
            if (ots->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ots->reg] = -1;
            }
            /* Just rename: the destination takes over the source's
               register.  */
            ots->reg = ts->reg;
            temp_dead(s, args[1]);
        } else {
            if (ots->val_type != TEMP_VAL_REG) {
                /* When allocating a new register, make sure to not spill the
                   input one. */
                tcg_regset_set_reg(allocated_regs, ts->reg);
                ots->reg = tcg_reg_alloc(s, oarg_ct->u.regs, allocated_regs);
            }
            tcg_out_mov(s, ots->type, ots->reg, ts->reg);
        }
        ots->val_type = TEMP_VAL_REG;
        ots->mem_coherent = 0;
        s->reg_to_temp[ots->reg] = args[0];
        if (NEED_SYNC_ARG(0)) {
            tcg_reg_sync(s, ots->reg);
        }
    }
}
1833 | ||
1834 | static void tcg_reg_alloc_op(TCGContext *s, | |
1835 | const TCGOpDef *def, TCGOpcode opc, | |
1836 | const TCGArg *args, uint16_t dead_args, | |
1837 | uint8_t sync_args) | |
1838 | { | |
1839 | TCGRegSet allocated_regs; | |
1840 | int i, k, nb_iargs, nb_oargs, reg; | |
1841 | TCGArg arg; | |
1842 | const TCGArgConstraint *arg_ct; | |
1843 | TCGTemp *ts; | |
1844 | TCGArg new_args[TCG_MAX_OP_ARGS]; | |
1845 | int const_args[TCG_MAX_OP_ARGS]; | |
1846 | ||
1847 | nb_oargs = def->nb_oargs; | |
1848 | nb_iargs = def->nb_iargs; | |
1849 | ||
1850 | /* copy constants */ | |
1851 | memcpy(new_args + nb_oargs + nb_iargs, | |
1852 | args + nb_oargs + nb_iargs, | |
1853 | sizeof(TCGArg) * def->nb_cargs); | |
1854 | ||
1855 | /* satisfy input constraints */ | |
1856 | tcg_regset_set(allocated_regs, s->reserved_regs); | |
1857 | for(k = 0; k < nb_iargs; k++) { | |
1858 | i = def->sorted_args[nb_oargs + k]; | |
1859 | arg = args[i]; | |
1860 | arg_ct = &def->args_ct[i]; | |
1861 | ts = &s->temps[arg]; | |
1862 | if (ts->val_type == TEMP_VAL_MEM) { | |
1863 | reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); | |
1864 | tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset); | |
1865 | ts->val_type = TEMP_VAL_REG; | |
1866 | ts->reg = reg; | |
1867 | ts->mem_coherent = 1; | |
1868 | s->reg_to_temp[reg] = arg; | |
1869 | } else if (ts->val_type == TEMP_VAL_CONST) { | |
1870 | if (tcg_target_const_match(ts->val, arg_ct)) { | |
1871 | /* constant is OK for instruction */ | |
1872 | const_args[i] = 1; | |
1873 | new_args[i] = ts->val; | |
1874 | goto iarg_end; | |
1875 | } else { | |
1876 | /* need to move to a register */ | |
1877 | reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); | |
1878 | tcg_out_movi(s, ts->type, reg, ts->val); | |
1879 | ts->val_type = TEMP_VAL_REG; | |
1880 | ts->reg = reg; | |
1881 | ts->mem_coherent = 0; | |
1882 | s->reg_to_temp[reg] = arg; | |
1883 | } | |
1884 | } | |
1885 | assert(ts->val_type == TEMP_VAL_REG); | |
1886 | if (arg_ct->ct & TCG_CT_IALIAS) { | |
1887 | if (ts->fixed_reg) { | |
1888 | /* if fixed register, we must allocate a new register | |
1889 | if the alias is not the same register */ | |
1890 | if (arg != args[arg_ct->alias_index]) | |
1891 | goto allocate_in_reg; | |
1892 | } else { | |
1893 | /* if the input is aliased to an output and if it is | |
1894 | not dead after the instruction, we must allocate | |
1895 | a new register and move it */ | |
1896 | if (!IS_DEAD_ARG(i)) { | |
1897 | goto allocate_in_reg; | |
1898 | } | |
1899 | } | |
1900 | } | |
1901 | reg = ts->reg; | |
1902 | if (tcg_regset_test_reg(arg_ct->u.regs, reg)) { | |
1903 | /* nothing to do : the constraint is satisfied */ | |
1904 | } else { | |
1905 | allocate_in_reg: | |
1906 | /* allocate a new register matching the constraint | |
1907 | and move the temporary register into it */ | |
1908 | reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); | |
1909 | tcg_out_mov(s, ts->type, reg, ts->reg); | |
1910 | } | |
1911 | new_args[i] = reg; | |
1912 | const_args[i] = 0; | |
1913 | tcg_regset_set_reg(allocated_regs, reg); | |
1914 | iarg_end: ; | |
1915 | } | |
1916 | ||
1917 | /* mark dead temporaries and free the associated registers */ | |
1918 | for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) { | |
1919 | if (IS_DEAD_ARG(i)) { | |
1920 | temp_dead(s, args[i]); | |
1921 | } | |
1922 | } | |
1923 | ||
1924 | if (def->flags & TCG_OPF_BB_END) { | |
1925 | tcg_reg_alloc_bb_end(s, allocated_regs); | |
1926 | } else { | |
1927 | if (def->flags & TCG_OPF_CALL_CLOBBER) { | |
1928 | /* XXX: permit generic clobber register list ? */ | |
1929 | for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) { | |
1930 | if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) { | |
1931 | tcg_reg_free(s, reg); | |
1932 | } | |
1933 | } | |
1934 | } | |
1935 | if (def->flags & TCG_OPF_SIDE_EFFECTS) { | |
1936 | /* sync globals if the op has side effects and might trigger | |
1937 | an exception. */ | |
1938 | sync_globals(s, allocated_regs); | |
1939 | } | |
1940 | ||
1941 | /* satisfy the output constraints */ | |
1942 | tcg_regset_set(allocated_regs, s->reserved_regs); | |
1943 | for(k = 0; k < nb_oargs; k++) { | |
1944 | i = def->sorted_args[k]; | |
1945 | arg = args[i]; | |
1946 | arg_ct = &def->args_ct[i]; | |
1947 | ts = &s->temps[arg]; | |
1948 | if (arg_ct->ct & TCG_CT_ALIAS) { | |
1949 | reg = new_args[arg_ct->alias_index]; | |
1950 | } else { | |
1951 | /* if fixed register, we try to use it */ | |
1952 | reg = ts->reg; | |
1953 | if (ts->fixed_reg && | |
1954 | tcg_regset_test_reg(arg_ct->u.regs, reg)) { | |
1955 | goto oarg_end; | |
1956 | } | |
1957 | reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs); | |
1958 | } | |
1959 | tcg_regset_set_reg(allocated_regs, reg); | |
1960 | /* if a fixed register is used, then a move will be done afterwards */ | |
1961 | if (!ts->fixed_reg) { | |
1962 | if (ts->val_type == TEMP_VAL_REG) { | |
1963 | s->reg_to_temp[ts->reg] = -1; | |
1964 | } | |
1965 | ts->val_type = TEMP_VAL_REG; | |
1966 | ts->reg = reg; | |
1967 | /* temp value is modified, so the value kept in memory is | |
1968 | potentially not the same */ | |
1969 | ts->mem_coherent = 0; | |
1970 | s->reg_to_temp[reg] = arg; | |
1971 | } | |
1972 | oarg_end: | |
1973 | new_args[i] = reg; | |
1974 | } | |
1975 | } | |
1976 | ||
1977 | /* emit instruction */ | |
1978 | tcg_out_op(s, opc, new_args, const_args); | |
1979 | ||
1980 | /* move the outputs in the correct register if needed */ | |
1981 | for(i = 0; i < nb_oargs; i++) { | |
1982 | ts = &s->temps[args[i]]; | |
1983 | reg = new_args[i]; | |
1984 | if (ts->fixed_reg && ts->reg != reg) { | |
1985 | tcg_out_mov(s, ts->type, ts->reg, reg); | |
1986 | } | |
1987 | if (NEED_SYNC_ARG(i)) { | |
1988 | tcg_reg_sync(s, reg); | |
1989 | } | |
1990 | if (IS_DEAD_ARG(i)) { | |
1991 | temp_dead(s, args[i]); | |
1992 | } | |
1993 | } | |
1994 | } | |
1995 | ||
/* Direction multiplier for call-stack argument offsets: negated on hosts
   whose stack grows upward, identity otherwise. */
#ifdef TCG_TARGET_STACK_GROWSUP
#define STACK_DIR(x) (-(x))
#else
#define STACK_DIR(x) (x)
#endif
2001 | ||
/* Register allocation and host-code emission for an INDEX_op_call op.
 *
 * On entry, args points at the call op's packed argument-count word
 * (outputs in the high 16 bits, inputs in the low 16 bits).  The input
 * list ends with the function pointer, which is followed by the call
 * flags constant.  dead_args / sync_args are per-argument liveness
 * bitmasks produced by the liveness analysis.
 *
 * Returns the number of TCGArg words this op occupies so the caller can
 * advance its argument pointer past it.
 */
static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
                              TCGOpcode opc, const TCGArg *args,
                              uint16_t dead_args, uint8_t sync_args)
{
    int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
    TCGArg arg, func_arg;
    TCGTemp *ts;
    tcg_target_long stack_offset, call_stack_size, func_addr;
    int const_func_arg, allocate_args;
    TCGRegSet allocated_regs;
    const TCGArgConstraint *arg_ct;

    /* Unpack the argument counts from the first word. */
    arg = *args++;

    nb_oargs = arg >> 16;
    nb_iargs = arg & 0xffff;
    nb_params = nb_iargs - 1;   /* the last input is the function pointer */

    flags = args[nb_oargs + nb_iargs];

    /* Number of parameters passed in registers; the rest go on the stack. */
    nb_regs = ARRAY_SIZE(tcg_target_call_iarg_regs);
    if (nb_regs > nb_params)
        nb_regs = nb_params;

    /* assign stack slots first */
    call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long);
    call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
        ~(TCG_TARGET_STACK_ALIGN - 1);
    allocate_args = (call_stack_size > TCG_STATIC_CALL_ARGS_SIZE);
    if (allocate_args) {
        /* XXX: if more than TCG_STATIC_CALL_ARGS_SIZE is needed,
           preallocate call stack */
        tcg_abort();
    }

    /* Store the stack-passed parameters into the static call-args area. */
    stack_offset = TCG_TARGET_CALL_STACK_OFFSET;
    for(i = nb_regs; i < nb_params; i++) {
        arg = args[nb_oargs + i];
#ifdef TCG_TARGET_STACK_GROWSUP
        stack_offset -= sizeof(tcg_target_long);
#endif
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = &s->temps[arg];
            if (ts->val_type == TEMP_VAL_REG) {
                tcg_out_st(s, ts->type, ts->reg, TCG_REG_CALL_STACK, stack_offset);
            } else if (ts->val_type == TEMP_VAL_MEM) {
                /* Stage the memory-resident value through a scratch reg. */
                reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
                                    s->reserved_regs);
                /* XXX: not correct if reading values from the stack */
                tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
                tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
            } else if (ts->val_type == TEMP_VAL_CONST) {
                reg = tcg_reg_alloc(s, tcg_target_available_regs[ts->type],
                                    s->reserved_regs);
                /* XXX: sign extend may be needed on some targets */
                tcg_out_movi(s, ts->type, reg, ts->val);
                tcg_out_st(s, ts->type, reg, TCG_REG_CALL_STACK, stack_offset);
            } else {
                tcg_abort();
            }
        }
#ifndef TCG_TARGET_STACK_GROWSUP
        stack_offset += sizeof(tcg_target_long);
#endif
    }

    /* assign input registers */
    tcg_regset_set(allocated_regs, s->reserved_regs);
    for(i = 0; i < nb_regs; i++) {
        arg = args[nb_oargs + i];
        if (arg != TCG_CALL_DUMMY_ARG) {
            ts = &s->temps[arg];
            reg = tcg_target_call_iarg_regs[i];
            /* Evict whatever currently occupies the fixed ABI argument reg. */
            tcg_reg_free(s, reg);
            if (ts->val_type == TEMP_VAL_REG) {
                if (ts->reg != reg) {
                    tcg_out_mov(s, ts->type, reg, ts->reg);
                }
            } else if (ts->val_type == TEMP_VAL_MEM) {
                tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
            } else if (ts->val_type == TEMP_VAL_CONST) {
                /* XXX: sign extend ? */
                tcg_out_movi(s, ts->type, reg, ts->val);
            } else {
                tcg_abort();
            }
            tcg_regset_set_reg(allocated_regs, reg);
        }
    }

    /* assign function address */
    func_arg = args[nb_oargs + nb_iargs - 1];
    arg_ct = &def->args_ct[0];
    ts = &s->temps[func_arg];
    func_addr = ts->val;
    const_func_arg = 0;
    if (ts->val_type == TEMP_VAL_MEM) {
        reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
        tcg_out_ld(s, ts->type, reg, ts->mem_reg, ts->mem_offset);
        func_arg = reg;
        tcg_regset_set_reg(allocated_regs, reg);
    } else if (ts->val_type == TEMP_VAL_REG) {
        reg = ts->reg;
        if (!tcg_regset_test_reg(arg_ct->u.regs, reg)) {
            /* Current register does not satisfy the constraint: move it. */
            reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
            tcg_out_mov(s, ts->type, reg, ts->reg);
        }
        func_arg = reg;
        tcg_regset_set_reg(allocated_regs, reg);
    } else if (ts->val_type == TEMP_VAL_CONST) {
        if (tcg_target_const_match(func_addr, arg_ct)) {
            /* The backend can encode the address directly in the call. */
            const_func_arg = 1;
            func_arg = func_addr;
        } else {
            reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
            tcg_out_movi(s, ts->type, reg, func_addr);
            func_arg = reg;
            tcg_regset_set_reg(allocated_regs, reg);
        }
    } else {
        tcg_abort();
    }


    /* mark dead temporaries and free the associated registers */
    for(i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
        if (IS_DEAD_ARG(i)) {
            temp_dead(s, args[i]);
        }
    }

    /* clobber call registers */
    for(reg = 0; reg < TCG_TARGET_NB_REGS; reg++) {
        if (tcg_regset_test_reg(tcg_target_call_clobber_regs, reg)) {
            tcg_reg_free(s, reg);
        }
    }

    /* Save globals if they might be written by the helper, sync them if
       they might be read. */
    if (flags & TCG_CALL_NO_READ_GLOBALS) {
        /* Nothing to do */
    } else if (flags & TCG_CALL_NO_WRITE_GLOBALS) {
        sync_globals(s, allocated_regs);
    } else {
        save_globals(s, allocated_regs);
    }

    /* Emit the call instruction itself. */
    tcg_out_op(s, opc, &func_arg, &const_func_arg);

    /* assign output registers and emit moves if needed */
    for(i = 0; i < nb_oargs; i++) {
        arg = args[i];
        ts = &s->temps[arg];
        reg = tcg_target_call_oarg_regs[i];
        assert(s->reg_to_temp[reg] == -1);
        if (ts->fixed_reg) {
            if (ts->reg != reg) {
                tcg_out_mov(s, ts->type, ts->reg, reg);
            }
        } else {
            if (ts->val_type == TEMP_VAL_REG) {
                s->reg_to_temp[ts->reg] = -1;
            }
            ts->val_type = TEMP_VAL_REG;
            ts->reg = reg;
            /* The call result only lives in the register for now. */
            ts->mem_coherent = 0;
            s->reg_to_temp[reg] = arg;
            if (NEED_SYNC_ARG(i)) {
                tcg_reg_sync(s, reg);
            }
            if (IS_DEAD_ARG(i)) {
                temp_dead(s, args[i]);
            }
        }
    }

    return nb_iargs + nb_oargs + def->nb_cargs + 1;
}
2181 | ||
2182 | #ifdef CONFIG_PROFILER | |
2183 | ||
2184 | static int64_t tcg_table_op_count[NB_OPS]; | |
2185 | ||
2186 | static void dump_op_count(void) | |
2187 | { | |
2188 | int i; | |
2189 | FILE *f; | |
2190 | f = fopen("/tmp/op.log", "w"); | |
2191 | for(i = INDEX_op_end; i < NB_OPS; i++) { | |
2192 | fprintf(f, "%s %" PRId64 "\n", tcg_op_defs[i].name, tcg_table_op_count[i]); | |
2193 | } | |
2194 | fclose(f); | |
2195 | } | |
2196 | #endif | |
2197 | ||
2198 | ||
/* Core of code generation: optimize and liveness-analyze the pending
 * opcode stream, then register-allocate and emit host code for each op
 * into gen_code_buf.
 *
 * If search_pc >= 0 it is a byte offset into the generated code; emission
 * stops as soon as the output crosses that offset and the index of the op
 * being generated is returned.  Otherwise the whole stream is generated
 * and -1 is returned.
 */
static inline int tcg_gen_code_common(TCGContext *s, uint8_t *gen_code_buf,
                                      long search_pc)
{
    TCGOpcode opc;
    int op_index;
    const TCGOpDef *def;
    const TCGArg *args;

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
        qemu_log("OP:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
    }
#endif

#ifdef CONFIG_PROFILER
    s->opt_time -= profile_getclock();
#endif

#ifdef USE_TCG_OPTIMIZATIONS
    s->gen_opparam_ptr =
        tcg_optimize(s, s->gen_opc_ptr, s->gen_opparam_buf, tcg_op_defs);
#endif

#ifdef CONFIG_PROFILER
    s->opt_time += profile_getclock();
    s->la_time -= profile_getclock();
#endif

    tcg_liveness_analysis(s);

#ifdef CONFIG_PROFILER
    s->la_time += profile_getclock();
#endif

#ifdef DEBUG_DISAS
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP_OPT))) {
        qemu_log("OP after optimization and liveness analysis:\n");
        tcg_dump_ops(s);
        qemu_log("\n");
    }
#endif

    tcg_reg_alloc_start(s);

    s->code_buf = gen_code_buf;
    s->code_ptr = gen_code_buf;

    args = s->gen_opparam_buf;
    op_index = 0;

    /* Main emission loop: one iteration per opcode. */
    for(;;) {
        opc = s->gen_opc_buf[op_index];
#ifdef CONFIG_PROFILER
        tcg_table_op_count[opc]++;
#endif
        def = &tcg_op_defs[opc];
#if 0
        printf("%s: %d %d %d\n", def->name,
               def->nb_oargs, def->nb_iargs, def->nb_cargs);
        //        dump_regs(s);
#endif
        switch(opc) {
        case INDEX_op_mov_i32:
        case INDEX_op_mov_i64:
            tcg_reg_alloc_mov(s, def, args, s->op_dead_args[op_index],
                              s->op_sync_args[op_index]);
            break;
        case INDEX_op_movi_i32:
        case INDEX_op_movi_i64:
            tcg_reg_alloc_movi(s, args, s->op_dead_args[op_index],
                               s->op_sync_args[op_index]);
            break;
        case INDEX_op_debug_insn_start:
            /* debug instruction */
            break;
        case INDEX_op_nop:
        case INDEX_op_nop1:
        case INDEX_op_nop2:
        case INDEX_op_nop3:
            break;
        case INDEX_op_nopn:
            /* Variable-sized nop: its first argument is its own length,
               so skip the fixed args += def->nb_args below. */
            args += args[0];
            goto next;
        case INDEX_op_discard:
            temp_dead(s, args[0]);
            break;
        case INDEX_op_set_label:
            tcg_reg_alloc_bb_end(s, s->reserved_regs);
            tcg_out_label(s, args[0], s->code_ptr);
            break;
        case INDEX_op_call:
            /* Calls consume a variable number of argument words; the
               helper returns how many, so bypass the fixed advance. */
            args += tcg_reg_alloc_call(s, def, opc, args,
                                       s->op_dead_args[op_index],
                                       s->op_sync_args[op_index]);
            goto next;
        case INDEX_op_end:
            goto the_end;
        default:
            /* Sanity check that we've not introduced any unhandled opcodes. */
            if (def->flags & TCG_OPF_NOT_PRESENT) {
                tcg_abort();
            }
            /* Note: in order to speed up the code, it would be much
               faster to have specialized register allocator functions for
               some common argument patterns */
            tcg_reg_alloc_op(s, def, opc, args, s->op_dead_args[op_index],
                             s->op_sync_args[op_index]);
            break;
        }
        args += def->nb_args;
    next:
        /* In search mode, stop once the generated code passes search_pc. */
        if (search_pc >= 0 && search_pc < s->code_ptr - gen_code_buf) {
            return op_index;
        }
        op_index++;
#ifndef NDEBUG
        check_regs(s);
#endif
    }
 the_end:
#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
    /* Generate TB finalization at the end of block */
    tcg_out_tb_finalize(s);
#endif
    return -1;
}
2327 | ||
2328 | int tcg_gen_code(TCGContext *s, uint8_t *gen_code_buf) | |
2329 | { | |
2330 | #ifdef CONFIG_PROFILER | |
2331 | { | |
2332 | int n; | |
2333 | n = (s->gen_opc_ptr - s->gen_opc_buf); | |
2334 | s->op_count += n; | |
2335 | if (n > s->op_count_max) | |
2336 | s->op_count_max = n; | |
2337 | ||
2338 | s->temp_count += s->nb_temps; | |
2339 | if (s->nb_temps > s->temp_count_max) | |
2340 | s->temp_count_max = s->nb_temps; | |
2341 | } | |
2342 | #endif | |
2343 | ||
2344 | tcg_gen_code_common(s, gen_code_buf, -1); | |
2345 | ||
2346 | /* flush instruction cache */ | |
2347 | flush_icache_range((tcg_target_ulong)gen_code_buf, | |
2348 | (tcg_target_ulong)s->code_ptr); | |
2349 | ||
2350 | return s->code_ptr - gen_code_buf; | |
2351 | } | |
2352 | ||
2353 | /* Return the index of the micro operation such as the pc after is < | |
2354 | offset bytes from the start of the TB. The contents of gen_code_buf must | |
2355 | not be changed, though writing the same values is ok. | |
2356 | Return -1 if not found. */ | |
2357 | int tcg_gen_code_search_pc(TCGContext *s, uint8_t *gen_code_buf, long offset) | |
2358 | { | |
2359 | return tcg_gen_code_common(s, gen_code_buf, offset); | |
2360 | } | |
2361 | ||
2362 | #ifdef CONFIG_PROFILER | |
/* Print the statistics accumulated under CONFIG_PROFILER to stream f.
   The cycles-to-seconds conversion hard-codes a 2.4 GHz host clock, as
   the printed text itself says. */
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    TCGContext *s = &tcg_ctx;
    int64_t tot;

    tot = s->interm_time + s->code_time;
    cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
                tot, tot / 2.4e9);
    cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
                s->tb_count,
                s->tb_count1 - s->tb_count,
                s->tb_count1 ? (double)(s->tb_count1 - s->tb_count) / s->tb_count1 * 100.0 : 0);
    cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
                s->tb_count ? (double)s->op_count / s->tb_count : 0, s->op_count_max);
    cpu_fprintf(f, "deleted ops/TB %0.2f\n",
                s->tb_count ?
                (double)s->del_op_count / s->tb_count : 0);
    cpu_fprintf(f, "avg temps/TB %0.2f max=%d\n",
                s->tb_count ?
                (double)s->temp_count / s->tb_count : 0,
                s->temp_count_max);

    cpu_fprintf(f, "cycles/op %0.1f\n",
                s->op_count ? (double)tot / s->op_count : 0);
    cpu_fprintf(f, "cycles/in byte %0.1f\n",
                s->code_in_len ? (double)tot / s->code_in_len : 0);
    cpu_fprintf(f, "cycles/out byte %0.1f\n",
                s->code_out_len ? (double)tot / s->code_out_len : 0);
    /* Avoid division by zero in the percentage lines below. */
    if (tot == 0)
        tot = 1;
    cpu_fprintf(f, " gen_interm time %0.1f%%\n",
                (double)s->interm_time / tot * 100.0);
    cpu_fprintf(f, " gen_code time %0.1f%%\n",
                (double)s->code_time / tot * 100.0);
    cpu_fprintf(f, "optim./code time %0.1f%%\n",
                (double)s->opt_time / (s->code_time ? s->code_time : 1)
                * 100.0);
    cpu_fprintf(f, "liveness/code time %0.1f%%\n",
                (double)s->la_time / (s->code_time ? s->code_time : 1) * 100.0);
    cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
                s->restore_count);
    cpu_fprintf(f, " avg cycles %0.1f\n",
                s->restore_count ? (double)s->restore_time / s->restore_count : 0);

    dump_op_count();
}
2409 | #else | |
/* Fallback when QEMU is built without CONFIG_PROFILER: report that no
   profiling data is available. */
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf)
{
    cpu_fprintf(f, "[TCG profiler not compiled]\n");
}
2414 | #endif | |
2415 | ||
2416 | #ifdef ELF_HOST_MACHINE | |
2417 | /* In order to use this feature, the backend needs to do three things: | |
2418 | ||
2419 | (1) Define ELF_HOST_MACHINE to indicate both what value to | |
2420 | put into the ELF image and to indicate support for the feature. | |
2421 | ||
2422 | (2) Define tcg_register_jit. This should create a buffer containing | |
2423 | the contents of a .debug_frame section that describes the post- | |
2424 | prologue unwind info for the tcg machine. | |
2425 | ||
2426 | (3) Call tcg_register_jit_int, with the constructed .debug_frame. | |
2427 | */ | |
2428 | ||
2429 | /* Begin GDB interface. THE FOLLOWING MUST MATCH GDB DOCS. */ | |
/* Action codes published to the debugger through jit_descriptor below;
   the values are fixed by the GDB JIT interface and must not change. */
typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
} jit_actions_t;
2435 | ||
/* One registered in-memory symbol file (here: our fake ELF image),
   linked into the debugger-visible list.  Layout fixed by GDB. */
struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const void *symfile_addr;   /* start of the symbol file in memory */
    uint64_t symfile_size;      /* its size in bytes */
};
2442 | ||
/* Root descriptor the debugger inspects when the registration hook
   fires.  Layout fixed by GDB. */
struct jit_descriptor {
    uint32_t version;
    uint32_t action_flag;                   /* a jit_actions_t value */
    struct jit_code_entry *relevant_entry;  /* entry the action applies to */
    struct jit_code_entry *first_entry;     /* head of the entry list */
};
2449 | ||
/* Hook function the debugger breakpoints to learn about newly registered
   JIT code (per the GDB JIT interface).  The empty asm keeps the compiler
   from optimizing the call away; noinline keeps it a real function. */
void __jit_debug_register_code(void) __attribute__((noinline));
void __jit_debug_register_code(void)
{
    asm("");
}
2455 | ||
/* Must statically initialize the version, because GDB may check
   the version before we can set it.  Fields: version=1, action_flag,
   relevant_entry, first_entry. */
struct jit_descriptor __jit_debug_descriptor = { 1, 0, 0, 0 };
2459 | ||
2460 | /* End GDB interface. */ | |
2461 | ||
/* Return the byte offset of `str` inside the NUL-separated string table
   `strtab`.  Byte 0 of the table is the empty string, so scanning starts
   one past it.  The caller must guarantee that `str` is present; there is
   no terminating sentinel check. */
static int find_string(const char *strtab, const char *str)
{
    const char *cursor;

    for (cursor = strtab + 1; strcmp(cursor, str) != 0;
         cursor += strlen(cursor) + 1) {
        /* advance to the next table entry */
    }
    return cursor - strtab;
}
2473 | ||
/* Build a minimal in-memory ELF image (headers, symtab, DWARF debug info
 * and the caller-supplied .debug_frame) describing the code buffer at
 * buf_ptr/buf_size, then publish it to an attached debugger through the
 * GDB JIT interface declared above.
 */
static void tcg_register_jit_int(void *buf_ptr, size_t buf_size,
                                 void *debug_frame, size_t debug_frame_size)
{
    /* Hand-rolled .debug_info payload: one compile unit containing a
       single subprogram that covers the whole code buffer. */
    struct __attribute__((packed)) DebugInfo {
        uint32_t  len;
        uint16_t  version;
        uint32_t  abbrev;
        uint8_t   ptr_size;
        uint8_t   cu_die;
        uint16_t  cu_lang;
        uintptr_t cu_low_pc;
        uintptr_t cu_high_pc;
        uint8_t   fn_die;
        char      fn_name[16];
        uintptr_t fn_low_pc;
        uintptr_t fn_high_pc;
        uint8_t   cu_eoc;
    };

    /* Complete layout of the fake ELF file; the .debug_frame contents are
       appended immediately after this struct. */
    struct ElfImage {
        ElfW(Ehdr) ehdr;
        ElfW(Phdr) phdr;
        ElfW(Shdr) shdr[7];
        ElfW(Sym)  sym[2];
        struct DebugInfo di;
        uint8_t    da[24];
        char       str[80];
    };

    struct ElfImage *img;

    static const struct ElfImage img_template = {
        .ehdr = {
            .e_ident[EI_MAG0] = ELFMAG0,
            .e_ident[EI_MAG1] = ELFMAG1,
            .e_ident[EI_MAG2] = ELFMAG2,
            .e_ident[EI_MAG3] = ELFMAG3,
            .e_ident[EI_CLASS] = ELF_CLASS,
            .e_ident[EI_DATA] = ELF_DATA,
            .e_ident[EI_VERSION] = EV_CURRENT,
            .e_type = ET_EXEC,
            .e_machine = ELF_HOST_MACHINE,
            .e_version = EV_CURRENT,
            .e_phoff = offsetof(struct ElfImage, phdr),
            .e_shoff = offsetof(struct ElfImage, shdr),
            /* NOTE(review): looks like this should be sizeof(ElfW(Ehdr));
               consumers apparently tolerate it -- confirm before changing. */
            .e_ehsize = sizeof(ElfW(Shdr)),
            .e_phentsize = sizeof(ElfW(Phdr)),
            .e_phnum = 1,
            .e_shentsize = sizeof(ElfW(Shdr)),
            .e_shnum = ARRAY_SIZE(img->shdr),
            .e_shstrndx = ARRAY_SIZE(img->shdr) - 1,
#ifdef ELF_HOST_FLAGS
            .e_flags = ELF_HOST_FLAGS,
#endif
#ifdef ELF_OSABI
            .e_ident[EI_OSABI] = ELF_OSABI,
#endif
        },
        .phdr = {
            .p_type = PT_LOAD,
            .p_flags = PF_X,
        },
        .shdr = {
            [0] = { .sh_type = SHT_NULL },
            /* Trick: The contents of code_gen_buffer are not present in
               this fake ELF file; that got allocated elsewhere.  Therefore
               we mark .text as SHT_NOBITS (similar to .bss) so that readers
               will not look for contents.  We can record any address.  */
            [1] = { /* .text */
                .sh_type = SHT_NOBITS,
                .sh_flags = SHF_EXECINSTR | SHF_ALLOC,
            },
            [2] = { /* .debug_info */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, di),
                .sh_size = sizeof(struct DebugInfo),
            },
            [3] = { /* .debug_abbrev */
                .sh_type = SHT_PROGBITS,
                .sh_offset = offsetof(struct ElfImage, da),
                .sh_size = sizeof(img->da),
            },
            [4] = { /* .debug_frame */
                .sh_type = SHT_PROGBITS,
                /* The frame data sits right after the struct in memory. */
                .sh_offset = sizeof(struct ElfImage),
            },
            [5] = { /* .symtab */
                .sh_type = SHT_SYMTAB,
                .sh_offset = offsetof(struct ElfImage, sym),
                .sh_size = sizeof(img->sym),
                .sh_info = 1,
                .sh_link = ARRAY_SIZE(img->shdr) - 1,
                .sh_entsize = sizeof(ElfW(Sym)),
            },
            [6] = { /* .strtab */
                .sh_type = SHT_STRTAB,
                .sh_offset = offsetof(struct ElfImage, str),
                .sh_size = sizeof(img->str),
            }
        },
        .sym = {
            [1] = { /* code_gen_buffer */
                .st_info = ELF_ST_INFO(STB_GLOBAL, STT_FUNC),
                .st_shndx = 1,
            }
        },
        .di = {
            .len = sizeof(struct DebugInfo) - 4,
            .version = 2,
            .ptr_size = sizeof(void *),
            .cu_die = 1,
            .cu_lang = 0x8001,  /* DW_LANG_Mips_Assembler */
            .fn_die = 2,
            .fn_name = "code_gen_buffer"
        },
        /* DWARF abbreviation table matching the DebugInfo layout above. */
        .da = {
            1,          /* abbrev number (the cu) */
            0x11, 1,    /* DW_TAG_compile_unit, has children */
            0x13, 0x5,  /* DW_AT_language, DW_FORM_data2 */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            2,          /* abbrev number (the fn) */
            0x2e, 0,    /* DW_TAG_subprogram, no children */
            0x3, 0x8,   /* DW_AT_name, DW_FORM_string */
            0x11, 0x1,  /* DW_AT_low_pc, DW_FORM_addr */
            0x12, 0x1,  /* DW_AT_high_pc, DW_FORM_addr */
            0, 0,       /* end of abbrev */
            0           /* no more abbrev */
        },
        .str = "\0" ".text\0" ".debug_info\0" ".debug_abbrev\0"
               ".debug_frame\0" ".symtab\0" ".strtab\0" "code_gen_buffer",
    };

    /* We only need a single jit entry; statically allocate it.  */
    static struct jit_code_entry one_entry;

    uintptr_t buf = (uintptr_t)buf_ptr;
    size_t img_size = sizeof(struct ElfImage) + debug_frame_size;

    img = g_malloc(img_size);
    *img = img_template;
    /* Append .debug_frame immediately after the fixed-layout part. */
    memcpy(img + 1, debug_frame, debug_frame_size);

    /* Patch the template with the actual buffer address and sizes. */
    img->phdr.p_vaddr = buf;
    img->phdr.p_paddr = buf;
    img->phdr.p_memsz = buf_size;

    img->shdr[1].sh_name = find_string(img->str, ".text");
    img->shdr[1].sh_addr = buf;
    img->shdr[1].sh_size = buf_size;

    img->shdr[2].sh_name = find_string(img->str, ".debug_info");
    img->shdr[3].sh_name = find_string(img->str, ".debug_abbrev");

    img->shdr[4].sh_name = find_string(img->str, ".debug_frame");
    img->shdr[4].sh_size = debug_frame_size;

    img->shdr[5].sh_name = find_string(img->str, ".symtab");
    img->shdr[6].sh_name = find_string(img->str, ".strtab");

    img->sym[1].st_name = find_string(img->str, "code_gen_buffer");
    img->sym[1].st_value = buf;
    img->sym[1].st_size = buf_size;

    img->di.cu_low_pc = buf;
    img->di.cu_high_pc = buf_size;
    img->di.fn_low_pc = buf;
    img->di.fn_high_pc = buf_size;

#ifdef DEBUG_JIT
    /* Enable this block to be able to debug the ELF image file creation.
       One can use readelf, objdump, or other inspection utilities.  */
    {
        FILE *f = fopen("/tmp/qemu.jit", "w+b");
        if (f) {
            /* NOTE(review): fwrite returns the item count (1 here), so
               comparing against img_size is always "unequal"; harmless
               since the body is empty, but the test is misleading. */
            if (fwrite(img, img_size, 1, f) != img_size) {
                /* Avoid stupid unused return value warning for fwrite.  */
            }
            fclose(f);
        }
    }
#endif

    /* Publish the image and fire the debugger hook. */
    one_entry.symfile_addr = img;
    one_entry.symfile_size = img_size;

    __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
    __jit_debug_descriptor.relevant_entry = &one_entry;
    __jit_debug_descriptor.first_entry = &one_entry;
    __jit_debug_register_code();
}
2666 | #else | |
2667 | /* No support for the feature. Provide the entry point expected by exec.c, | |
2668 | and implement the internal function we declared earlier. */ | |
2669 | ||
/* ELF_HOST_MACHINE is not defined for this backend: debugger registration
   of JIT code is unsupported, so this internal hook is a no-op. */
static void tcg_register_jit_int(void *buf, size_t size,
                                 void *debug_frame, size_t debug_frame_size)
{
}
2674 | ||
/* No-op public entry point expected by exec.c when the GDB JIT feature
   is unavailable on this host. */
void tcg_register_jit(void *buf, size_t buf_size)
{
}
2678 | #endif /* ELF_HOST_MACHINE */ |