/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "config.h"

#include <stdlib.h>
#include <stdio.h>

#include "qemu-common.h"
#include "tcg-op.h"

#if TCG_TARGET_REG_BITS == 64
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
#else
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32)
#endif
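
/* For example, on a 64-bit host "CASE_OP_32_64(add)" expands to
   "case INDEX_op_add_i32: case INDEX_op_add_i64", letting one switch
   arm cover both operand widths. */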

typedef enum {
    TCG_TEMP_UNDEF = 0,
    TCG_TEMP_CONST,
    TCG_TEMP_COPY,
    TCG_TEMP_HAS_COPY,
    TCG_TEMP_ANY
} tcg_temp_state;

struct tcg_temp_info {
    tcg_temp_state state;
    uint16_t prev_copy;
    uint16_t next_copy;
    tcg_target_ulong val;
};

static struct tcg_temp_info temps[TCG_MAX_TEMPS];
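
/* Temps known to hold the same value are kept on a circular
   doubly-linked list threaded through prev_copy/next_copy.  The class
   representative is in state TCG_TEMP_HAS_COPY; every other member is
   in state TCG_TEMP_COPY, with val pointing back at the
   representative. */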

/* Reset TEMP's state to TCG_TEMP_ANY.  If TEMP was the representative
   of a class of equivalent temps, a new representative is chosen from
   that class. */
static void reset_temp(TCGArg temp, int nb_temps, int nb_globals)
{
    int i;
    TCGArg new_base = (TCGArg)-1;
    if (temps[temp].state == TCG_TEMP_HAS_COPY) {
        for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
            if (i >= nb_globals) {
                temps[i].state = TCG_TEMP_HAS_COPY;
                new_base = i;
                break;
            }
        }
        for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
            if (new_base == (TCGArg)-1) {
                temps[i].state = TCG_TEMP_ANY;
            } else {
                temps[i].val = new_base;
            }
        }
        temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
        temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
    } else if (temps[temp].state == TCG_TEMP_COPY) {
        temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
        temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
        new_base = temps[temp].val;
    }
    temps[temp].state = TCG_TEMP_ANY;
    if (new_base != (TCGArg)-1 && temps[new_base].next_copy == new_base) {
        temps[new_base].state = TCG_TEMP_ANY;
    }
}

static int op_bits(int op)
{
    switch (op) {
    case INDEX_op_mov_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
#ifdef TCG_TARGET_HAS_rot_i32
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
#endif
#ifdef TCG_TARGET_HAS_not_i32
    case INDEX_op_not_i32:
#endif
#ifdef TCG_TARGET_HAS_ext8s_i32
    case INDEX_op_ext8s_i32:
#endif
#ifdef TCG_TARGET_HAS_ext16s_i32
    case INDEX_op_ext16s_i32:
#endif
#ifdef TCG_TARGET_HAS_ext8u_i32
    case INDEX_op_ext8u_i32:
#endif
#ifdef TCG_TARGET_HAS_ext16u_i32
    case INDEX_op_ext16u_i32:
#endif
        return 32;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_mov_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
#ifdef TCG_TARGET_HAS_rot_i64
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
#endif
#ifdef TCG_TARGET_HAS_not_i64
    case INDEX_op_not_i64:
#endif
#ifdef TCG_TARGET_HAS_ext8s_i64
    case INDEX_op_ext8s_i64:
#endif
#ifdef TCG_TARGET_HAS_ext16s_i64
    case INDEX_op_ext16s_i64:
#endif
#ifdef TCG_TARGET_HAS_ext32s_i64
    case INDEX_op_ext32s_i64:
#endif
#ifdef TCG_TARGET_HAS_ext8u_i64
    case INDEX_op_ext8u_i64:
#endif
#ifdef TCG_TARGET_HAS_ext16u_i64
    case INDEX_op_ext16u_i64:
#endif
#ifdef TCG_TARGET_HAS_ext32u_i64
    case INDEX_op_ext32u_i64:
#endif
        return 64;
#endif
    default:
        fprintf(stderr, "Unrecognized operation %d in op_bits.\n", op);
        tcg_abort();
    }
}

static int op_to_movi(int op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_movi_i32;
#if TCG_TARGET_REG_BITS == 64
    case 64:
        return INDEX_op_movi_i64;
#endif
    default:
        fprintf(stderr, "op_to_movi: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}

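/* Emit the argument pair for a "mov dst, src" and, when SRC is a
   non-global temp, link DST into SRC's copy list so later uses of DST
   can be rewritten to SRC.  The asserts hold because copy propagation
   has already replaced SRC by its class representative. */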
static void tcg_opt_gen_mov(TCGArg *gen_args, TCGArg dst, TCGArg src,
                            int nb_temps, int nb_globals)
{
    reset_temp(dst, nb_temps, nb_globals);
    assert(temps[src].state != TCG_TEMP_COPY);
    if (src >= nb_globals) {
        assert(temps[src].state != TCG_TEMP_CONST);
        if (temps[src].state != TCG_TEMP_HAS_COPY) {
            temps[src].state = TCG_TEMP_HAS_COPY;
            temps[src].next_copy = src;
            temps[src].prev_copy = src;
        }
        temps[dst].state = TCG_TEMP_COPY;
        temps[dst].val = src;
        temps[dst].next_copy = temps[src].next_copy;
        temps[dst].prev_copy = src;
        temps[temps[dst].next_copy].prev_copy = dst;
        temps[src].next_copy = dst;
    }
    gen_args[0] = dst;
    gen_args[1] = src;
}

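/* Emit the argument pair for a "movi dst, val" and record that DST now
   holds the constant VAL. */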
static void tcg_opt_gen_movi(TCGArg *gen_args, TCGArg dst, TCGArg val,
                             int nb_temps, int nb_globals)
{
    reset_temp(dst, nb_temps, nb_globals);
    temps[dst].state = TCG_TEMP_CONST;
    temps[dst].val = val;
    gen_args[0] = dst;
    gen_args[1] = val;
}

static int op_to_mov(int op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_mov_i32;
#if TCG_TARGET_REG_BITS == 64
    case 64:
        return INDEX_op_mov_i64;
#endif
    default:
        fprintf(stderr, "op_to_mov: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}

static TCGArg do_constant_folding_2(int op, TCGArg x, TCGArg y)
{
    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (uint32_t)y;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_shl_i64:
        return (uint64_t)x << (uint64_t)y;
#endif

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (uint32_t)y;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_shr_i64:
        return (uint64_t)x >> (uint64_t)y;
#endif

    case INDEX_op_sar_i32:
        return (int32_t)x >> (int32_t)y;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_sar_i64:
        return (int64_t)x >> (int64_t)y;
#endif

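    /* Rotates are folded as a pair of shifts.  On a 64-bit host the
       operands of the 32-bit variants are masked first so the shifts
       only see the low 32 bits. */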
#ifdef TCG_TARGET_HAS_rot_i32
    case INDEX_op_rotr_i32:
#if TCG_TARGET_REG_BITS == 64
        x &= 0xffffffff;
        y &= 0xffffffff;
#endif
        x = (x << (32 - y)) | (x >> y);
        return x;
#endif

#ifdef TCG_TARGET_HAS_rot_i64
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_rotr_i64:
        x = (x << (64 - y)) | (x >> y);
        return x;
#endif
#endif

#ifdef TCG_TARGET_HAS_rot_i32
    case INDEX_op_rotl_i32:
#if TCG_TARGET_REG_BITS == 64
        x &= 0xffffffff;
        y &= 0xffffffff;
#endif
        x = (x << y) | (x >> (32 - y));
        return x;
#endif

#ifdef TCG_TARGET_HAS_rot_i64
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_rotl_i64:
        x = (x << y) | (x >> (64 - y));
        return x;
#endif
#endif

#if defined(TCG_TARGET_HAS_not_i32) || defined(TCG_TARGET_HAS_not_i64)
#ifdef TCG_TARGET_HAS_not_i32
    case INDEX_op_not_i32:
#endif
#ifdef TCG_TARGET_HAS_not_i64
    case INDEX_op_not_i64:
#endif
        return ~x;
#endif

#if defined(TCG_TARGET_HAS_ext8s_i32) || defined(TCG_TARGET_HAS_ext8s_i64)
#ifdef TCG_TARGET_HAS_ext8s_i32
    case INDEX_op_ext8s_i32:
#endif
#ifdef TCG_TARGET_HAS_ext8s_i64
    case INDEX_op_ext8s_i64:
#endif
        return (int8_t)x;
#endif

#if defined(TCG_TARGET_HAS_ext16s_i32) || defined(TCG_TARGET_HAS_ext16s_i64)
#ifdef TCG_TARGET_HAS_ext16s_i32
    case INDEX_op_ext16s_i32:
#endif
#ifdef TCG_TARGET_HAS_ext16s_i64
    case INDEX_op_ext16s_i64:
#endif
        return (int16_t)x;
#endif

#if defined(TCG_TARGET_HAS_ext8u_i32) || defined(TCG_TARGET_HAS_ext8u_i64)
#ifdef TCG_TARGET_HAS_ext8u_i32
    case INDEX_op_ext8u_i32:
#endif
#ifdef TCG_TARGET_HAS_ext8u_i64
    case INDEX_op_ext8u_i64:
#endif
        return (uint8_t)x;
#endif

#if defined(TCG_TARGET_HAS_ext16u_i32) || defined(TCG_TARGET_HAS_ext16u_i64)
#ifdef TCG_TARGET_HAS_ext16u_i32
    case INDEX_op_ext16u_i32:
#endif
#ifdef TCG_TARGET_HAS_ext16u_i64
    case INDEX_op_ext16u_i64:
#endif
        return (uint16_t)x;
#endif

#if TCG_TARGET_REG_BITS == 64
#ifdef TCG_TARGET_HAS_ext32s_i64
    case INDEX_op_ext32s_i64:
        return (int32_t)x;
#endif

#ifdef TCG_TARGET_HAS_ext32u_i64
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;
#endif
#endif

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}

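/* Fold the expression, then truncate 32-bit results on a 64-bit host
   so stale high bits never leak into later folding steps. */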
static TCGArg do_constant_folding(int op, TCGArg x, TCGArg y)
{
    TCGArg res = do_constant_folding_2(op, x, y);
#if TCG_TARGET_REG_BITS == 64
    if (op_bits(op) == 32) {
        res &= 0xffffffff;
    }
#endif
    return res;
}

/* Propagate constants and copies, fold constant expressions. */
static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
                                    TCGArg *args, TCGOpDef *tcg_op_defs)
{
    int i, nb_ops, op_index, op, nb_temps, nb_globals, nb_call_args;
    const TCGOpDef *def;
    TCGArg *gen_args;
    TCGArg tmp;
    /* The TEMPS array has an element for each temp.
       If the temp holds a constant, its value is kept in the element's
       val field.  If the temp is a copy of other temps, the element
       holds the representative of its equivalence class.  Otherwise
       the element is unused. */
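
    /* For example (a purely illustrative op stream):

           movi_i32 t1, $8
           mov_i32  t2, t1      -> rewritten below to movi_i32 t2, $8
           add_i32  t3, t2, t1  -> folded to          movi_i32 t3, $16
     */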

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));

    nb_ops = tcg_opc_ptr - gen_opc_buf;
    gen_args = args;
    for (op_index = 0; op_index < nb_ops; op_index++) {
        op = gen_opc_buf[op_index];
        def = &tcg_op_defs[op];
        /* Do copy propagation */
        if (!(def->flags & (TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS))) {
            assert(op != INDEX_op_call);
            for (i = def->nb_oargs; i < def->nb_oargs + def->nb_iargs; i++) {
                if (temps[args[i]].state == TCG_TEMP_COPY) {
                    args[i] = temps[args[i]].val;
                }
            }
        }

        /* For commutative operations, move the constant argument into
           the second slot so the identity-simplification cases below
           only need to inspect args[2]. */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(and):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                tmp = args[1];
                args[1] = args[2];
                args[2] = tmp;
            }
            break;
        default:
            break;
        }

        /* Simplify expressions where an algebraic identity applies:
           "x op 0" and "x and/or x" become movs, "x * 0" becomes 0. */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
#ifdef TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
        case INDEX_op_rotr_i32:
#endif
#ifdef TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
        case INDEX_op_rotr_i64:
#endif
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                /* Proceed with possible constant folding. */
                break;
            }
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                if ((temps[args[0]].state == TCG_TEMP_COPY
                    && temps[args[0]].val == args[1])
                    || args[0] == args[1]) {
                    args += 3;
                    gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(gen_args, args[0], args[1],
                                    nb_temps, nb_globals);
                    gen_args += 2;
                    args += 3;
                }
                continue;
            }
            break;
        CASE_OP_32_64(mul):
            if ((temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0)) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0, nb_temps, nb_globals);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
            if (args[1] == args[2]) {
                if (args[1] == args[0]) {
                    args += 3;
                    gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(gen_args, args[0], args[1], nb_temps,
                                    nb_globals);
                    gen_args += 2;
                    args += 3;
                }
                continue;
            }
            break;
        }

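        /* The cases above that rewrote the op used "continue"; every
           op that reaches this point falls through to the folding
           switch below with its args pointer untouched. */
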
        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted for arguments by the
           register allocator where needed and possible.  Also detect
           copies. */
        switch (op) {
        CASE_OP_32_64(mov):
            if ((temps[args[1]].state == TCG_TEMP_COPY
                && temps[args[1]].val == args[0])
                || args[0] == args[1]) {
                args += 2;
                gen_opc_buf[op_index] = INDEX_op_nop;
                break;
            }
            if (temps[args[1]].state != TCG_TEMP_CONST) {
                tcg_opt_gen_mov(gen_args, args[0], args[1],
                                nb_temps, nb_globals);
                gen_args += 2;
                args += 2;
                break;
            }
            /* Source argument is constant.  Rewrite the operation and
               let movi case handle it. */
            op = op_to_movi(op);
            gen_opc_buf[op_index] = op;
            args[1] = temps[args[1]].val;
            /* fallthrough */
        CASE_OP_32_64(movi):
            tcg_opt_gen_movi(gen_args, args[0], args[1], nb_temps, nb_globals);
            gen_args += 2;
            args += 2;
            break;
        CASE_OP_32_64(not):
#ifdef TCG_TARGET_HAS_ext8s_i32
        case INDEX_op_ext8s_i32:
#endif
#ifdef TCG_TARGET_HAS_ext8s_i64
        case INDEX_op_ext8s_i64:
#endif
#ifdef TCG_TARGET_HAS_ext16s_i32
        case INDEX_op_ext16s_i32:
#endif
#ifdef TCG_TARGET_HAS_ext16s_i64
        case INDEX_op_ext16s_i64:
#endif
#ifdef TCG_TARGET_HAS_ext8u_i32
        case INDEX_op_ext8u_i32:
#endif
#ifdef TCG_TARGET_HAS_ext8u_i64
        case INDEX_op_ext8u_i64:
#endif
#ifdef TCG_TARGET_HAS_ext16u_i32
        case INDEX_op_ext16u_i32:
#endif
#ifdef TCG_TARGET_HAS_ext16u_i64
        case INDEX_op_ext16u_i64:
#endif
#if TCG_TARGET_REG_BITS == 64
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
#endif
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val, 0);
                tcg_opt_gen_movi(gen_args, args[0], tmp, nb_temps, nb_globals);
                gen_args += 2;
                args += 2;
                break;
            } else {
                reset_temp(args[0], nb_temps, nb_globals);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args += 2;
                args += 2;
                break;
            }
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
#ifdef TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
        case INDEX_op_rotr_i32:
#endif
#ifdef TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
        case INDEX_op_rotr_i64:
#endif
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val,
                                          temps[args[2]].val);
                tcg_opt_gen_movi(gen_args, args[0], tmp, nb_temps, nb_globals);
                gen_args += 2;
                args += 3;
                break;
            } else {
                reset_temp(args[0], nb_temps, nb_globals);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args += 3;
                args += 3;
                break;
            }
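        /* A call's arguments are laid out as: a count word holding the
           output-arg count in its high 16 bits and the input-arg count
           in the low 16 bits, then the output and input args, the call
           flags, and one final word -- nb_call_args + 3 words in all,
           which is exactly what the copy loop below moves across. */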
        case INDEX_op_call:
            nb_call_args = (args[0] >> 16) + (args[0] & 0xffff);
            if (!(args[nb_call_args + 1] & (TCG_CALL_CONST | TCG_CALL_PURE))) {
                for (i = 0; i < nb_globals; i++) {
                    reset_temp(i, nb_temps, nb_globals);
                }
            }
            for (i = 0; i < (args[0] >> 16); i++) {
                reset_temp(args[i + 1], nb_temps, nb_globals);
            }
            i = nb_call_args + 3;
            while (i) {
                *gen_args = *args;
                args++;
                gen_args++;
                i--;
            }
            break;
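        /* Labels and branches begin or end a basic block, so every
           known constant and copy must be forgotten: another
           control-flow path may reach the next op with different
           values in the temps. */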
        case INDEX_op_set_label:
        case INDEX_op_jmp:
        case INDEX_op_br:
        CASE_OP_32_64(brcond):
            memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));
            for (i = 0; i < def->nb_args; i++) {
                *gen_args = *args;
                args++;
                gen_args++;
            }
            break;
        default:
            /* Default case: we know nothing about the operation, so no
               propagation is done; we only invalidate the output args. */
            for (i = 0; i < def->nb_oargs; i++) {
                reset_temp(args[i], nb_temps, nb_globals);
            }
            for (i = 0; i < def->nb_args; i++) {
                gen_args[i] = args[i];
            }
            args += def->nb_args;
            gen_args += def->nb_args;
            break;
        }
    }

    return gen_args;
}

TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr,
                     TCGArg *args, TCGOpDef *tcg_op_defs)
{
    TCGArg *res;
    res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs);
    return res;
}