/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "config.h"

#include <stdlib.h>
#include <stdio.h>

#include "qemu-common.h"
#include "tcg-op.h"

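/* A note on the macro below: CASE_OP_32_64(add) expands to
   "case INDEX_op_add_i32:" plus, on 64-bit targets,
   "case INDEX_op_add_i64:", so a single label in a switch covers both
   widths of an operation. */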
#if TCG_TARGET_REG_BITS == 64
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
#else
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32)
#endif

typedef enum {
    TCG_TEMP_UNDEF = 0,
    TCG_TEMP_CONST,
    TCG_TEMP_COPY,
    TCG_TEMP_HAS_COPY,
    TCG_TEMP_ANY
} tcg_temp_state;

struct tcg_temp_info {
    tcg_temp_state state;
    uint16_t prev_copy;
    uint16_t next_copy;
    tcg_target_ulong val;
};

static struct tcg_temp_info temps[TCG_MAX_TEMPS];
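
/* Temps that are copies of one another form a class of equivalent temps:
   a circular doubly linked list threaded through prev_copy/next_copy, with
   exactly one member in state TCG_TEMP_HAS_COPY acting as the representative
   and every other member in state TCG_TEMP_COPY pointing at the
   representative through its val field.  A sketch (the indices are made up
   for illustration): after "mov t5, t3; mov t6, t3" the list is
   t3 <-> t5 <-> t6 <-> t3, t3 is TCG_TEMP_HAS_COPY, and
   temps[5].val == temps[6].val == 3. */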

/* Reset TEMP's state to TCG_TEMP_ANY.  If TEMP was a representative of some
   class of equivalent temps, a new representative should be chosen for that
   class. */
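/* For example (a hypothetical trace, not taken from the source): if
   t3 <-> t5 <-> t6 form a class with t3 as representative and t3 is reset,
   then t5 becomes the new TCG_TEMP_HAS_COPY representative, t6's val is
   redirected from 3 to 5, and t3 drops to TCG_TEMP_ANY.  A representative
   that would be left alone in its class is also reset to TCG_TEMP_ANY. */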
static void reset_temp(TCGArg temp, int nb_temps, int nb_globals)
{
    int i;
    TCGArg new_base = (TCGArg)-1;
    if (temps[temp].state == TCG_TEMP_HAS_COPY) {
        /* Pick the first non-global member of the class as the new
           representative.  */
        for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
            if (i >= nb_globals) {
                temps[i].state = TCG_TEMP_HAS_COPY;
                new_base = i;
                break;
            }
        }
        /* Point the remaining members at the new representative, or reset
           them if none was found.  */
        for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
            if (new_base == (TCGArg)-1) {
                temps[i].state = TCG_TEMP_ANY;
            } else {
                temps[i].val = new_base;
            }
        }
        /* Unlink TEMP from the circular list.  */
        temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
        temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
    } else if (temps[temp].state == TCG_TEMP_COPY) {
        temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
        temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
        new_base = temps[temp].val;
    }
    temps[temp].state = TCG_TEMP_ANY;
    /* A representative left alone in its class is no longer a copy of
       anything.  */
    if (new_base != (TCGArg)-1 && temps[new_base].next_copy == new_base) {
        temps[new_base].state = TCG_TEMP_ANY;
    }
}

static int op_bits(int op)
{
    switch (op) {
    case INDEX_op_mov_i32:
    case INDEX_op_add_i32:
    case INDEX_op_sub_i32:
    case INDEX_op_mul_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
#ifdef TCG_TARGET_HAS_rot_i32
    case INDEX_op_rotl_i32:
    case INDEX_op_rotr_i32:
#endif
#ifdef TCG_TARGET_HAS_not_i32
    case INDEX_op_not_i32:
#endif
#ifdef TCG_TARGET_HAS_ext8s_i32
    case INDEX_op_ext8s_i32:
#endif
#ifdef TCG_TARGET_HAS_ext16s_i32
    case INDEX_op_ext16s_i32:
#endif
#ifdef TCG_TARGET_HAS_ext8u_i32
    case INDEX_op_ext8u_i32:
#endif
#ifdef TCG_TARGET_HAS_ext16u_i32
    case INDEX_op_ext16u_i32:
#endif
        return 32;
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_mov_i64:
    case INDEX_op_add_i64:
    case INDEX_op_sub_i64:
    case INDEX_op_mul_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
#ifdef TCG_TARGET_HAS_rot_i64
    case INDEX_op_rotl_i64:
    case INDEX_op_rotr_i64:
#endif
#ifdef TCG_TARGET_HAS_not_i64
    case INDEX_op_not_i64:
#endif
#ifdef TCG_TARGET_HAS_ext8s_i64
    case INDEX_op_ext8s_i64:
#endif
#ifdef TCG_TARGET_HAS_ext16s_i64
    case INDEX_op_ext16s_i64:
#endif
#ifdef TCG_TARGET_HAS_ext32s_i64
    case INDEX_op_ext32s_i64:
#endif
#ifdef TCG_TARGET_HAS_ext8u_i64
    case INDEX_op_ext8u_i64:
#endif
#ifdef TCG_TARGET_HAS_ext16u_i64
    case INDEX_op_ext16u_i64:
#endif
#ifdef TCG_TARGET_HAS_ext32u_i64
    case INDEX_op_ext32u_i64:
#endif
        return 64;
#endif
    default:
        fprintf(stderr, "Unrecognized operation %d in op_bits.\n", op);
        tcg_abort();
    }
}

static int op_to_movi(int op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_movi_i32;
#if TCG_TARGET_REG_BITS == 64
    case 64:
        return INDEX_op_movi_i64;
#endif
    default:
        fprintf(stderr, "op_to_movi: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}

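/* Record that DST is now a copy of SRC and emit the mov's arguments into
   GEN_ARGS.  A hedged walk-through of the splice below, under the list
   invariant described above: DST is inserted immediately after SRC, so an
   existing class src <-> a <-> ... becomes src <-> dst <-> a <-> ... */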
static void tcg_opt_gen_mov(TCGContext *s, TCGArg *gen_args, TCGArg dst,
                            TCGArg src, int nb_temps, int nb_globals)
{
    reset_temp(dst, nb_temps, nb_globals);
    assert(temps[src].state != TCG_TEMP_COPY);
    /* Only track the copy if neither temp is a global, and both are of the
       same kind (both locals or both registers).  */
    if (src >= nb_globals && dst >= nb_globals &&
        tcg_arg_is_local(s, src) == tcg_arg_is_local(s, dst)) {
        assert(temps[src].state != TCG_TEMP_CONST);
        if (temps[src].state != TCG_TEMP_HAS_COPY) {
            temps[src].state = TCG_TEMP_HAS_COPY;
            temps[src].next_copy = src;
            temps[src].prev_copy = src;
        }
        temps[dst].state = TCG_TEMP_COPY;
        temps[dst].val = src;
        /* Splice DST into the circular list directly after SRC.  */
        temps[dst].next_copy = temps[src].next_copy;
        temps[dst].prev_copy = src;
        temps[temps[dst].next_copy].prev_copy = dst;
        temps[src].next_copy = dst;
    }
    gen_args[0] = dst;
    gen_args[1] = src;
}

static void tcg_opt_gen_movi(TCGArg *gen_args, TCGArg dst, TCGArg val,
                             int nb_temps, int nb_globals)
{
    reset_temp(dst, nb_temps, nb_globals);
    temps[dst].state = TCG_TEMP_CONST;
    temps[dst].val = val;
    gen_args[0] = dst;
    gen_args[1] = val;
}

static int op_to_mov(int op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_mov_i32;
#if TCG_TARGET_REG_BITS == 64
    case 64:
        return INDEX_op_mov_i64;
#endif
    default:
        fprintf(stderr, "op_to_mov: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}

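/* Evaluate one TCG operation on constant operands using host arithmetic.
   X and Y are raw tcg_target_ulong values; for 32-bit operations on a
   64-bit host, the wrapper do_constant_folding() below masks the result
   back to 32 bits. */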
static TCGArg do_constant_folding_2(int op, TCGArg x, TCGArg y)
{
    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (uint32_t)y;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_shl_i64:
        return (uint64_t)x << (uint64_t)y;
#endif

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (uint32_t)y;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_shr_i64:
        return (uint64_t)x >> (uint64_t)y;
#endif

    case INDEX_op_sar_i32:
        return (int32_t)x >> (int32_t)y;

#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_sar_i64:
        return (int64_t)x >> (int64_t)y;
#endif

#ifdef TCG_TARGET_HAS_rot_i32
    case INDEX_op_rotr_i32:
#if TCG_TARGET_REG_BITS == 64
        x &= 0xffffffff;
        y &= 0xffffffff;
#endif
        x = (x << (32 - y)) | (x >> y);
        return x;
#endif

#ifdef TCG_TARGET_HAS_rot_i64
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_rotr_i64:
        x = (x << (64 - y)) | (x >> y);
        return x;
#endif
#endif

#ifdef TCG_TARGET_HAS_rot_i32
    case INDEX_op_rotl_i32:
#if TCG_TARGET_REG_BITS == 64
        x &= 0xffffffff;
        y &= 0xffffffff;
#endif
        x = (x << y) | (x >> (32 - y));
        return x;
#endif

#ifdef TCG_TARGET_HAS_rot_i64
#if TCG_TARGET_REG_BITS == 64
    case INDEX_op_rotl_i64:
        x = (x << y) | (x >> (64 - y));
        return x;
#endif
#endif

#if defined(TCG_TARGET_HAS_not_i32) || defined(TCG_TARGET_HAS_not_i64)
#ifdef TCG_TARGET_HAS_not_i32
    case INDEX_op_not_i32:
#endif
#ifdef TCG_TARGET_HAS_not_i64
    case INDEX_op_not_i64:
#endif
        return ~x;
#endif

#if defined(TCG_TARGET_HAS_ext8s_i32) || defined(TCG_TARGET_HAS_ext8s_i64)
#ifdef TCG_TARGET_HAS_ext8s_i32
    case INDEX_op_ext8s_i32:
#endif
#ifdef TCG_TARGET_HAS_ext8s_i64
    case INDEX_op_ext8s_i64:
#endif
        return (int8_t)x;
#endif

#if defined(TCG_TARGET_HAS_ext16s_i32) || defined(TCG_TARGET_HAS_ext16s_i64)
#ifdef TCG_TARGET_HAS_ext16s_i32
    case INDEX_op_ext16s_i32:
#endif
#ifdef TCG_TARGET_HAS_ext16s_i64
    case INDEX_op_ext16s_i64:
#endif
        return (int16_t)x;
#endif

#if defined(TCG_TARGET_HAS_ext8u_i32) || defined(TCG_TARGET_HAS_ext8u_i64)
#ifdef TCG_TARGET_HAS_ext8u_i32
    case INDEX_op_ext8u_i32:
#endif
#ifdef TCG_TARGET_HAS_ext8u_i64
    case INDEX_op_ext8u_i64:
#endif
        return (uint8_t)x;
#endif

#if defined(TCG_TARGET_HAS_ext16u_i32) || defined(TCG_TARGET_HAS_ext16u_i64)
#ifdef TCG_TARGET_HAS_ext16u_i32
    case INDEX_op_ext16u_i32:
#endif
#ifdef TCG_TARGET_HAS_ext16u_i64
    case INDEX_op_ext16u_i64:
#endif
        return (uint16_t)x;
#endif

#if TCG_TARGET_REG_BITS == 64
#ifdef TCG_TARGET_HAS_ext32s_i64
    case INDEX_op_ext32s_i64:
        return (int32_t)x;
#endif

#ifdef TCG_TARGET_HAS_ext32u_i64
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;
#endif
#endif

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}

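/* Fold an operation and truncate the result to the operation's width.
   A hedged example (values invented for illustration): folding add_i32
   with x = 0xffffffff and y = 1 on a 64-bit host yields 0x100000000 from
   the host addition, which the mask below correctly truncates to 0. */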
static TCGArg do_constant_folding(int op, TCGArg x, TCGArg y)
{
    TCGArg res = do_constant_folding_2(op, x, y);
#if TCG_TARGET_REG_BITS == 64
    if (op_bits(op) == 32) {
        res &= 0xffffffff;
    }
#endif
    return res;
}

/* Propagate constants and copies, fold constant expressions. */
static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
                                    TCGArg *args, TCGOpDef *tcg_op_defs)
{
    int i, nb_ops, op_index, op, nb_temps, nb_globals, nb_call_args;
    const TCGOpDef *def;
    TCGArg *gen_args;
    TCGArg tmp;
    /* The TEMPS array has an element for each temp.
       If the temp holds a constant, its value is kept in that element's
       val field.  If the temp is a copy of other temps, the representative
       of its equivalence class is kept there instead.  If the temp is
       neither a copy nor a constant, the element is unused. */

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));

    nb_ops = tcg_opc_ptr - gen_opc_buf;
    gen_args = args;
    for (op_index = 0; op_index < nb_ops; op_index++) {
        op = gen_opc_buf[op_index];
        def = &tcg_op_defs[op];
        /* Do copy propagation: rewrite input arguments that are copies
           into their class representatives.  */
        if (!(def->flags & (TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS))) {
            assert(op != INDEX_op_call);
            for (i = def->nb_oargs; i < def->nb_oargs + def->nb_iargs; i++) {
                if (temps[args[i]].state == TCG_TEMP_COPY) {
                    args[i] = temps[args[i]].val;
                }
            }
        }
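        /* E.g. (a made-up sequence): after "mov_i32 t5, t3", a later
           "add_i32 t6, t5, t4" has its input t5 rewritten to the class
           representative t3, giving "add_i32 t6, t3, t4". */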

        /* For commutative operations, make the constant the second
           argument.  */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(and):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                tmp = args[1];
                args[1] = args[2];
                args[2] = tmp;
            }
            break;
        default:
            break;
        }
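        /* E.g. (an invented instance): if t1 holds a constant in
           "add_i32 t0, t1, t2", the inputs are swapped to
           "add_i32 t0, t2, t1" so that the cases below only need to
           check args[2] for a constant. */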

        /* Simplify expression if possible. */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
#ifdef TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
        case INDEX_op_rotr_i32:
#endif
#ifdef TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
        case INDEX_op_rotr_i64:
#endif
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                /* Proceed with possible constant folding. */
                break;
            }
            /* Simplify "x op 0" into a mov, or a nop when dst and src
               already match.  */
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                if ((temps[args[0]].state == TCG_TEMP_COPY
                    && temps[args[0]].val == args[1])
                    || args[0] == args[1]) {
                    args += 3;
                    gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[1],
                                    nb_temps, nb_globals);
                    gen_args += 2;
                    args += 3;
                }
                continue;
            }
            break;
        CASE_OP_32_64(mul):
            /* Simplify "x * 0" into a constant zero.  */
            if ((temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0)) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0, nb_temps, nb_globals);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
            /* Simplify "x | x" and "x & x" into a mov, or a nop.  */
            if (args[1] == args[2]) {
                if (args[1] == args[0]) {
                    args += 3;
                    gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[1], nb_temps,
                                    nb_globals);
                    gen_args += 2;
                    args += 3;
                }
                continue;
            }
            break;
        }

        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted for arguments by the
           register allocator where needed and possible.  Also detect
           copies. */
        switch (op) {
        CASE_OP_32_64(mov):
            if ((temps[args[1]].state == TCG_TEMP_COPY
                && temps[args[1]].val == args[0])
                || args[0] == args[1]) {
                /* The mov is already a no-op; drop it.  */
                args += 2;
                gen_opc_buf[op_index] = INDEX_op_nop;
                break;
            }
            if (temps[args[1]].state != TCG_TEMP_CONST) {
                tcg_opt_gen_mov(s, gen_args, args[0], args[1],
                                nb_temps, nb_globals);
                gen_args += 2;
                args += 2;
                break;
            }
            /* The source argument is constant.  Rewrite the operation and
               let the movi case handle it. */
            op = op_to_movi(op);
            gen_opc_buf[op_index] = op;
            args[1] = temps[args[1]].val;
            /* fallthrough */
        CASE_OP_32_64(movi):
            tcg_opt_gen_movi(gen_args, args[0], args[1], nb_temps, nb_globals);
            gen_args += 2;
            args += 2;
            break;
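            /* E.g. (an invented instance): if t1 is known to hold the
               constant 5, "mov_i32 t0, t1" is rewritten above into
               "movi_i32 t0, 5", and t0 is then recorded as constant 5. */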
        CASE_OP_32_64(not):
#ifdef TCG_TARGET_HAS_ext8s_i32
        case INDEX_op_ext8s_i32:
#endif
#ifdef TCG_TARGET_HAS_ext8s_i64
        case INDEX_op_ext8s_i64:
#endif
#ifdef TCG_TARGET_HAS_ext16s_i32
        case INDEX_op_ext16s_i32:
#endif
#ifdef TCG_TARGET_HAS_ext16s_i64
        case INDEX_op_ext16s_i64:
#endif
#ifdef TCG_TARGET_HAS_ext8u_i32
        case INDEX_op_ext8u_i32:
#endif
#ifdef TCG_TARGET_HAS_ext8u_i64
        case INDEX_op_ext8u_i64:
#endif
#ifdef TCG_TARGET_HAS_ext16u_i32
        case INDEX_op_ext16u_i32:
#endif
#ifdef TCG_TARGET_HAS_ext16u_i64
        case INDEX_op_ext16u_i64:
#endif
#if TCG_TARGET_REG_BITS == 64
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
#endif
            /* A unary operation on a constant input folds into a movi.  */
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val, 0);
                tcg_opt_gen_movi(gen_args, args[0], tmp, nb_temps, nb_globals);
                gen_args += 2;
                args += 2;
                break;
            } else {
                reset_temp(args[0], nb_temps, nb_globals);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args += 2;
                args += 2;
                break;
            }
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
#ifdef TCG_TARGET_HAS_rot_i32
        case INDEX_op_rotl_i32:
        case INDEX_op_rotr_i32:
#endif
#ifdef TCG_TARGET_HAS_rot_i64
        case INDEX_op_rotl_i64:
        case INDEX_op_rotr_i64:
#endif
            /* A binary operation on two constant inputs folds into a movi;
               otherwise the output temp is no longer known.  */
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val,
                                          temps[args[2]].val);
                tcg_opt_gen_movi(gen_args, args[0], tmp, nb_temps, nb_globals);
                gen_args += 2;
                args += 3;
                break;
            } else {
                reset_temp(args[0], nb_temps, nb_globals);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args += 3;
                args += 3;
                break;
            }
        case INDEX_op_call:
            /* Unless the call is known to be "const" or "pure", it may
               clobber any global.  */
            nb_call_args = (args[0] >> 16) + (args[0] & 0xffff);
            if (!(args[nb_call_args + 1] & (TCG_CALL_CONST | TCG_CALL_PURE))) {
                for (i = 0; i < nb_globals; i++) {
                    reset_temp(i, nb_temps, nb_globals);
                }
            }
            /* The call's output arguments are trashed in any case.  */
            for (i = 0; i < (args[0] >> 16); i++) {
                reset_temp(args[i + 1], nb_temps, nb_globals);
            }
            i = nb_call_args + 3;
            while (i) {
                *gen_args = *args;
                args++;
                gen_args++;
                i--;
            }
            break;
        case INDEX_op_set_label:
        case INDEX_op_jmp:
        case INDEX_op_br:
        CASE_OP_32_64(brcond):
            /* At labels and branches, forget everything known about the
               temps: another code path may join here.  */
            memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));
            for (i = 0; i < def->nb_args; i++) {
                *gen_args = *args;
                args++;
                gen_args++;
            }
            break;
        default:
            /* Default case: we know nothing about the operation, so no
               propagation can be done.  We only trash the output args. */
            for (i = 0; i < def->nb_oargs; i++) {
                reset_temp(args[i], nb_temps, nb_globals);
            }
            for (i = 0; i < def->nb_args; i++) {
                gen_args[i] = args[i];
            }
            args += def->nb_args;
            gen_args += def->nb_args;
            break;
        }
    }

    return gen_args;
}

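/* Entry point for the rest of TCG.  A hedged usage sketch (the actual call
   site lives in tcg.c and is not shown in this file):

       gen_opparam_ptr =
           tcg_optimize(s, gen_opc_ptr, gen_opparam_buf, tcg_op_defs);

   The opcode stream in gen_opc_buf is rewritten in place, and the return
   value is the new end of the (compacted) argument buffer. */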
TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr,
                     TCGArg *args, TCGOpDef *tcg_op_defs)
{
    TCGArg *res;
    res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs);
    return res;
}