]> git.proxmox.com Git - mirror_qemu.git/blob - tcg/optimize.c
tcg/optimize: remove TCG_TEMP_ANY
[mirror_qemu.git] / tcg / optimize.c
1 /*
2 * Optimizations for Tiny Code Generator for QEMU
3 *
4 * Copyright (c) 2010 Samsung Electronics.
5 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a copy
8 * of this software and associated documentation files (the "Software"), to deal
9 * in the Software without restriction, including without limitation the rights
10 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
11 * copies of the Software, and to permit persons to whom the Software is
12 * furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be included in
15 * all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
23 * THE SOFTWARE.
24 */
25
26 #include "config.h"
27
28 #include <stdlib.h>
29 #include <stdio.h>
30
31 #include "qemu-common.h"
32 #include "tcg-op.h"
33
/* Expand to the pair of case labels for the 32-bit and 64-bit variants
   of opcode X; e.g. CASE_OP_32_64(add) yields
   "case INDEX_op_add_i32: case INDEX_op_add_i64".  */
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
37
/* What the optimizer currently knows about a temp's value.  */
typedef enum {
    TCG_TEMP_UNDEF = 0,   /* Nothing known (also the zeroed/reset state).  */
    TCG_TEMP_CONST,       /* Temp holds the constant stored in 'val'.  */
    TCG_TEMP_COPY,        /* Temp is a copy; 'val' is the representative.  */
    TCG_TEMP_HAS_COPY     /* Temp is the representative of a copy class.  */
} tcg_temp_state;
44
/* Per-temp tracking record.  Temps that are copies of one another are
   linked into a circular doubly-linked list via prev_copy/next_copy.  */
struct tcg_temp_info {
    tcg_temp_state state;   /* What is known about this temp.  */
    uint16_t prev_copy;     /* Previous temp index in the copy list.  */
    uint16_t next_copy;     /* Next temp index in the copy list.  */
    tcg_target_ulong val;   /* Constant value, or copy representative.  */
};

/* One entry per TCG temp, indexed by temp number; memset to zero
   (i.e. all TCG_TEMP_UNDEF) before each optimization pass.  */
static struct tcg_temp_info temps[TCG_MAX_TEMPS];
53
/* Reset TEMP's state to TCG_TEMP_UNDEF. If TEMP was a representative of some
   class of equivalent temp's, a new representative should be chosen in this
   class. */
static void reset_temp(TCGArg temp, int nb_temps, int nb_globals)
{
    int i;
    TCGArg new_base = (TCGArg)-1;
    if (temps[temp].state == TCG_TEMP_HAS_COPY) {
        /* TEMP led a copy class: pick the first non-global member of the
           class as the new representative (globals are skipped).  */
        for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
            if (i >= nb_globals) {
                temps[i].state = TCG_TEMP_HAS_COPY;
                new_base = i;
                break;
            }
        }
        /* Repoint the remaining members at the new representative, or
           invalidate them all if no representative could be found.  */
        for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
            if (new_base == (TCGArg)-1) {
                temps[i].state = TCG_TEMP_UNDEF;
            } else {
                temps[i].val = new_base;
            }
        }
        /* Unlink TEMP from the circular copy list.  */
        temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
        temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
    } else if (temps[temp].state == TCG_TEMP_COPY) {
        /* TEMP was an ordinary member of a class: just unlink it.  */
        temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
        temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
        new_base = temps[temp].val;
    }
    temps[temp].state = TCG_TEMP_UNDEF;
    /* If the class shrank to a singleton, its representative no longer
       tracks anything useful.  */
    if (new_base != (TCGArg)-1 && temps[new_base].next_copy == new_base) {
        temps[new_base].state = TCG_TEMP_UNDEF;
    }
}
88
89 static int op_bits(TCGOpcode op)
90 {
91 const TCGOpDef *def = &tcg_op_defs[op];
92 return def->flags & TCG_OPF_64BIT ? 64 : 32;
93 }
94
95 static TCGOpcode op_to_movi(TCGOpcode op)
96 {
97 switch (op_bits(op)) {
98 case 32:
99 return INDEX_op_movi_i32;
100 case 64:
101 return INDEX_op_movi_i64;
102 default:
103 fprintf(stderr, "op_to_movi: unexpected return value of "
104 "function op_bits.\n");
105 tcg_abort();
106 }
107 }
108
/* Record that DST becomes a copy of SRC, link DST into SRC's class of
   equivalent temps, and emit the two mov arguments into GEN_ARGS.
   SRC is expected to already be a class representative or an untracked
   temp, never a plain copy.  */
static void tcg_opt_gen_mov(TCGArg *gen_args, TCGArg dst, TCGArg src,
                            int nb_temps, int nb_globals)
{
    reset_temp(dst, nb_temps, nb_globals);
    assert(temps[src].state != TCG_TEMP_COPY);
    /* Only non-global temps are tracked as copy sources.  */
    if (src >= nb_globals) {
        assert(temps[src].state != TCG_TEMP_CONST);
        if (temps[src].state != TCG_TEMP_HAS_COPY) {
            /* Start a new singleton class with SRC as representative.  */
            temps[src].state = TCG_TEMP_HAS_COPY;
            temps[src].next_copy = src;
            temps[src].prev_copy = src;
        }
        /* Insert DST into the circular list immediately after SRC.  */
        temps[dst].state = TCG_TEMP_COPY;
        temps[dst].val = src;
        temps[dst].next_copy = temps[src].next_copy;
        temps[dst].prev_copy = src;
        temps[temps[dst].next_copy].prev_copy = dst;
        temps[src].next_copy = dst;
    }
    gen_args[0] = dst;
    gen_args[1] = src;
}
131
132 static void tcg_opt_gen_movi(TCGArg *gen_args, TCGArg dst, TCGArg val,
133 int nb_temps, int nb_globals)
134 {
135 reset_temp(dst, nb_temps, nb_globals);
136 temps[dst].state = TCG_TEMP_CONST;
137 temps[dst].val = val;
138 gen_args[0] = dst;
139 gen_args[1] = val;
140 }
141
142 static TCGOpcode op_to_mov(TCGOpcode op)
143 {
144 switch (op_bits(op)) {
145 case 32:
146 return INDEX_op_mov_i32;
147 case 64:
148 return INDEX_op_mov_i64;
149 default:
150 fprintf(stderr, "op_to_mov: unexpected return value of "
151 "function op_bits.\n");
152 tcg_abort();
153 }
154 }
155
156 static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
157 {
158 switch (op) {
159 CASE_OP_32_64(add):
160 return x + y;
161
162 CASE_OP_32_64(sub):
163 return x - y;
164
165 CASE_OP_32_64(mul):
166 return x * y;
167
168 CASE_OP_32_64(and):
169 return x & y;
170
171 CASE_OP_32_64(or):
172 return x | y;
173
174 CASE_OP_32_64(xor):
175 return x ^ y;
176
177 case INDEX_op_shl_i32:
178 return (uint32_t)x << (uint32_t)y;
179
180 case INDEX_op_shl_i64:
181 return (uint64_t)x << (uint64_t)y;
182
183 case INDEX_op_shr_i32:
184 return (uint32_t)x >> (uint32_t)y;
185
186 case INDEX_op_shr_i64:
187 return (uint64_t)x >> (uint64_t)y;
188
189 case INDEX_op_sar_i32:
190 return (int32_t)x >> (int32_t)y;
191
192 case INDEX_op_sar_i64:
193 return (int64_t)x >> (int64_t)y;
194
195 case INDEX_op_rotr_i32:
196 x = ((uint32_t)x << (32 - y)) | ((uint32_t)x >> y);
197 return x;
198
199 case INDEX_op_rotr_i64:
200 x = ((uint64_t)x << (64 - y)) | ((uint64_t)x >> y);
201 return x;
202
203 case INDEX_op_rotl_i32:
204 x = ((uint32_t)x << y) | ((uint32_t)x >> (32 - y));
205 return x;
206
207 case INDEX_op_rotl_i64:
208 x = ((uint64_t)x << y) | ((uint64_t)x >> (64 - y));
209 return x;
210
211 CASE_OP_32_64(not):
212 return ~x;
213
214 CASE_OP_32_64(neg):
215 return -x;
216
217 CASE_OP_32_64(andc):
218 return x & ~y;
219
220 CASE_OP_32_64(orc):
221 return x | ~y;
222
223 CASE_OP_32_64(eqv):
224 return ~(x ^ y);
225
226 CASE_OP_32_64(nand):
227 return ~(x & y);
228
229 CASE_OP_32_64(nor):
230 return ~(x | y);
231
232 CASE_OP_32_64(ext8s):
233 return (int8_t)x;
234
235 CASE_OP_32_64(ext16s):
236 return (int16_t)x;
237
238 CASE_OP_32_64(ext8u):
239 return (uint8_t)x;
240
241 CASE_OP_32_64(ext16u):
242 return (uint16_t)x;
243
244 case INDEX_op_ext32s_i64:
245 return (int32_t)x;
246
247 case INDEX_op_ext32u_i64:
248 return (uint32_t)x;
249
250 default:
251 fprintf(stderr,
252 "Unrecognized operation %d in do_constant_folding.\n", op);
253 tcg_abort();
254 }
255 }
256
257 static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
258 {
259 TCGArg res = do_constant_folding_2(op, x, y);
260 if (op_bits(op) == 32) {
261 res &= 0xffffffff;
262 }
263 return res;
264 }
265
/* Evaluate condition C between constant operands X and Y at the operand
   width of OP, returning 1 if the comparison holds and 0 otherwise.
   An unhandled width or condition code falls through every switch and
   reaches the abort at the bottom.  */
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                       TCGArg y, TCGCond c)
{
    switch (op_bits(op)) {
    case 32:
        switch (c) {
        case TCG_COND_EQ:
            return (uint32_t)x == (uint32_t)y;
        case TCG_COND_NE:
            return (uint32_t)x != (uint32_t)y;
        case TCG_COND_LT:
            return (int32_t)x < (int32_t)y;
        case TCG_COND_GE:
            return (int32_t)x >= (int32_t)y;
        case TCG_COND_LE:
            return (int32_t)x <= (int32_t)y;
        case TCG_COND_GT:
            return (int32_t)x > (int32_t)y;
        case TCG_COND_LTU:
            return (uint32_t)x < (uint32_t)y;
        case TCG_COND_GEU:
            return (uint32_t)x >= (uint32_t)y;
        case TCG_COND_LEU:
            return (uint32_t)x <= (uint32_t)y;
        case TCG_COND_GTU:
            return (uint32_t)x > (uint32_t)y;
        }
        break;
    case 64:
        switch (c) {
        case TCG_COND_EQ:
            return (uint64_t)x == (uint64_t)y;
        case TCG_COND_NE:
            return (uint64_t)x != (uint64_t)y;
        case TCG_COND_LT:
            return (int64_t)x < (int64_t)y;
        case TCG_COND_GE:
            return (int64_t)x >= (int64_t)y;
        case TCG_COND_LE:
            return (int64_t)x <= (int64_t)y;
        case TCG_COND_GT:
            return (int64_t)x > (int64_t)y;
        case TCG_COND_LTU:
            return (uint64_t)x < (uint64_t)y;
        case TCG_COND_GEU:
            return (uint64_t)x >= (uint64_t)y;
        case TCG_COND_LEU:
            return (uint64_t)x <= (uint64_t)y;
        case TCG_COND_GTU:
            return (uint64_t)x > (uint64_t)y;
        }
        break;
    }

    fprintf(stderr,
            "Unrecognized bitness %d or condition %d in "
            "do_constant_folding_cond.\n", op_bits(op), c);
    tcg_abort();
}
325
326
/* Propagate constants and copies, fold constant expressions.

   Walks the opcode stream in gen_opc_buf once, rewriting opcodes in
   place and re-emitting their arguments into the same args array
   (GEN_ARGS never runs ahead of ARGS, so the in-place compaction is
   safe).  Returns the new end of the argument stream.  */
static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
                                    TCGArg *args, TCGOpDef *tcg_op_defs)
{
    int i, nb_ops, op_index, nb_temps, nb_globals, nb_call_args;
    TCGOpcode op;
    const TCGOpDef *def;
    TCGArg *gen_args;
    TCGArg tmp;
    TCGCond cond;

    /* Array VALS has an element for each temp.
       If this temp holds a constant then its value is kept in VALS' element.
       If this temp is a copy of other ones then this equivalence class'
       representative is kept in VALS' element.
       If this temp is neither copy nor constant then corresponding VALS'
       element is unused. */

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    /* Start knowing nothing about any temp (all TCG_TEMP_UNDEF).  */
    memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));

    nb_ops = tcg_opc_ptr - gen_opc_buf;
    gen_args = args;
    for (op_index = 0; op_index < nb_ops; op_index++) {
        op = gen_opc_buf[op_index];
        def = &tcg_op_defs[op];
        /* Do copy propagation */
        if (!(def->flags & (TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS))) {
            assert(op != INDEX_op_call);
            /* Replace every input argument known to be a copy by its
               class representative.  */
            for (i = def->nb_oargs; i < def->nb_oargs + def->nb_iargs; i++) {
                if (temps[args[i]].state == TCG_TEMP_COPY) {
                    args[i] = temps[args[i]].val;
                }
            }
        }

        /* For commutative operations make constant second argument */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(and):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                tmp = args[1];
                args[1] = args[2];
                args[2] = tmp;
            }
            break;
        CASE_OP_32_64(brcond):
            /* Swapping the comparison operands requires swapping the
               condition code as well.  */
            if (temps[args[0]].state == TCG_TEMP_CONST
                && temps[args[1]].state != TCG_TEMP_CONST) {
                tmp = args[0];
                args[0] = args[1];
                args[1] = tmp;
                args[2] = tcg_swap_cond(args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state != TCG_TEMP_CONST) {
                tmp = args[1];
                args[1] = args[2];
                args[2] = tmp;
                args[3] = tcg_swap_cond(args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            cond = args[5];
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state != TCG_TEMP_CONST) {
                tmp = args[1];
                args[1] = args[2];
                args[2] = tmp;
                cond = tcg_swap_cond(cond);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation. */
            if (args[0] == args[3]) {
                tmp = args[3];
                args[3] = args[4];
                args[4] = tmp;
                cond = tcg_invert_cond(cond);
            }
            args[5] = cond;
            /* fallthrough into default (no further canonicalization) */
        default:
            break;
        }

        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0" */
        switch (op) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == 0) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0, nb_temps, nb_globals);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, 0 => mov r, a" cases */
        switch (op) {
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                /* Proceed with possible constant folding. */
                break;
            }
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                /* If the destination is already the same value as the
                   source, the whole op degenerates to a nop.  */
                if ((temps[args[0]].state == TCG_TEMP_COPY
                    && temps[args[0]].val == args[1])
                    || args[0] == args[1]) {
                    gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(gen_args, args[0], args[1],
                                    nb_temps, nb_globals);
                    gen_args += 2;
                }
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
        switch (op) {
        CASE_OP_32_64(and):
        CASE_OP_32_64(mul):
            if ((temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0)) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0, nb_temps, nb_globals);
                args += 3;
                gen_args += 2;
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify expression for "op r, a, a => mov r, a" cases */
        switch (op) {
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
            if (args[1] == args[2]) {
                if (args[1] == args[0]) {
                    gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(gen_args, args[0], args[1], nb_temps,
                                    nb_globals);
                    gen_args += 2;
                }
                args += 3;
                continue;
            }
            break;
        default:
            break;
        }

        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted to arguments by register
           allocator where needed and possible.  Also detect copies. */
        switch (op) {
        CASE_OP_32_64(mov):
            /* mov to self (directly or via a known copy) is a nop.  */
            if ((temps[args[1]].state == TCG_TEMP_COPY
                && temps[args[1]].val == args[0])
                || args[0] == args[1]) {
                args += 2;
                gen_opc_buf[op_index] = INDEX_op_nop;
                break;
            }
            if (temps[args[1]].state != TCG_TEMP_CONST) {
                tcg_opt_gen_mov(gen_args, args[0], args[1],
                                nb_temps, nb_globals);
                gen_args += 2;
                args += 2;
                break;
            }
            /* Source argument is constant.  Rewrite the operation and
               let movi case handle it. */
            op = op_to_movi(op);
            gen_opc_buf[op_index] = op;
            args[1] = temps[args[1]].val;
            /* fallthrough */
        CASE_OP_32_64(movi):
            tcg_opt_gen_movi(gen_args, args[0], args[1], nb_temps, nb_globals);
            gen_args += 2;
            args += 2;
            break;
        CASE_OP_32_64(not):
        CASE_OP_32_64(neg):
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
            /* Unary ops: fold when the operand is constant, otherwise
               just invalidate the output and pass the op through.  */
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val, 0);
                tcg_opt_gen_movi(gen_args, args[0], tmp, nb_temps, nb_globals);
            } else {
                reset_temp(args[0], nb_temps, nb_globals);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
            }
            gen_args += 2;
            args += 2;
            break;
        CASE_OP_32_64(add):
        CASE_OP_32_64(sub):
        CASE_OP_32_64(mul):
        CASE_OP_32_64(or):
        CASE_OP_32_64(and):
        CASE_OP_32_64(xor):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(andc):
        CASE_OP_32_64(orc):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
            /* Binary ops: fold when both operands are constant.  */
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val,
                                          temps[args[2]].val);
                tcg_opt_gen_movi(gen_args, args[0], tmp, nb_temps, nb_globals);
                gen_args += 2;
            } else {
                reset_temp(args[0], nb_temps, nb_globals);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args += 3;
            }
            args += 3;
            break;
        CASE_OP_32_64(setcond):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding_cond(op, temps[args[1]].val,
                                               temps[args[2]].val, args[3]);
                tcg_opt_gen_movi(gen_args, args[0], tmp, nb_temps, nb_globals);
                gen_args += 2;
            } else {
                reset_temp(args[0], nb_temps, nb_globals);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args[3] = args[3];
                gen_args += 4;
            }
            args += 4;
            break;
        CASE_OP_32_64(brcond):
            if (temps[args[0]].state == TCG_TEMP_CONST
                && temps[args[1]].state == TCG_TEMP_CONST) {
                if (do_constant_folding_cond(op, temps[args[0]].val,
                                             temps[args[1]].val, args[2])) {
                    /* Branch is always taken: this ends the basic block,
                       so forget everything and emit a plain br.  */
                    memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));
                    gen_opc_buf[op_index] = INDEX_op_br;
                    gen_args[0] = args[3];
                    gen_args += 1;
                } else {
                    /* Branch is never taken: drop it.  */
                    gen_opc_buf[op_index] = INDEX_op_nop;
                }
            } else {
                /* Conditional branch ends the basic block as well.  */
                memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));
                reset_temp(args[0], nb_temps, nb_globals);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args[3] = args[3];
                gen_args += 4;
            }
            args += 4;
            break;
        CASE_OP_32_64(movcond):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                /* tmp is 0 or 1, so args[4-tmp] selects the "true" input
                   (args[3]) when the condition holds, else the "false"
                   input (args[4]).  */
                tmp = do_constant_folding_cond(op, temps[args[1]].val,
                                               temps[args[2]].val, args[5]);
                if (args[0] == args[4-tmp]
                    || (temps[args[4-tmp]].state == TCG_TEMP_COPY
                        && temps[args[4-tmp]].val == args[0])) {
                    gen_opc_buf[op_index] = INDEX_op_nop;
                } else if (temps[args[4-tmp]].state == TCG_TEMP_CONST) {
                    gen_opc_buf[op_index] = op_to_movi(op);
                    tcg_opt_gen_movi(gen_args, args[0], temps[args[4-tmp]].val,
                                     nb_temps, nb_globals);
                    gen_args += 2;
                } else {
                    gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(gen_args, args[0], args[4-tmp],
                                    nb_temps, nb_globals);
                    gen_args += 2;
                }
            } else {
                reset_temp(args[0], nb_temps, nb_globals);
                gen_args[0] = args[0];
                gen_args[1] = args[1];
                gen_args[2] = args[2];
                gen_args[3] = args[3];
                gen_args[4] = args[4];
                gen_args[5] = args[5];
                gen_args += 6;
            }
            args += 6;
            break;
        case INDEX_op_call:
            /* The high half of args[0] is the output-arg count, the low
               half the input-arg count; the flags word follows the call
               arguments.  */
            nb_call_args = (args[0] >> 16) + (args[0] & 0xffff);
            if (!(args[nb_call_args + 1] & (TCG_CALL_CONST | TCG_CALL_PURE))) {
                /* The call may clobber any global.  */
                for (i = 0; i < nb_globals; i++) {
                    reset_temp(i, nb_temps, nb_globals);
                }
            }
            /* Outputs of the call are unknown afterwards.  */
            for (i = 0; i < (args[0] >> 16); i++) {
                reset_temp(args[i + 1], nb_temps, nb_globals);
            }
            /* Copy the call arguments through unchanged.  */
            i = nb_call_args + 3;
            while (i) {
                *gen_args = *args;
                args++;
                gen_args++;
                i--;
            }
            break;
        default:
            /* Default case: we do know nothing about operation so no
               propagation is done.  We trash everything if the operation
               is the end of a basic block, otherwise we only trash the
               output args. */
            if (def->flags & TCG_OPF_BB_END) {
                memset(temps, 0, nb_temps * sizeof(struct tcg_temp_info));
            } else {
                for (i = 0; i < def->nb_oargs; i++) {
                    reset_temp(args[i], nb_temps, nb_globals);
                }
            }
            for (i = 0; i < def->nb_args; i++) {
                gen_args[i] = args[i];
            }
            args += def->nb_args;
            gen_args += def->nb_args;
            break;
        }
    }

    return gen_args;
}
710
711 TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr,
712 TCGArg *args, TCGOpDef *tcg_op_defs)
713 {
714 TCGArg *res;
715 res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs);
716 return res;
717 }