/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "tcg-op.h"
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)
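/* For instance, CASE_OP_32_64(add) expands to
 *     case INDEX_op_add_i32: case INDEX_op_add_i64:
 * so a single case body can handle both the 32-bit and 64-bit variants. */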
struct tcg_temp_info {
    tcg_temp_state state;   /* TCG_TEMP_UNDEF, TCG_TEMP_CONST or TCG_TEMP_COPY */
    uint16_t prev_copy;
    uint16_t next_copy;
    tcg_target_ulong val;
    tcg_target_ulong mask;
};

static struct tcg_temp_info temps[TCG_MAX_TEMPS];
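/* Each element of the array above mirrors one TCG temp: "state" says whether
 * nothing is known (UNDEF), the temp holds the constant "val" (CONST), or it
 * is a copy of other temps reachable through the prev_copy/next_copy circular
 * list (COPY).  "mask" records which bits of the temp may still be nonzero. */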
/* Reset TEMP's state to TCG_TEMP_UNDEF.  If TEMP only had one copy, remove
   the copy flag from the left temp.  */
static void reset_temp(TCGArg temp)
{
    if (temps[temp].state == TCG_TEMP_COPY) {
        if (temps[temp].prev_copy == temps[temp].next_copy) {
            temps[temps[temp].next_copy].state = TCG_TEMP_UNDEF;
        } else {
            temps[temps[temp].next_copy].prev_copy = temps[temp].prev_copy;
            temps[temps[temp].prev_copy].next_copy = temps[temp].next_copy;
        }
    }
    temps[temp].state = TCG_TEMP_UNDEF;
    temps[temp].mask = -1;
}
/* Reset all temporaries, given that there are NB_TEMPS of them. */
static void reset_all_temps(int nb_temps)
{
    int i;
    for (i = 0; i < nb_temps; i++) {
        temps[i].state = TCG_TEMP_UNDEF;
        temps[i].mask = -1;
    }
}
static int op_bits(TCGOpcode op)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    return def->flags & TCG_OPF_64BIT ? 64 : 32;
}
static TCGOpcode op_to_movi(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_movi_i32;
    case 64:
        return INDEX_op_movi_i64;
    default:
        fprintf(stderr, "op_to_movi: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}
static TCGArg find_better_copy(TCGContext *s, TCGArg temp)
{
    TCGArg i;

    /* If this is already a global, we can't do better. */
    if (temp < s->nb_globals) {
        return temp;
    }

    /* Search for a global first. */
    for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
        if (i < s->nb_globals) {
            return i;
        }
    }

    /* If it is a temp, search for a temp local. */
    if (!s->temps[temp].temp_local) {
        for (i = temps[temp].next_copy; i != temp; i = temps[i].next_copy) {
            if (s->temps[i].temp_local) {
                return i;
            }
        }
    }

    /* Failure to find a better representation, return the same temp. */
    return temp;
}
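/* Note the preference order above: a global if one exists in the copy list,
 * then a temp local, and only then the original temp.  The assumption is that
 * the longer-lived representation is the most useful one to keep naming. */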
static bool temps_are_copies(TCGArg arg1, TCGArg arg2)
{
    TCGArg i;

    if (temps[arg1].state != TCG_TEMP_COPY
        || temps[arg2].state != TCG_TEMP_COPY) {
        return false;
    }

    for (i = temps[arg1].next_copy; i != arg1; i = temps[i].next_copy) {
        if (i == arg2) {
            return true;
        }
    }

    return false;
}
static void tcg_opt_gen_mov(TCGContext *s, TCGArg *gen_args,
                            TCGArg dst, TCGArg src)
{
    reset_temp(dst);
    temps[dst].mask = temps[src].mask;
    assert(temps[src].state != TCG_TEMP_CONST);

    if (s->temps[src].type == s->temps[dst].type) {
        if (temps[src].state != TCG_TEMP_COPY) {
            temps[src].state = TCG_TEMP_COPY;
            temps[src].next_copy = src;
            temps[src].prev_copy = src;
        }
        temps[dst].state = TCG_TEMP_COPY;
        temps[dst].next_copy = temps[src].next_copy;
        temps[dst].prev_copy = src;
        temps[temps[dst].next_copy].prev_copy = dst;
        temps[src].next_copy = dst;
    }

    gen_args[0] = dst;
    gen_args[1] = src;
}
static void tcg_opt_gen_movi(TCGArg *gen_args, TCGArg dst, TCGArg val)
{
    reset_temp(dst);
    temps[dst].state = TCG_TEMP_CONST;
    temps[dst].val = val;
    temps[dst].mask = val;
    gen_args[0] = dst;
    gen_args[1] = val;
}
static TCGOpcode op_to_mov(TCGOpcode op)
{
    switch (op_bits(op)) {
    case 32:
        return INDEX_op_mov_i32;
    case 64:
        return INDEX_op_mov_i64;
    default:
        fprintf(stderr, "op_to_mov: unexpected return value of "
                "function op_bits.\n");
        tcg_abort();
    }
}
static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y)
{
    switch (op) {
    case INDEX_op_shl_i32:
        return (uint32_t)x << (uint32_t)y;

    case INDEX_op_shl_i64:
        return (uint64_t)x << (uint64_t)y;

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (uint32_t)y;

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (uint64_t)y;

    case INDEX_op_sar_i32:
        return (int32_t)x >> (int32_t)y;

    case INDEX_op_sar_i64:
        return (int64_t)x >> (int64_t)y;

    case INDEX_op_rotr_i32:
        x = ((uint32_t)x << (32 - y)) | ((uint32_t)x >> y);
        return x;

    case INDEX_op_rotr_i64:
        x = ((uint64_t)x << (64 - y)) | ((uint64_t)x >> y);
        return x;

    case INDEX_op_rotl_i32:
        x = ((uint32_t)x << y) | ((uint32_t)x >> (32 - y));
        return x;

    case INDEX_op_rotl_i64:
        x = ((uint64_t)x << y) | ((uint64_t)x >> (64 - y));
        return x;
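    /* Worked example for the rotations above: rotr_i32 with x = 0x80000001
     * and y = 1 gives
     *     (0x80000001 << 31) | (0x80000001 >> 1) = 0x80000000 | 0x40000000
     *                                            = 0xc0000000,
     * i.e. the two opposite shifts reassemble the rotated value. */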
    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}
static TCGArg do_constant_folding(TCGOpcode op, TCGArg x, TCGArg y)
{
    TCGArg res = do_constant_folding_2(op, x, y);
    if (op_bits(op) == 32) {
        res &= 0xffffffff;
    }
    return res;
}
static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}
static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}
static bool do_constant_folding_cond_eq(TCGCond c)
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                       TCGArg y, TCGCond c)
{
    if (temps[x].state == TCG_TEMP_CONST && temps[y].state == TCG_TEMP_CONST) {
        switch (op_bits(op)) {
        case 32:
            return do_constant_folding_cond_32(temps[x].val, temps[y].val, c);
        case 64:
            return do_constant_folding_cond_64(temps[x].val, temps[y].val, c);
        default:
            tcg_abort();
        }
    } else if (temps_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (temps[y].state == TCG_TEMP_CONST && temps[y].val == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return 2;
        }
    }
    return 2;
}
/* Return 2 if the condition can't be simplified, and the result
   of the condition (0 or 1) if it can */
static TCGArg do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (temps[bl].state == TCG_TEMP_CONST
        && temps[bh].state == TCG_TEMP_CONST) {
        uint64_t b = ((uint64_t)temps[bh].val << 32) | (uint32_t)temps[bl].val;

        if (temps[al].state == TCG_TEMP_CONST
            && temps[ah].state == TCG_TEMP_CONST) {
            uint64_t a;
            a = ((uint64_t)temps[ah].val << 32) | (uint32_t)temps[al].val;
            return do_constant_folding_cond_64(a, b, c);
        }
    }
    if (temps_are_copies(al, bl) && temps_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return 2;
}
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += temps[a1].state == TCG_TEMP_CONST;
    sum -= temps[a2].state == TCG_TEMP_CONST;

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}
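/* Example: for a commutative op such as "add t0, $5, t1", swap_commutative
 * exchanges the operands to give "add t0, t1, $5", so the simplification
 * passes below only ever have to look for a constant in the second slot
 * (and "op a, a, b" is preferred when the destination matches an input). */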
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += temps[p1[0]].state == TCG_TEMP_CONST;
    sum += temps[p1[1]].state == TCG_TEMP_CONST;
    sum -= temps[p2[0]].state == TCG_TEMP_CONST;
    sum -= temps[p2[1]].state == TCG_TEMP_CONST;
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}
/* Propagate constants and copies, fold constant expressions. */
static TCGArg *tcg_constant_folding(TCGContext *s, uint16_t *tcg_opc_ptr,
                                    TCGArg *args, TCGOpDef *tcg_op_defs)
{
    int i, nb_ops, op_index, nb_temps, nb_globals, nb_call_args;
    tcg_target_ulong mask, affected;
    TCGOpcode op;
    const TCGOpDef *def;
    TCGArg *gen_args;
    TCGArg tmp;

    /* Array TEMPS has an element for each temp.
       If this temp holds a constant then its value is kept in TEMPS' element.
       If this temp is a copy of other ones then the other copies are
       available through the doubly linked circular list. */

    nb_temps = s->nb_temps;
    nb_globals = s->nb_globals;
    reset_all_temps(nb_temps);

    nb_ops = tcg_opc_ptr - s->gen_opc_buf;
    gen_args = args;
    for (op_index = 0; op_index < nb_ops; op_index++) {
        op = s->gen_opc_buf[op_index];
        def = &tcg_op_defs[op];
        /* Do copy propagation */
        if (op == INDEX_op_call) {
            int nb_oargs = args[0] >> 16;
            int nb_iargs = args[0] & 0xffff;
            for (i = nb_oargs + 1; i < nb_oargs + nb_iargs + 1; i++) {
                if (temps[args[i]].state == TCG_TEMP_COPY) {
                    args[i] = find_better_copy(s, args[i]);
                }
            }
        } else {
            for (i = def->nb_oargs; i < def->nb_oargs + def->nb_iargs; i++) {
                if (temps[args[i]].state == TCG_TEMP_COPY) {
                    args[i] = find_better_copy(s, args[i]);
                }
            }
        }
        /* For commutative operations make constant second argument */
            swap_commutative(args[0], &args[1], &args[2]);

        CASE_OP_32_64(brcond):
            if (swap_commutative(-1, &args[0], &args[1])) {
                args[2] = tcg_swap_cond(args[2]);
            }
            break;

        CASE_OP_32_64(setcond):
            if (swap_commutative(args[0], &args[1], &args[2])) {
                args[3] = tcg_swap_cond(args[3]);
            }
            break;

        CASE_OP_32_64(movcond):
            if (swap_commutative(-1, &args[1], &args[2])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation.  */
            if (swap_commutative(args[0], &args[4], &args[3])) {
                args[5] = tcg_invert_cond(args[5]);
            }
            break;

            swap_commutative(args[0], &args[2], &args[4]);
            swap_commutative(args[1], &args[3], &args[5]);
            break;

        CASE_OP_32_64(mulu2):
        CASE_OP_32_64(muls2):
            swap_commutative(args[0], &args[2], &args[3]);
            break;

        case INDEX_op_brcond2_i32:
            if (swap_commutative2(&args[0], &args[2])) {
                args[4] = tcg_swap_cond(args[4]);
            }
            break;

        case INDEX_op_setcond2_i32:
            if (swap_commutative2(&args[1], &args[3])) {
                args[5] = tcg_swap_cond(args[5]);
            }
            break;
        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
           and "sub r, 0, a => neg r, a" case.  */
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[1]].val == 0) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
            }

            {
                TCGOpcode neg_op;
                bool have_neg;

                if (temps[args[2]].state == TCG_TEMP_CONST) {
                    /* Proceed with possible constant folding. */
                    break;
                }
                if (op == INDEX_op_sub_i32) {
                    neg_op = INDEX_op_neg_i32;
                    have_neg = TCG_TARGET_HAS_neg_i32;
                } else {
                    neg_op = INDEX_op_neg_i64;
                    have_neg = TCG_TARGET_HAS_neg_i64;
                }
                if (!have_neg) {
                    break;
                }
                if (temps[args[1]].state == TCG_TEMP_CONST
                    && temps[args[1]].val == 0) {
                    s->gen_opc_buf[op_index] = neg_op;
                    gen_args[0] = args[0];
                    gen_args[1] = args[2];
                }
            }
        /* Simplify expression for "op r, a, 0 => mov r, a" cases */
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                /* Proceed with possible constant folding. */
                break;
            }
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[2]].val == 0) {
                if (temps_are_copies(args[0], args[1])) {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    s->gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                }
            }
        /* Simplify using known-zero bits */
        mask = -1;
        affected = -1;
        switch (op) {
        CASE_OP_32_64(ext8s):
            if ((temps[args[1]].mask & 0x80) != 0) {
                break;
            }
        CASE_OP_32_64(ext8u):
            mask = 0xff;
            goto and_const;
        CASE_OP_32_64(ext16s):
            if ((temps[args[1]].mask & 0x8000) != 0) {
                break;
            }
        CASE_OP_32_64(ext16u):
            mask = 0xffff;
            goto and_const;
        case INDEX_op_ext32s_i64:
            if ((temps[args[1]].mask & 0x80000000) != 0) {
                break;
            }
        case INDEX_op_ext32u_i64:
            mask = 0xffffffffU;
            goto and_const;
        CASE_OP_32_64(and):
            mask = temps[args[2]].mask;
            if (temps[args[2]].state == TCG_TEMP_CONST) {
        and_const:
                affected = temps[args[1]].mask & ~mask;
            }
            mask = temps[args[1]].mask & mask;
            break;
        CASE_OP_32_64(sar):
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                mask = ((tcg_target_long)temps[args[1]].mask
                        >> temps[args[2]].val);
            }
            break;

        CASE_OP_32_64(shr):
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                mask = temps[args[1]].mask >> temps[args[2]].val;
            }
            break;

        CASE_OP_32_64(shl):
            if (temps[args[2]].state == TCG_TEMP_CONST) {
                mask = temps[args[1]].mask << temps[args[2]].val;
            }
            break;
        CASE_OP_32_64(neg):
            /* Set to 1 all bits to the left of the rightmost.  */
            mask = -(temps[args[1]].mask & -temps[args[1]].mask);
            break;
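        /* Example for the neg mask above: if the operand's mask is 0x30
         * (only bits 4-5 can be set), mask & -mask = 0x10 isolates the lowest
         * possibly-set bit, and -(0x10) keeps bits 4 and up: negating a value
         * with four trailing zeros cannot produce a nonzero bit below bit 4. */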
        CASE_OP_32_64(deposit):
            tmp = ((1ull << args[4]) - 1);
            mask = ((temps[args[1]].mask & ~(tmp << args[3]))
                    | ((temps[args[2]].mask & tmp) << args[3]));
            break;
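        /* For deposit, tmp is the field mask ((1 << len) - 1, len = args[4]):
         * the result keeps arg1's possibly-set bits outside the field plus
         * arg2's low "len" bits shifted into position args[3]. */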
        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            mask = temps[args[1]].mask | temps[args[2]].mask;
            break;
        CASE_OP_32_64(setcond):
            /* setcond produces 0 or 1, so only bit 0 can be set. */
            mask = 1;
            break;

        CASE_OP_32_64(movcond):
            mask = temps[args[3]].mask | temps[args[4]].mask;
            break;
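        /* movcond copies one of its two data inputs, so a bit can be set in
         * the result only if it can be set in either source; the union of the
         * two masks is therefore a safe bound. */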
        if (mask == 0) {
            assert(def->nb_oargs == 1);
            s->gen_opc_buf[op_index] = op_to_movi(op);
            tcg_opt_gen_movi(gen_args, args[0], 0);
            args += def->nb_oargs + def->nb_iargs + def->nb_cargs;
        }
        if (affected == 0) {
            assert(def->nb_oargs == 1);
            if (temps_are_copies(args[0], args[1])) {
                s->gen_opc_buf[op_index] = INDEX_op_nop;
            } else if (temps[args[1]].state != TCG_TEMP_CONST) {
                s->gen_opc_buf[op_index] = op_to_mov(op);
                tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
            } else {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], temps[args[1]].val);
            }
            args += def->nb_iargs + 1;
        }
        /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
            if ((temps[args[2]].state == TCG_TEMP_CONST
                 && temps[args[2]].val == 0)) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
            }
        /* Simplify expression for "op r, a, a => mov r, a" cases */
            if (temps_are_copies(args[1], args[2])) {
                if (temps_are_copies(args[0], args[1])) {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                } else {
                    s->gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                }
            }
        /* Simplify expression for "op r, a, a => movi r, 0" cases */
            if (temps_are_copies(args[1], args[2])) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], 0);
            }
        /* Propagate constants through copy operations and do constant
           folding.  Constants will be substituted to arguments by register
           allocator where needed and possible.  Also detect copies. */
        switch (op) {
        CASE_OP_32_64(mov):
            if (temps_are_copies(args[0], args[1])) {
                s->gen_opc_buf[op_index] = INDEX_op_nop;
                break;
            }
            if (temps[args[1]].state != TCG_TEMP_CONST) {
                tcg_opt_gen_mov(s, gen_args, args[0], args[1]);
                break;
            }
            /* Source argument is constant.  Rewrite the operation and
               let movi case handle it. */
            op = op_to_movi(op);
            s->gen_opc_buf[op_index] = op;
            args[1] = temps[args[1]].val;
            /* fallthrough */
        CASE_OP_32_64(movi):
            tcg_opt_gen_movi(gen_args, args[0], args[1]);
            break;
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16s):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext32u_i64:
            if (temps[args[1]].state == TCG_TEMP_CONST) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val, 0);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
            }
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tmp = do_constant_folding(op, temps[args[1]].val,
                                          temps[args[2]].val);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
            }
        CASE_OP_32_64(deposit):
            if (temps[args[1]].state == TCG_TEMP_CONST
                && temps[args[2]].state == TCG_TEMP_CONST) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tmp = ((1ull << args[4]) - 1);
                tmp = (temps[args[1]].val & ~(tmp << args[3]))
                      | ((temps[args[2]].val & tmp) << args[3]);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
            }
        CASE_OP_32_64(setcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[3]);
            if (tmp != 2) {
                s->gen_opc_buf[op_index] = op_to_movi(op);
                tcg_opt_gen_movi(gen_args, args[0], tmp);
            }
        CASE_OP_32_64(brcond):
            tmp = do_constant_folding_cond(op, args[0], args[1], args[2]);
            if (tmp != 2) {
                if (tmp) {
                    reset_all_temps(nb_temps);
                    s->gen_opc_buf[op_index] = INDEX_op_br;
                    gen_args[0] = args[3];
                } else {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                }
            }
        CASE_OP_32_64(movcond):
            tmp = do_constant_folding_cond(op, args[1], args[2], args[5]);
            if (tmp != 2) {
                if (temps_are_copies(args[0], args[4-tmp])) {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                } else if (temps[args[4-tmp]].state == TCG_TEMP_CONST) {
                    s->gen_opc_buf[op_index] = op_to_movi(op);
                    tcg_opt_gen_movi(gen_args, args[0], temps[args[4-tmp]].val);
                } else {
                    s->gen_opc_buf[op_index] = op_to_mov(op);
                    tcg_opt_gen_mov(s, gen_args, args[0], args[4-tmp]);
                }
            }
        case INDEX_op_add2_i32:
        case INDEX_op_sub2_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[3]].state == TCG_TEMP_CONST
                && temps[args[4]].state == TCG_TEMP_CONST
                && temps[args[5]].state == TCG_TEMP_CONST) {
                uint32_t al = temps[args[2]].val;
                uint32_t ah = temps[args[3]].val;
                uint32_t bl = temps[args[4]].val;
                uint32_t bh = temps[args[5]].val;
                uint64_t a = ((uint64_t)ah << 32) | al;
                uint64_t b = ((uint64_t)bh << 32) | bl;
                TCGArg rl, rh;

                if (op == INDEX_op_add2_i32) {
                    a += b;
                } else {
                    a -= b;
                }

                /* We emit the extra nop when we emit the add2/sub2.  */
                assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);

                rl = args[0];
                rh = args[1];
                s->gen_opc_buf[op_index] = INDEX_op_movi_i32;
                s->gen_opc_buf[++op_index] = INDEX_op_movi_i32;
                tcg_opt_gen_movi(&gen_args[0], rl, (uint32_t)a);
                tcg_opt_gen_movi(&gen_args[2], rh, (uint32_t)(a >> 32));
            }
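                /* The double-word sum or difference was computed in 64-bit
                 * arithmetic above; the two movi_i32 just emitted split it
                 * back into the low (rl) and high (rh) result temps. */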
        case INDEX_op_mulu2_i32:
            if (temps[args[2]].state == TCG_TEMP_CONST
                && temps[args[3]].state == TCG_TEMP_CONST) {
                uint32_t a = temps[args[2]].val;
                uint32_t b = temps[args[3]].val;
                uint64_t r = (uint64_t)a * b;
                TCGArg rl, rh;

                /* We emit the extra nop when we emit the mulu2.  */
                assert(s->gen_opc_buf[op_index + 1] == INDEX_op_nop);

                rl = args[0];
                rh = args[1];
                s->gen_opc_buf[op_index] = INDEX_op_movi_i32;
                s->gen_opc_buf[++op_index] = INDEX_op_movi_i32;
                tcg_opt_gen_movi(&gen_args[0], rl, (uint32_t)r);
                tcg_opt_gen_movi(&gen_args[2], rh, (uint32_t)(r >> 32));
            }
        case INDEX_op_brcond2_i32:
            tmp = do_constant_folding_cond2(&args[0], &args[2], args[4]);
            if (tmp != 2) {
                if (tmp) {
                    reset_all_temps(nb_temps);
                    s->gen_opc_buf[op_index] = INDEX_op_br;
                    gen_args[0] = args[5];
                } else {
                    s->gen_opc_buf[op_index] = INDEX_op_nop;
                }
            } else if ((args[4] == TCG_COND_LT || args[4] == TCG_COND_GE)
                       && temps[args[2]].state == TCG_TEMP_CONST
                       && temps[args[3]].state == TCG_TEMP_CONST
                       && temps[args[2]].val == 0
                       && temps[args[3]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input.  */
                reset_all_temps(nb_temps);
                s->gen_opc_buf[op_index] = INDEX_op_brcond_i32;
                gen_args[0] = args[1];
                gen_args[1] = args[3];
                gen_args[2] = args[4];
                gen_args[3] = args[5];
            }
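                /* Rationale: with both constant words equal to zero, the
                 * signed LT/GE result depends only on the sign of the high
                 * input word, so one 32-bit compare of args[1] against the
                 * zero constant in args[3] is enough. */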
        case INDEX_op_setcond2_i32:
            tmp = do_constant_folding_cond2(&args[1], &args[3], args[5]);
            if (tmp != 2) {
                s->gen_opc_buf[op_index] = INDEX_op_movi_i32;
                tcg_opt_gen_movi(gen_args, args[0], tmp);
            } else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE)
                       && temps[args[3]].state == TCG_TEMP_CONST
                       && temps[args[4]].state == TCG_TEMP_CONST
                       && temps[args[3]].val == 0
                       && temps[args[4]].val == 0) {
                /* Simplify LT/GE comparisons vs zero to a single compare
                   vs the high word of the input.  */
                s->gen_opc_buf[op_index] = INDEX_op_setcond_i32;
                reset_temp(args[0]);
                gen_args[0] = args[0];
                gen_args[1] = args[2];
                gen_args[2] = args[4];
                gen_args[3] = args[5];
            }
        case INDEX_op_call:
            nb_call_args = (args[0] >> 16) + (args[0] & 0xffff);
            if (!(args[nb_call_args + 1] & (TCG_CALL_NO_READ_GLOBALS |
                                            TCG_CALL_NO_WRITE_GLOBALS))) {
                for (i = 0; i < nb_globals; i++) {
                    reset_temp(i);
                }
            }
            for (i = 0; i < (args[0] >> 16); i++) {
                reset_temp(args[i + 1]);
            }
            i = nb_call_args + 3;
        default:
            /* Default case: we know nothing about operation (or were unable
               to compute the operation result) so no propagation is done.
               We trash everything if the operation is the end of a basic
               block, otherwise we only trash the output args.  "mask" is
               the non-zero bits mask for the first output arg.  */
            if (def->flags & TCG_OPF_BB_END) {
                reset_all_temps(nb_temps);
            } else {
                for (i = 0; i < def->nb_oargs; i++) {
                    reset_temp(args[i]);
                }
            }
            for (i = 0; i < def->nb_args; i++) {
                gen_args[i] = args[i];
            }
            args += def->nb_args;
            gen_args += def->nb_args;
            break;
        }
    }

    return gen_args;
}
TCGArg *tcg_optimize(TCGContext *s, uint16_t *tcg_opc_ptr,
        TCGArg *args, TCGOpDef *tcg_op_defs)
{
    TCGArg *res;
    res = tcg_constant_folding(s, tcg_opc_ptr, args, tcg_op_defs);
    return res;
}