/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "tcg/tcg-op.h"
#include "tcg-internal.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)
typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
} TempOptInfo;
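
/*
 * Illustration (not part of the original source): after "ext8u_i32 t1, t0",
 * t1 is known to fit in 8 bits, so its TempOptInfo has z_mask == 0xff;
 * every bit that is 0 in z_mask is guaranteed to be 0 in the value.
 * A temp about which nothing is known has z_mask == -1.
 */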

typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    /* In flight values from optimization. */
    uint64_t z_mask;
} OptContext;

static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TempOptInfo *pi = ts_info(ti->prev_copy);
    TempOptInfo *ni = ts_info(ti->next_copy);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
}

static void reset_temp(TCGArg arg)
{
    reset_ts(arg_temp(arg));
}

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
            /* High bits of a 32-bit quantity are garbage. */
            ti->z_mask |= ~0xffffffffull;
        }
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
    }
}

static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
    TCGTemp *i, *g, *l;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    g = l = NULL;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        if (temp_readonly(i)) {
            return i;
        } else if (i->kind > ts->kind) {
            if (i->kind == TEMP_GLOBAL) {
                g = i;
            } else if (i->kind == TEMP_LOCAL) {
                l = i;
            }
        }
    }

    /* If we didn't find a better representation, return the same temp. */
    return g ? g : l ? l : ts;
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    const TCGOpDef *def;
    TempOptInfo *di;
    TempOptInfo *si;
    uint64_t z_mask;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);
    def = &tcg_op_defs[op->opc];
    if (def->flags & TCG_OPF_VECTOR) {
        new_op = INDEX_op_mov_vec;
    } else if (def->flags & TCG_OPF_64BIT) {
        new_op = INDEX_op_mov_i64;
    } else {
        new_op = INDEX_op_mov_i32;
    }
    op->opc = new_op;
    /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
    op->args[0] = dst;
    op->args[1] = src;

    z_mask = si->z_mask;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
        /* High bits of the destination are now garbage. */
        z_mask |= ~0xffffffffull;
    }
    di->z_mask = z_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    TCGType type;
    TCGTemp *tv;

    if (def->flags & TCG_OPF_VECTOR) {
        type = TCGOP_VECL(op) + TCG_TYPE_V64;
    } else if (def->flags & TCG_OPF_64BIT) {
        type = TCG_TYPE_I64;
    } else {
        type = TCG_TYPE_I32;
    }

    /* Convert movi to mov with constant temp. */
    tv = tcg_constant_internal(type, val);
    init_ts_info(ctx, tv);
    return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}

static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}
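
/*
 * Worked example (not part of the original source): folding
 * "shl_i32 r, $0x80000001, $4" evaluates (uint32_t)0x80000001 << (4 & 31);
 * the 32-bit result is 0x00000010, and the caller then replaces the shift
 * with a load of that constant.
 */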

static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    uint64_t res = do_constant_folding_2(op, x, y);
    if (!(def->flags & TCG_OPF_64BIT)) {
        res = (int32_t)res;
    }
    return res;
}

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        tcg_abort();
    }
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    uint64_t xv = arg_info(x)->val;
    uint64_t yv = arg_info(y)->val;

    if (arg_is_const(x) && arg_is_const(y)) {
        const TCGOpDef *def = &tcg_op_defs[op];
        tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
        if (def->flags & TCG_OPF_64BIT) {
            return do_constant_folding_cond_64(xv, yv, c);
        } else {
            return do_constant_folding_cond_32(xv, yv, c);
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const(y) && yv == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);
            return do_constant_folding_cond_64(a, b, c);
        }

        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return -1;
}

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}
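
/*
 * Illustration (not part of the original source): given
 * "add_i32 t0, $5, t1", swap_commutative() moves the constant to the
 * second operand, producing "add_i32 t0, t1, $5"; for "add_i32 t0, t1, t0"
 * it prefers the "op a, a, b" form and swaps so that args[1] == args[0].
 */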

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts) {
            init_ts_info(ctx, ts);
        }
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    TCGContext *s = ctx->tcg;

    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts && ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(s, ts));
        }
    }
}

static void finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    /*
     * For an opcode that ends a BB, reset all temp data.
     * We do no cross-BB optimization.
     */
    if (def->flags & TCG_OPF_BB_END) {
        memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
        ctx->prev_mb = NULL;
        return;
    }

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
        /*
         * Save the corresponding known-zero bits mask for the
         * first output argument (only one supported so far).
         */
        if (i == 0) {
            arg_info(op->args[i])->z_mask = ctx->z_mask;
        }
    }
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}
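
/*
 * Illustration (not part of the original source): for "add_i32 r, $6, $3"
 * both inputs are constant, so fold_const2() computes 6 + 3 via
 * do_constant_folding() and folds the op to the constant 9 (a mov from
 * a constant temp).
 */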

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}
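
/*
 * Illustration (not part of the original source): fold_sub() and
 * fold_xor() pass i == 0 here, so "sub_i32 r, a, a" and "xor_i32 r, a, a"
 * both fold to the constant 0 whenever the two inputs are known copies.
 */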

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 */

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
{
    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
        arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
        uint32_t al = arg_info(op->args[2])->val;
        uint32_t ah = arg_info(op->args[3])->val;
        uint32_t bl = arg_info(op->args[4])->val;
        uint32_t bh = arg_info(op->args[5])->val;
        uint64_t a = ((uint64_t)ah << 32) | al;
        uint64_t b = ((uint64_t)bh << 32) | bl;
        TCGArg rl, rh;
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);

        if (add) {
            a += b;
        } else {
            a -= b;
        }

        rl = op->args[0];
        rh = op->args[1];
        tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
        tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
        return true;
    }
    return false;
}

static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
{
    return fold_addsub2_i32(ctx, op, true);
}

static bool fold_and(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_andc(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_brcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[2];
    int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);

    if (i == 0) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }
    if (i > 0) {
        op->opc = INDEX_op_br;
        op->args[0] = op->args[3];
    }
    return false;
}
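
/*
 * Illustration (not part of the original source): if both operands of
 * "brcond_i32 a, b, cond, L" are constants (or known copies), the
 * condition evaluates at translation time; a false result deletes the
 * branch entirely, a true result turns it into an unconditional "br L".
 */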

static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[4];
    int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
    TCGArg label = op->args[5];
    int inv = 0;

    if (i >= 0) {
        goto do_brcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
            arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
            goto do_brcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
                                     op->args[2], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_high;
        }

        i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_low;
        }
        break;

    default:
        break;

    do_brcond_low:
        op->opc = INDEX_op_brcond_i32;
        op->args[1] = op->args[2];
        op->args[2] = cond;
        op->args[3] = label;
        break;

    do_brcond_high:
        op->opc = INDEX_op_brcond_i32;
        op->args[0] = op->args[1];
        op->args[1] = op->args[3];
        op->args[2] = cond;
        op->args[3] = label;
        break;

    do_brcond_const:
        if (i == 0) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
        op->opc = INDEX_op_br;
        op->args[0] = label;
        break;
    }
    return false;
}

static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        t = do_constant_folding(op->opc, t, op->args[2]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
    int nb_oargs = TCGOP_CALLO(op);
    int nb_iargs = TCGOP_CALLI(op);
    int flags, i;

    init_arguments(ctx, op, nb_oargs + nb_iargs);
    copy_propagate(ctx, op, nb_oargs, nb_iargs);

    /* If the function reads or writes globals, reset temp data. */
    flags = tcg_call_flags(op);
    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
        int nb_globals = s->nb_globals;

        for (i = 0; i < nb_globals; i++) {
            if (test_bit(i, ctx->temps_used.l)) {
                reset_ts(&ctx->tcg->temps[i]);
            }
        }
    }

    /* Reset temp data for outputs. */
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
    }

    /* Stop optimizing MB across calls. */
    ctx->prev_mb = NULL;
    return true;
}

static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        if (t != 0) {
            t = do_constant_folding(op->opc, t, 0);
            return tcg_opt_gen_movi(ctx, op, op->args[0], t);
        }
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }
    return false;
}

static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}

static bool fold_deposit(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = deposit64(t1, op->args[3], op->args[4], t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_divide(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_dup(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;
        t = dup_const(TCGOP_VECE(op), t);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_dup2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
                               arg_info(op->args[2])->val);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    if (args_are_copies(op->args[1], op->args[2])) {
        op->opc = INDEX_op_dup_vec;
        TCGOP_VECE(op) = MO_32;
    }
    return false;
}

static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_extract(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = extract64(t, op->args[2], op->args[3]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_extract2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t v1 = arg_info(op->args[1])->val;
        uint64_t v2 = arg_info(op->args[2])->val;
        int shr = op->args[3];

        if (op->opc == INDEX_op_extract2_i64) {
            v1 >>= shr;
            v2 <<= 64 - shr;
        } else {
            v1 = (uint32_t)v1 >> shr;
            v2 = (int32_t)v2 << (32 - shr);
        }
        return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
    }
    return false;
}

static bool fold_exts(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}

static bool fold_extu(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}

static bool fold_mb(OptContext *ctx, TCGOp *op)
{
    /* Eliminate duplicate and redundant fence instructions. */
    if (ctx->prev_mb) {
        /*
         * Merge two barriers of the same type into one,
         * or a weaker barrier into a stronger one,
         * or two weaker barriers into a stronger one.
         *   mb X; mb Y => mb X|Y
         *   mb; strl => mb; st
         *   ldaq; mb => ld; mb
         *   ldaq; strl => ld; mb; st
         * Other combinations are also merged into a strong
         * barrier.  This is stricter than specified but for
         * the purposes of TCG is better than not optimizing.
         */
        ctx->prev_mb->args[0] |= op->args[0];
        tcg_op_remove(ctx->tcg, op);
    } else {
        ctx->prev_mb = op;
    }
    return true;
}

static bool fold_mov(OptContext *ctx, TCGOp *op)
{
    return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}

static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
    TCGOpcode opc = op->opc;
    TCGCond cond = op->args[5];
    int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);

    if (i >= 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
    }

    if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
        uint64_t tv = arg_info(op->args[3])->val;
        uint64_t fv = arg_info(op->args[4])->val;

        opc = (opc == INDEX_op_movcond_i32
               ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);

        if (tv == 1 && fv == 0) {
            op->opc = opc;
            op->args[3] = cond;
        } else if (fv == 1 && tv == 0) {
            op->opc = opc;
            op->args[3] = tcg_invert_cond(cond);
        }
    }
    return false;
}

static bool fold_mul(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
        uint32_t a = arg_info(op->args[2])->val;
        uint32_t b = arg_info(op->args[3])->val;
        uint64_t r = (uint64_t)a * b;
        TCGArg rl, rh;
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);

        rl = op->args[0];
        rh = op->args[1];
        tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
        tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
        return true;
    }
    return false;
}

static bool fold_nand(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_neg(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}

static bool fold_nor(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_not(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}

static bool fold_or(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_orc(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}

static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}

static bool fold_remainder(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_setcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[3];
    int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);

    if (i >= 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

static bool fold_setcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[5];
    int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
    int inv = 0;

    if (i >= 0) {
        goto do_setcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
            arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
            goto do_setcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            goto do_setcond_high;
        }

        i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
                                     op->args[4], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            goto do_setcond_low;
        }
        break;

    default:
        break;

    do_setcond_low:
        op->args[2] = op->args[3];
        op->args[3] = cond;
        op->opc = INDEX_op_setcond_i32;
        break;

    do_setcond_high:
        op->args[1] = op->args[2];
        op->args[2] = op->args[4];
        op->args[3] = cond;
        op->opc = INDEX_op_setcond_i32;
        break;
    }
    return false;

 do_setcond_const:
    return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}

static bool fold_sextract(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = sextract64(t, op->args[2], op->args[3]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_shift(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_sub(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
{
    return fold_addsub2_i32(ctx, op, false);
}

static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, i;
    TCGOp *op, *op_next;
    OptContext ctx = { .tcg = s };

    /*
     * Each temp has a TempOptInfo attached via its state_ptr.
     * If the temp holds a constant, its value is kept there.
     * If the temp is a copy of other temps, the other copies are
     * available through the doubly linked circular list.
     */

    nb_temps = s->nb_temps;
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        uint64_t z_mask, partmask, affected, tmp;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def;
        bool done = false;

        /* Calls are special. */
        if (opc == INDEX_op_call) {
            fold_call(&ctx, op);
            continue;
        }

        def = &tcg_op_defs[opc];
        init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
        copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

        /* For commutative operations make constant second argument */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
        CASE_OP_32_64_VEC(mul):
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            swap_commutative(op->args[0], &op->args[1], &op->args[2]);
            break;
        CASE_OP_32_64(brcond):
            if (swap_commutative(-1, &op->args[0], &op->args[1])) {
                op->args[2] = tcg_swap_cond(op->args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
                op->args[3] = tcg_swap_cond(op->args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            if (swap_commutative(-1, &op->args[1], &op->args[2])) {
                op->args[5] = tcg_swap_cond(op->args[5]);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation. */
            if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
                op->args[5] = tcg_invert_cond(op->args[5]);
            }
            break;
        CASE_OP_32_64(add2):
            swap_commutative(op->args[0], &op->args[2], &op->args[4]);
            swap_commutative(op->args[1], &op->args[3], &op->args[5]);
            break;
        CASE_OP_32_64(mulu2):
        CASE_OP_32_64(muls2):
            swap_commutative(op->args[0], &op->args[2], &op->args[3]);
            break;
        case INDEX_op_brcond2_i32:
            if (swap_commutative2(&op->args[0], &op->args[2])) {
                op->args[4] = tcg_swap_cond(op->args[4]);
            }
            break;
        case INDEX_op_setcond2_i32:
            if (swap_commutative2(&op->args[1], &op->args[3])) {
                op->args[5] = tcg_swap_cond(op->args[5]);
            }
            break;
        default:
            break;
        }
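
        /*
         * Illustration (not part of the original source): after this
         * canonicalization, the simplifications below only need to test
         * args[2] for a constant; e.g. "and_i32 r, $0xff, a" has already
         * been rewritten as "and_i32 r, a, $0xff".
         */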

        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
           and "sub r, 0, a => neg r, a" case. */
        switch (opc) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == 0) {
                tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
                continue;
            }
            break;
        CASE_OP_32_64_VEC(sub):
            {
                TCGOpcode neg_op;
                bool have_neg;

                if (arg_is_const(op->args[2])) {
                    /* Proceed with possible constant folding. */
                    break;
                }
                if (opc == INDEX_op_sub_i32) {
                    neg_op = INDEX_op_neg_i32;
                    have_neg = TCG_TARGET_HAS_neg_i32;
                } else if (opc == INDEX_op_sub_i64) {
                    neg_op = INDEX_op_neg_i64;
                    have_neg = TCG_TARGET_HAS_neg_i64;
                } else if (TCG_TARGET_HAS_neg_vec) {
                    TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
                    unsigned vece = TCGOP_VECE(op);
                    neg_op = INDEX_op_neg_vec;
                    have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
                } else {
                    break;
                }
                if (!have_neg) {
                    break;
                }
                if (arg_is_const(op->args[1])
                    && arg_info(op->args[1])->val == 0) {
                    op->opc = neg_op;
                    reset_temp(op->args[0]);
                    op->args[1] = op->args[2];
                    continue;
                }
            }
            break;
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64(nand):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == -1) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(nor):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64_VEC(andc):
            if (!arg_is_const(op->args[2])
                && arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == -1) {
                i = 2;
                goto try_not;
            }
            break;
        CASE_OP_32_64_VEC(orc):
        CASE_OP_32_64(eqv):
            if (!arg_is_const(op->args[2])
                && arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == 0) {
                i = 2;
                goto try_not;
            }
            break;
        try_not:
            {
                TCGOpcode not_op;
                bool have_not;

                if (def->flags & TCG_OPF_VECTOR) {
                    not_op = INDEX_op_not_vec;
                    have_not = TCG_TARGET_HAS_not_vec;
                } else if (def->flags & TCG_OPF_64BIT) {
                    not_op = INDEX_op_not_i64;
                    have_not = TCG_TARGET_HAS_not_i64;
                } else {
                    not_op = INDEX_op_not_i32;
                    have_not = TCG_TARGET_HAS_not_i32;
                }
                if (!have_not) {
                    break;
                }
                op->opc = not_op;
                reset_temp(op->args[0]);
                op->args[1] = op->args[i];
                continue;
            }
        default:
            break;
        }

        /* Simplify expression for "op r, a, const => mov r, a" cases */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
        CASE_OP_32_64_VEC(sub):
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64_VEC(andc):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(orc):
        CASE_OP_32_64(eqv):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == -1) {
                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        default:
            break;
        }

        /* Simplify using known-zero bits. Currently only ops with a single
           output argument is supported. */
        z_mask = -1;
        affected = -1;
        switch (opc) {
        CASE_OP_32_64(ext8s):
            if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
                break;
            }
            QEMU_FALLTHROUGH;
        CASE_OP_32_64(ext8u):
            z_mask = 0xff;
            goto and_const;
        CASE_OP_32_64(ext16s):
            if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
                break;
            }
            QEMU_FALLTHROUGH;
        CASE_OP_32_64(ext16u):
            z_mask = 0xffff;
            goto and_const;
        case INDEX_op_ext32s_i64:
            if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
                break;
            }
            QEMU_FALLTHROUGH;
        case INDEX_op_ext32u_i64:
            z_mask = 0xffffffffU;
            goto and_const;

        CASE_OP_32_64(and):
            z_mask = arg_info(op->args[2])->z_mask;
            if (arg_is_const(op->args[2])) {
        and_const:
                affected = arg_info(op->args[1])->z_mask & ~z_mask;
            }
            z_mask = arg_info(op->args[1])->z_mask & z_mask;
            break;

        case INDEX_op_ext_i32_i64:
            if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
                break;
            }
            QEMU_FALLTHROUGH;
        case INDEX_op_extu_i32_i64:
            /* We do not compute affected as it is a size changing op. */
            z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
            break;

        CASE_OP_32_64(andc):
            /* Known-zeros does not imply known-ones.  Therefore unless
               op->args[2] is constant, we can't infer anything from it. */
            if (arg_is_const(op->args[2])) {
                z_mask = ~arg_info(op->args[2])->z_mask;
                goto and_const;
            }
            /* But we certainly know nothing outside args[1] may be set. */
            z_mask = arg_info(op->args[1])->z_mask;
            break;

        case INDEX_op_sar_i32:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 31;
                z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
            }
            break;
        case INDEX_op_sar_i64:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 63;
                z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
            }
            break;

        case INDEX_op_shr_i32:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 31;
                z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
            }
            break;
        case INDEX_op_shr_i64:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 63;
                z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
            }
            break;

        case INDEX_op_extrl_i64_i32:
            z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
            break;
        case INDEX_op_extrh_i64_i32:
            z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
            break;

        CASE_OP_32_64(shl):
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
                z_mask = arg_info(op->args[1])->z_mask << tmp;
            }
            break;

        CASE_OP_32_64(neg):
            /* Set to 1 all bits to the left of the rightmost. */
            z_mask = -(arg_info(op->args[1])->z_mask
                       & -arg_info(op->args[1])->z_mask);
            break;

        CASE_OP_32_64(deposit):
            z_mask = deposit64(arg_info(op->args[1])->z_mask,
                               op->args[3], op->args[4],
                               arg_info(op->args[2])->z_mask);
            break;

        CASE_OP_32_64(extract):
            z_mask = extract64(arg_info(op->args[1])->z_mask,
                               op->args[2], op->args[3]);
            if (op->args[2] == 0) {
                affected = arg_info(op->args[1])->z_mask & ~z_mask;
            }
            break;
        CASE_OP_32_64(sextract):
            z_mask = sextract64(arg_info(op->args[1])->z_mask,
                                op->args[2], op->args[3]);
            if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
                affected = arg_info(op->args[1])->z_mask & ~z_mask;
            }
            break;

        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            z_mask = arg_info(op->args[1])->z_mask
                   | arg_info(op->args[2])->z_mask;
            break;

        case INDEX_op_clz_i32:
        case INDEX_op_ctz_i32:
            z_mask = arg_info(op->args[2])->z_mask | 31;
            break;

        case INDEX_op_clz_i64:
        case INDEX_op_ctz_i64:
            z_mask = arg_info(op->args[2])->z_mask | 63;
            break;

        case INDEX_op_ctpop_i32:
            z_mask = 32 | 31;
            break;
        case INDEX_op_ctpop_i64:
            z_mask = 64 | 63;
            break;

        CASE_OP_32_64(setcond):
        case INDEX_op_setcond2_i32:
            z_mask = 1;
            break;

        CASE_OP_32_64(movcond):
            z_mask = arg_info(op->args[3])->z_mask
                   | arg_info(op->args[4])->z_mask;
            break;

        CASE_OP_32_64(ld8u):
            z_mask = 0xff;
            break;
        CASE_OP_32_64(ld16u):
            z_mask = 0xffff;
            break;
        case INDEX_op_ld32u_i64:
            z_mask = 0xffffffffu;
            break;

        CASE_OP_32_64(qemu_ld):
            {
                MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
                MemOp mop = get_memop(oi);
                if (!(mop & MO_SIGN)) {
                    z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
                }
            }
            break;

        CASE_OP_32_64(bswap16):
            z_mask = arg_info(op->args[1])->z_mask;
            if (z_mask <= 0xffff) {
                op->args[2] |= TCG_BSWAP_IZ;
            }
            z_mask = bswap16(z_mask);
            switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
            case TCG_BSWAP_OZ:
                break;
            case TCG_BSWAP_OS:
                z_mask = (int16_t)z_mask;
                break;
            default: /* undefined high bits */
                z_mask |= MAKE_64BIT_MASK(16, 48);
                break;
            }
            break;

        case INDEX_op_bswap32_i64:
            z_mask = arg_info(op->args[1])->z_mask;
            if (z_mask <= 0xffffffffu) {
                op->args[2] |= TCG_BSWAP_IZ;
            }
            z_mask = bswap32(z_mask);
            switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
            case TCG_BSWAP_OZ:
                break;
            case TCG_BSWAP_OS:
                z_mask = (int32_t)z_mask;
                break;
            default: /* undefined high bits */
                z_mask |= MAKE_64BIT_MASK(32, 32);
                break;
            }
            break;

        default:
            break;
        }

        /* 32-bit ops generate 32-bit results.  For the result is zero test
           below, we can ignore high bits, but for further optimizations we
           need to record that the high bits contain garbage. */
        partmask = z_mask;
        if (!(def->flags & TCG_OPF_64BIT)) {
            z_mask |= ~(tcg_target_ulong)0xffffffffu;
            partmask &= 0xffffffffu;
            affected &= 0xffffffffu;
        }
        ctx.z_mask = z_mask;

        if (partmask == 0) {
            tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
            continue;
        }
        if (affected == 0) {
            tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
            continue;
        }
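
        /*
         * Illustration (not part of the original source): if args[1] was
         * produced by ext8u (z_mask == 0xff) and this op is
         * "and_i32 r, a, $0xff", then affected == 0xff & ~0xff == 0, so
         * the and is replaced by a simple copy of args[1].
         */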

        /* Simplify expression for "op r, a, 0 => movi r, 0" cases */
        switch (opc) {
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(mul):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            if (arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
                continue;
            }
            break;
        default:
            break;
        }
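
        /*
         * Illustration (not part of the original source): "and_i32 r, a, $0"
         * and "mul_i32 r, a, $0" always produce 0, so both are replaced by
         * a load of the constant 0 regardless of what is known about a.
         */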

        /* Simplify expression for "op r, a, a => mov r, a" cases */
        switch (opc) {
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(and):
            if (args_are_copies(op->args[1], op->args[2])) {
                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        default:
            break;
        }

        /*
         * Process each opcode.
         * Sorted alphabetically by opcode as much as possible.
         */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
            done = fold_add(&ctx, op);
            break;
        case INDEX_op_add2_i32:
            done = fold_add2_i32(&ctx, op);
            break;
        CASE_OP_32_64_VEC(and):
            done = fold_and(&ctx, op);
            break;
        CASE_OP_32_64_VEC(andc):
            done = fold_andc(&ctx, op);
            break;
        CASE_OP_32_64(brcond):
            done = fold_brcond(&ctx, op);
            break;
        case INDEX_op_brcond2_i32:
            done = fold_brcond2(&ctx, op);
            break;
        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
            done = fold_bswap(&ctx, op);
            break;
        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            done = fold_count_zeros(&ctx, op);
            break;
        CASE_OP_32_64(ctpop):
            done = fold_ctpop(&ctx, op);
            break;
        CASE_OP_32_64(deposit):
            done = fold_deposit(&ctx, op);
            break;
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
            done = fold_divide(&ctx, op);
            break;
        case INDEX_op_dup_vec:
            done = fold_dup(&ctx, op);
            break;
        case INDEX_op_dup2_vec:
            done = fold_dup2(&ctx, op);
            break;
        CASE_OP_32_64(eqv):
            done = fold_eqv(&ctx, op);
            break;
        CASE_OP_32_64(extract):
            done = fold_extract(&ctx, op);
            break;
        CASE_OP_32_64(extract2):
            done = fold_extract2(&ctx, op);
            break;
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext16s):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            done = fold_exts(&ctx, op);
            break;
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            done = fold_extu(&ctx, op);
            break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
        CASE_OP_32_64_VEC(mov):
            done = fold_mov(&ctx, op);
            break;
        CASE_OP_32_64(movcond):
            done = fold_movcond(&ctx, op);
            break;
        CASE_OP_32_64(mul):
            done = fold_mul(&ctx, op);
            break;
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(muluh):
            done = fold_mul_highpart(&ctx, op);
            break;
        case INDEX_op_mulu2_i32:
            done = fold_mulu2_i32(&ctx, op);
            break;
        CASE_OP_32_64(nand):
            done = fold_nand(&ctx, op);
            break;
        CASE_OP_32_64(neg):
            done = fold_neg(&ctx, op);
            break;
        CASE_OP_32_64(nor):
            done = fold_nor(&ctx, op);
            break;
        CASE_OP_32_64_VEC(not):
            done = fold_not(&ctx, op);
            break;
        CASE_OP_32_64_VEC(or):
            done = fold_or(&ctx, op);
            break;
        CASE_OP_32_64_VEC(orc):
            done = fold_orc(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i32:
        case INDEX_op_qemu_ld_i64:
            done = fold_qemu_ld(&ctx, op);
            break;
        case INDEX_op_qemu_st_i32:
        case INDEX_op_qemu_st8_i32:
        case INDEX_op_qemu_st_i64:
            done = fold_qemu_st(&ctx, op);
            break;
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            done = fold_remainder(&ctx, op);
            break;
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
            done = fold_shift(&ctx, op);
            break;
        CASE_OP_32_64(setcond):
            done = fold_setcond(&ctx, op);
            break;
        case INDEX_op_setcond2_i32:
            done = fold_setcond2(&ctx, op);
            break;
        CASE_OP_32_64(sextract):
            done = fold_sextract(&ctx, op);
            break;
        CASE_OP_32_64_VEC(sub):
            done = fold_sub(&ctx, op);
            break;
        case INDEX_op_sub2_i32:
            done = fold_sub2_i32(&ctx, op);
            break;
        CASE_OP_32_64_VEC(xor):
            done = fold_xor(&ctx, op);
            break;
        default:
            break;
        }

        if (!done) {
            finish_folding(&ctx, op);
        }
    }
}