/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "tcg/tcg-op.h"
#include "tcg-internal.h"
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)
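
/*
 * For example, CASE_OP_32_64(add) expands to
 *     case INDEX_op_add_i32:
 *     case INDEX_op_add_i64:
 * so a single switch arm can cover both widths of an operation,
 * and CASE_OP_32_64_VEC additionally covers the _vec form.
 */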
typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
} TempOptInfo;
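
/*
 * Illustrative examples of the z_mask invariant (not from the original
 * file): a temp holding the constant 6 has z_mask == 6, and a setcond
 * result, which is always 0 or 1, has z_mask == 1.  A temp about which
 * nothing is known has z_mask == -1, i.e. every bit possibly set.
 */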
typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    /* In flight values from optimization. */
    uint64_t z_mask;
} OptContext;
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}
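
/*
 * Note: copies form a doubly linked circular list through next_copy /
 * prev_copy, and a temp that is not copied points back at itself, so
 * "next_copy != ts" is enough to detect a non-trivial copy ring.
 */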
/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TempOptInfo *pi = ts_info(ti->prev_copy);
    TempOptInfo *ni = ts_info(ti->next_copy);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
}

static void reset_temp(TCGArg arg)
{
    reset_ts(arg_temp(arg));
}
/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
            /* High bits of a 32-bit quantity are garbage. */
            ti->z_mask |= ~0xffffffffull;
        }
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
    }
}
static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
    TCGTemp *i, *g, *l;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    g = l = NULL;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        if (temp_readonly(i)) {
            return i;
        } else if (i->kind > ts->kind) {
            if (i->kind == TEMP_GLOBAL) {
                g = i;
            } else if (i->kind == TEMP_LOCAL) {
                l = i;
            }
        }
    }

    /* If we didn't find a better representation, return the same temp. */
    return g ? g : l ? l : ts;
}
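
/*
 * The preference order above: a read-only (constant) temp wins outright,
 * then a global, then a local, and otherwise TS itself is kept, so the
 * longest-lived member of a copy ring becomes its canonical representative.
 */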
static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}
static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    const TCGOpDef *def;
    TempOptInfo *di;
    TempOptInfo *si;
    uint64_t z_mask;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);
    def = &tcg_op_defs[op->opc];
    if (def->flags & TCG_OPF_VECTOR) {
        new_op = INDEX_op_mov_vec;
    } else if (def->flags & TCG_OPF_64BIT) {
        new_op = INDEX_op_mov_i64;
    } else {
        new_op = INDEX_op_mov_i32;
    }
    op->opc = new_op;
    /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
    op->args[0] = dst;
    op->args[1] = src;

    z_mask = si->z_mask;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
        /* High bits of the destination are now garbage. */
        z_mask |= ~0xffffffffull;
    }
    di->z_mask = z_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;
    }
    return true;
}
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    TCGType type;
    TCGTemp *tv;

    if (def->flags & TCG_OPF_VECTOR) {
        type = TCGOP_VECL(op) + TCG_TYPE_V64;
    } else if (def->flags & TCG_OPF_64BIT) {
        type = TCG_TYPE_I64;
    } else {
        type = TCG_TYPE_I32;
    }

    /* Convert movi to mov with constant temp. */
    tv = tcg_constant_internal(type, val);
    init_ts_info(ctx, tv);
    return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}
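
/*
 * Illustrative example (not part of the original file): when a folder
 * proves that "add_i32 r, a, b" computes the constant 5, it calls
 * tcg_opt_gen_movi(ctx, op, r, 5), which rewrites the op in place into
 * a mov from a constant temp obtained via tcg_constant_internal().
 */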
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}
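
/*
 * Example of the shift-count masking above (illustrative, not from the
 * original file): do_constant_folding_2(INDEX_op_shl_i32, 1, 33) returns 2,
 * because the count is masked to 33 & 31 == 1 before shifting; TCG leaves
 * out-of-range shift counts undefined, so any fixed choice is safe here.
 */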
static uint64_t do_constant_folding(TCGOpcode op, uint64_t x, uint64_t y)
{
    const TCGOpDef *def = &tcg_op_defs[op];
    uint64_t res = do_constant_folding_2(op, x, y);
    if (!(def->flags & TCG_OPF_64BIT)) {
        res = (int32_t)res;
    }
    return res;
}
static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}
static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}
static bool do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        tcg_abort();
    }
}
/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGOpcode op, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    uint64_t xv = arg_info(x)->val;
    uint64_t yv = arg_info(y)->val;

    if (arg_is_const(x) && arg_is_const(y)) {
        const TCGOpDef *def = &tcg_op_defs[op];
        tcg_debug_assert(!(def->flags & TCG_OPF_VECTOR));
        if (def->flags & TCG_OPF_64BIT) {
            return do_constant_folding_cond_64(xv, yv, c);
        } else {
            return do_constant_folding_cond_32(xv, yv, c);
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const(y) && yv == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}
/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);
            return do_constant_folding_cond_64(a, b, c);
        }
        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }
    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return -1;
}
static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}
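
/*
 * Illustrative example (not from the original file): for "add r, $5, b"
 * swap_commutative() moves the constant to the second operand, giving
 * "add r, b, $5", so later identity checks only need to look at args[2].
 */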
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}
static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts) {
            init_ts_info(ctx, ts);
        }
    }
}
static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    TCGContext *s = ctx->tcg;

    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts && ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(s, ts));
        }
    }
}
static void finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    /*
     * For an opcode that ends a BB, reset all temp data.
     * We do no cross-BB optimization.
     */
    if (def->flags & TCG_OPF_BB_END) {
        memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
        ctx->prev_mb = NULL;
        return;
    }

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
        /*
         * Save the corresponding known-zero bits mask for the
         * first output argument (only one supported so far).
         */
        if (i == 0) {
            arg_info(op->args[i])->z_mask = ctx->z_mask;
        }
    }
}
/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */
static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}
static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}
/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}
/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}
/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}
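
/*
 * Usage examples: fold_xor() below uses fold_xx_to_i(ctx, op, 0) because
 * "xor x, x" is always 0, while fold_and() uses fold_xx_to_x() because
 * "and x, x" is just x.
 */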
/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */
static bool fold_add(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}
static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
{
    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
        arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
        uint32_t al = arg_info(op->args[2])->val;
        uint32_t ah = arg_info(op->args[3])->val;
        uint32_t bl = arg_info(op->args[4])->val;
        uint32_t bh = arg_info(op->args[5])->val;
        uint64_t a = ((uint64_t)ah << 32) | al;
        uint64_t b = ((uint64_t)bh << 32) | bl;
        TCGArg rl, rh;
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);

        if (add) {
            a += b;
        } else {
            a -= b;
        }

        rl = op->args[0];
        rh = op->args[1];
        tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
        tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
        return true;
    }
    return false;
}
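
/*
 * Example (illustrative, not from the original file): for add2 with
 * constant inputs al=0xffffffff, ah=0, bl=1, bh=0, the 64-bit sum is
 * 0x1_0000_0000, so the low output folds to movi 0 and the high output
 * to movi 1, the latter emitted on the op2 inserted above.
 */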
static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
{
    return fold_addsub2_i32(ctx, op, true);
}
static bool fold_and(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }
    return false;
}
static bool fold_andc(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_brcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[2];
    int i = do_constant_folding_cond(op->opc, op->args[0], op->args[1], cond);

    if (i == 0) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }
    if (i > 0) {
        op->opc = INDEX_op_br;
        op->args[0] = op->args[3];
    }
    return false;
}
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[4];
    int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
    TCGArg label = op->args[5];
    int inv = 0;

    if (i >= 0) {
        goto do_brcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
            arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
            goto do_brcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[0],
                                     op->args[2], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_high;
        }

        i = do_constant_folding_cond(INDEX_op_brcond_i32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            op->opc = INDEX_op_brcond_i32;
            op->args[1] = op->args[2];
            op->args[2] = cond;
            op->args[3] = label;
            break;
        }
        break;

    default:
        break;

    do_brcond_high:
        op->opc = INDEX_op_brcond_i32;
        op->args[0] = op->args[1];
        op->args[1] = op->args[3];
        op->args[2] = cond;
        op->args[3] = label;
        break;

    do_brcond_const:
        if (i == 0) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
        op->opc = INDEX_op_br;
        op->args[0] = label;
        break;
    }
    return false;
}

static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        t = do_constant_folding(op->opc, t, op->args[2]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}
static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
    int nb_oargs = TCGOP_CALLO(op);
    int nb_iargs = TCGOP_CALLI(op);
    int flags, i;

    init_arguments(ctx, op, nb_oargs + nb_iargs);
    copy_propagate(ctx, op, nb_oargs, nb_iargs);

    /* If the function reads or writes globals, reset temp data. */
    flags = tcg_call_flags(op);
    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
        int nb_globals = s->nb_globals;

        for (i = 0; i < nb_globals; i++) {
            if (test_bit(i, ctx->temps_used.l)) {
                reset_ts(&ctx->tcg->temps[i]);
            }
        }
    }

    /* Reset temp data for outputs. */
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
    }

    /* Stop optimizing MB across calls. */
    ctx->prev_mb = NULL;
    return true;
}
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        if (t != 0) {
            t = do_constant_folding(op->opc, t, 0);
            return tcg_opt_gen_movi(ctx, op, op->args[0], t);
        }
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }
    return false;
}
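
/*
 * Note: TCG defines clz/ctz of zero to produce the second input, which
 * is why a constant zero first operand folds to a copy of op->args[2]
 * rather than to a fixed constant.
 */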
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}
static bool fold_deposit(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = deposit64(t1, op->args[3], op->args[4], t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}
*ctx
, TCGOp
*op
)
961 return fold_const2(ctx
, op
);
964 static bool fold_dup(OptContext
*ctx
, TCGOp
*op
)
966 if (arg_is_const(op
->args
[1])) {
967 uint64_t t
= arg_info(op
->args
[1])->val
;
968 t
= dup_const(TCGOP_VECE(op
), t
);
969 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], t
);
974 static bool fold_dup2(OptContext
*ctx
, TCGOp
*op
)
976 if (arg_is_const(op
->args
[1]) && arg_is_const(op
->args
[2])) {
977 uint64_t t
= deposit64(arg_info(op
->args
[1])->val
, 32, 32,
978 arg_info(op
->args
[2])->val
);
979 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], t
);
982 if (args_are_copies(op
->args
[1], op
->args
[2])) {
983 op
->opc
= INDEX_op_dup_vec
;
984 TCGOP_VECE(op
) = MO_32
;
989 static bool fold_eqv(OptContext
*ctx
, TCGOp
*op
)
991 return fold_const2(ctx
, op
);
994 static bool fold_extract(OptContext
*ctx
, TCGOp
*op
)
996 if (arg_is_const(op
->args
[1])) {
999 t
= arg_info(op
->args
[1])->val
;
1000 t
= extract64(t
, op
->args
[2], op
->args
[3]);
1001 return tcg_opt_gen_movi(ctx
, op
, op
->args
[0], t
);
static bool fold_extract2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t v1 = arg_info(op->args[1])->val;
        uint64_t v2 = arg_info(op->args[2])->val;
        int shr = op->args[3];

        if (op->opc == INDEX_op_extract2_i64) {
            v1 >>= shr;
            v2 <<= 64 - shr;
        } else {
            v1 = (uint32_t)v1 >> shr;
            v2 = (int32_t)v2 << (32 - shr);
        }
        return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
    }
    return false;
}

static bool fold_exts(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}

static bool fold_extu(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}
static bool fold_mb(OptContext *ctx, TCGOp *op)
{
    /* Eliminate duplicate and redundant fence instructions. */
    if (ctx->prev_mb) {
        /*
         * Merge two barriers of the same type into one,
         * or a weaker barrier into a stronger one,
         * or two weaker barriers into a stronger one.
         *   mb X; mb Y => mb X|Y
         *   mb; strl => mb; st
         *   ldaq; mb => ld; mb
         *   ldaq; strl => ld; mb; st
         * Other combinations are also merged into a strong
         * barrier.  This is stricter than specified but for
         * the purposes of TCG is better than not optimizing.
         */
        ctx->prev_mb->args[0] |= op->args[0];
        tcg_op_remove(ctx->tcg, op);
    } else {
        ctx->prev_mb = op;
    }
    return true;
}
static bool fold_mov(OptContext *ctx, TCGOp *op)
{
    return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}
static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
    TCGOpcode opc = op->opc;
    TCGCond cond = op->args[5];
    int i = do_constant_folding_cond(opc, op->args[1], op->args[2], cond);

    if (i >= 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
    }

    if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
        uint64_t tv = arg_info(op->args[3])->val;
        uint64_t fv = arg_info(op->args[4])->val;

        opc = (opc == INDEX_op_movcond_i32
               ? INDEX_op_setcond_i32 : INDEX_op_setcond_i64);

        if (tv == 1 && fv == 0) {
            op->opc = opc;
            op->args[3] = cond;
        } else if (fv == 1 && tv == 0) {
            op->opc = opc;
            op->args[3] = tcg_invert_cond(cond);
        }
    }
    return false;
}
static bool fold_mul(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}
static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
        uint32_t a = arg_info(op->args[2])->val;
        uint32_t b = arg_info(op->args[3])->val;
        uint64_t r = (uint64_t)a * b;
        TCGArg rl, rh;
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);

        rl = op->args[0];
        rh = op->args[1];
        tcg_opt_gen_movi(ctx, op, rl, (int32_t)r);
        tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(r >> 32));
        return true;
    }
    return false;
}
static bool fold_nand(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_neg(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}

static bool fold_nor(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_not(OptContext *ctx, TCGOp *op)
{
    return fold_const1(ctx, op);
}
static bool fold_or(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }
    return false;
}

static bool fold_orc(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}

static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}
static bool fold_remainder(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}
static bool fold_setcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[3];
    int i = do_constant_folding_cond(op->opc, op->args[1], op->args[2], cond);

    if (i >= 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[5];
    int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
    int inv = 0;

    if (i >= 0) {
        goto do_setcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
            arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
            goto do_setcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            goto do_setcond_high;
        }

        i = do_constant_folding_cond(INDEX_op_setcond_i32, op->args[2],
                                     op->args[4], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            op->args[2] = op->args[3];
            op->args[3] = cond;
            op->opc = INDEX_op_setcond_i32;
            break;
        }
        break;

    default:
        break;

    do_setcond_high:
        op->args[1] = op->args[2];
        op->args[2] = op->args[4];
        op->args[3] = cond;
        op->opc = INDEX_op_setcond_i32;
        break;
    }
    return false;

 do_setcond_const:
    return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}
static bool fold_sextract(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = sextract64(t, op->args[2], op->args[3]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}
static bool fold_shift(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}

static bool fold_sub(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
{
    return fold_addsub2_i32(ctx, op, false);
}

static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, i;
    TCGOp *op, *op_next;
    OptContext ctx = { .tcg = s };

    /* Array VALS has an element for each temp.
       If this temp holds a constant then its value is kept in VALS' element.
       If this temp is a copy of other ones then the other copies are
       available through the doubly linked circular list. */

    nb_temps = s->nb_temps;
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }
    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        uint64_t z_mask, partmask, affected, tmp;
        TCGOpcode opc = op->opc;
        const TCGOpDef *def;
        bool done = false;

        /* Calls are special. */
        if (opc == INDEX_op_call) {
            fold_call(&ctx, op);
            continue;
        }

        def = &tcg_op_defs[opc];
        init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
        copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);
        /* For commutative operations make constant second argument */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
        CASE_OP_32_64_VEC(mul):
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64(eqv):
        CASE_OP_32_64(nand):
        CASE_OP_32_64(nor):
        CASE_OP_32_64(muluh):
        CASE_OP_32_64(mulsh):
            swap_commutative(op->args[0], &op->args[1], &op->args[2]);
            break;
        CASE_OP_32_64(brcond):
            if (swap_commutative(-1, &op->args[0], &op->args[1])) {
                op->args[2] = tcg_swap_cond(op->args[2]);
            }
            break;
        CASE_OP_32_64(setcond):
            if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
                op->args[3] = tcg_swap_cond(op->args[3]);
            }
            break;
        CASE_OP_32_64(movcond):
            if (swap_commutative(-1, &op->args[1], &op->args[2])) {
                op->args[5] = tcg_swap_cond(op->args[5]);
            }
            /* For movcond, we canonicalize the "false" input reg to match
               the destination reg so that the tcg backend can implement
               a "move if true" operation. */
            if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
                op->args[5] = tcg_invert_cond(op->args[5]);
            }
            break;
        CASE_OP_32_64(add2):
            swap_commutative(op->args[0], &op->args[2], &op->args[4]);
            swap_commutative(op->args[1], &op->args[3], &op->args[5]);
            break;
        CASE_OP_32_64(mulu2):
        CASE_OP_32_64(muls2):
            swap_commutative(op->args[0], &op->args[2], &op->args[3]);
            break;
        case INDEX_op_brcond2_i32:
            if (swap_commutative2(&op->args[0], &op->args[2])) {
                op->args[4] = tcg_swap_cond(op->args[4]);
            }
            break;
        case INDEX_op_setcond2_i32:
            if (swap_commutative2(&op->args[1], &op->args[3])) {
                op->args[5] = tcg_swap_cond(op->args[5]);
            }
            break;
        default:
            break;
        }
        /* Simplify expressions for "shift/rot r, 0, a => movi r, 0",
           and "sub r, 0, a => neg r, a" case. */
        switch (opc) {
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == 0) {
                tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
                continue;
            }
            break;
        CASE_OP_32_64_VEC(sub):
            {
                TCGOpcode neg_op;
                bool have_neg;

                if (arg_is_const(op->args[2])) {
                    /* Proceed with possible constant folding. */
                    break;
                }
                if (opc == INDEX_op_sub_i32) {
                    neg_op = INDEX_op_neg_i32;
                    have_neg = TCG_TARGET_HAS_neg_i32;
                } else if (opc == INDEX_op_sub_i64) {
                    neg_op = INDEX_op_neg_i64;
                    have_neg = TCG_TARGET_HAS_neg_i64;
                } else if (TCG_TARGET_HAS_neg_vec) {
                    TCGType type = TCGOP_VECL(op) + TCG_TYPE_V64;
                    unsigned vece = TCGOP_VECE(op);
                    neg_op = INDEX_op_neg_vec;
                    have_neg = tcg_can_emit_vec_op(neg_op, type, vece) > 0;
                } else {
                    break;
                }
                if (!have_neg) {
                    break;
                }
                if (arg_is_const(op->args[1])
                    && arg_info(op->args[1])->val == 0) {
                    op->opc = neg_op;
                    reset_temp(op->args[0]);
                    op->args[1] = op->args[2];
                    continue;
                }
            }
            break;
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64(nand):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == -1) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64(nor):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                i = 1;
                goto try_not;
            }
            break;
        CASE_OP_32_64_VEC(andc):
            if (!arg_is_const(op->args[2])
                && arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == -1) {
                i = 2;
                goto try_not;
            }
            break;
        CASE_OP_32_64_VEC(orc):
        CASE_OP_32_64(eqv):
            if (!arg_is_const(op->args[2])
                && arg_is_const(op->args[1])
                && arg_info(op->args[1])->val == 0) {
                i = 2;
                goto try_not;
            }
            break;
        try_not:
            {
                TCGOpcode not_op;
                bool have_not;

                if (def->flags & TCG_OPF_VECTOR) {
                    not_op = INDEX_op_not_vec;
                    have_not = TCG_TARGET_HAS_not_vec;
                } else if (def->flags & TCG_OPF_64BIT) {
                    not_op = INDEX_op_not_i64;
                    have_not = TCG_TARGET_HAS_not_i64;
                } else {
                    not_op = INDEX_op_not_i32;
                    have_not = TCG_TARGET_HAS_not_i32;
                }
                if (!have_not) {
                    break;
                }
                op->opc = not_op;
                reset_temp(op->args[0]);
                op->args[1] = op->args[i];
                continue;
            }
        default:
            break;
        }
        /* Simplify expression for "op r, a, const => mov r, a" cases */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
        CASE_OP_32_64_VEC(sub):
        CASE_OP_32_64_VEC(or):
        CASE_OP_32_64_VEC(xor):
        CASE_OP_32_64_VEC(andc):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == 0) {
                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        CASE_OP_32_64_VEC(and):
        CASE_OP_32_64_VEC(orc):
        CASE_OP_32_64(eqv):
            if (!arg_is_const(op->args[1])
                && arg_is_const(op->args[2])
                && arg_info(op->args[2])->val == -1) {
                tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
                continue;
            }
            break;
        default:
            break;
        }
        /* Simplify using known-zero bits.  Currently only ops with a single
           output argument are supported. */
        z_mask = -1;
        affected = -1;
        switch (opc) {
        CASE_OP_32_64(ext8s):
            if ((arg_info(op->args[1])->z_mask & 0x80) != 0) {
                break;
            }
            QEMU_FALLTHROUGH;
        CASE_OP_32_64(ext8u):
            z_mask = 0xff;
            goto and_const;
        CASE_OP_32_64(ext16s):
            if ((arg_info(op->args[1])->z_mask & 0x8000) != 0) {
                break;
            }
            QEMU_FALLTHROUGH;
        CASE_OP_32_64(ext16u):
            z_mask = 0xffff;
            goto and_const;
        case INDEX_op_ext32s_i64:
            if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
                break;
            }
            QEMU_FALLTHROUGH;
        case INDEX_op_ext32u_i64:
            z_mask = 0xffffffffU;
            goto and_const;

        CASE_OP_32_64(and):
            z_mask = arg_info(op->args[2])->z_mask;
            if (arg_is_const(op->args[2])) {
        and_const:
                affected = arg_info(op->args[1])->z_mask & ~z_mask;
            }
            z_mask = arg_info(op->args[1])->z_mask & z_mask;
            break;

        case INDEX_op_ext_i32_i64:
            if ((arg_info(op->args[1])->z_mask & 0x80000000) != 0) {
                break;
            }
            QEMU_FALLTHROUGH;
        case INDEX_op_extu_i32_i64:
            /* We do not compute affected as it is a size changing op. */
            z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
            break;

        CASE_OP_32_64(andc):
            /* Known-zeros does not imply known-ones.  Therefore unless
               op->args[2] is constant, we can't infer anything from it. */
            if (arg_is_const(op->args[2])) {
                z_mask = ~arg_info(op->args[2])->z_mask;
                goto and_const;
            }
            /* But we certainly know nothing outside args[1] may be set. */
            z_mask = arg_info(op->args[1])->z_mask;
            break;

        case INDEX_op_sar_i32:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 31;
                z_mask = (int32_t)arg_info(op->args[1])->z_mask >> tmp;
            }
            break;
        case INDEX_op_sar_i64:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 63;
                z_mask = (int64_t)arg_info(op->args[1])->z_mask >> tmp;
            }
            break;

        case INDEX_op_shr_i32:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 31;
                z_mask = (uint32_t)arg_info(op->args[1])->z_mask >> tmp;
            }
            break;
        case INDEX_op_shr_i64:
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & 63;
                z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> tmp;
            }
            break;

        case INDEX_op_extrl_i64_i32:
            z_mask = (uint32_t)arg_info(op->args[1])->z_mask;
            break;
        case INDEX_op_extrh_i64_i32:
            z_mask = (uint64_t)arg_info(op->args[1])->z_mask >> 32;
            break;

        CASE_OP_32_64(shl):
            if (arg_is_const(op->args[2])) {
                tmp = arg_info(op->args[2])->val & (TCG_TARGET_REG_BITS - 1);
                z_mask = arg_info(op->args[1])->z_mask << tmp;
            }
            break;

        CASE_OP_32_64(neg):
            /* Set to 1 all bits to the left of the rightmost. */
            z_mask = -(arg_info(op->args[1])->z_mask
                       & -arg_info(op->args[1])->z_mask);
            break;

        CASE_OP_32_64(deposit):
            z_mask = deposit64(arg_info(op->args[1])->z_mask,
                               op->args[3], op->args[4],
                               arg_info(op->args[2])->z_mask);
            break;

        CASE_OP_32_64(extract):
            z_mask = extract64(arg_info(op->args[1])->z_mask,
                               op->args[2], op->args[3]);
            if (op->args[2] == 0) {
                affected = arg_info(op->args[1])->z_mask & ~z_mask;
            }
            break;
        CASE_OP_32_64(sextract):
            z_mask = sextract64(arg_info(op->args[1])->z_mask,
                                op->args[2], op->args[3]);
            if (op->args[2] == 0 && (tcg_target_long)z_mask >= 0) {
                affected = arg_info(op->args[1])->z_mask & ~z_mask;
            }
            break;

        CASE_OP_32_64(or):
        CASE_OP_32_64(xor):
            z_mask = arg_info(op->args[1])->z_mask
                   | arg_info(op->args[2])->z_mask;
            break;

        case INDEX_op_clz_i32:
        case INDEX_op_ctz_i32:
            z_mask = arg_info(op->args[2])->z_mask | 31;
            break;

        case INDEX_op_clz_i64:
        case INDEX_op_ctz_i64:
            z_mask = arg_info(op->args[2])->z_mask | 63;
            break;

        case INDEX_op_ctpop_i32:
            z_mask = 32 | 31;
            break;
        case INDEX_op_ctpop_i64:
            z_mask = 64 | 63;
            break;

        CASE_OP_32_64(setcond):
        case INDEX_op_setcond2_i32:
            z_mask = 1;
            break;

        CASE_OP_32_64(movcond):
            z_mask = arg_info(op->args[3])->z_mask
                   | arg_info(op->args[4])->z_mask;
            break;

        CASE_OP_32_64(ld8u):
            z_mask = 0xff;
            break;
        CASE_OP_32_64(ld16u):
            z_mask = 0xffff;
            break;
        case INDEX_op_ld32u_i64:
            z_mask = 0xffffffffu;
            break;

        CASE_OP_32_64(qemu_ld):
            {
                MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
                MemOp mop = get_memop(oi);
                if (!(mop & MO_SIGN)) {
                    z_mask = (2ULL << ((8 << (mop & MO_SIZE)) - 1)) - 1;
                }
            }
            break;

        CASE_OP_32_64(bswap16):
            z_mask = arg_info(op->args[1])->z_mask;
            if (z_mask <= 0xffff) {
                op->args[2] |= TCG_BSWAP_IZ;
            }
            z_mask = bswap16(z_mask);
            switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
            case TCG_BSWAP_OZ:
                break;
            case TCG_BSWAP_OS:
                z_mask = (int16_t)z_mask;
                break;
            default: /* undefined high bits */
                z_mask |= MAKE_64BIT_MASK(16, 48);
                break;
            }
            break;

        case INDEX_op_bswap32_i64:
            z_mask = arg_info(op->args[1])->z_mask;
            if (z_mask <= 0xffffffffu) {
                op->args[2] |= TCG_BSWAP_IZ;
            }
            z_mask = bswap32(z_mask);
            switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
            case TCG_BSWAP_OZ:
                break;
            case TCG_BSWAP_OS:
                z_mask = (int32_t)z_mask;
                break;
            default: /* undefined high bits */
                z_mask |= MAKE_64BIT_MASK(32, 32);
                break;
            }
            break;

        default:
            break;
        }
        /* 32-bit ops generate 32-bit results.  For the "result is zero"
           test below, we can ignore high bits, but for further optimizations
           we need to record that the high bits contain garbage. */
        partmask = z_mask;
        if (!(def->flags & TCG_OPF_64BIT)) {
            z_mask |= ~(tcg_target_ulong)0xffffffffu;
            partmask &= 0xffffffffu;
            affected &= 0xffffffffu;
        }
        ctx.z_mask = z_mask;

        if (partmask == 0) {
            tcg_opt_gen_movi(&ctx, op, op->args[0], 0);
            continue;
        }
        if (affected == 0) {
            tcg_opt_gen_mov(&ctx, op, op->args[0], op->args[1]);
            continue;
        }
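
        /*
         * Illustrative example (not from the original file): for
         * "and_i32 r, x, $0xff00" where x is known to fit in 8 bits
         * (z_mask == 0xff), the result mask computed above is 0, so
         * partmask == 0 and the op folds to "movi r, 0" here without
         * any opcode-specific folding rule.
         */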
        /*
         * Process each opcode.
         * Sorted alphabetically by opcode as much as possible.
         */
        switch (opc) {
        CASE_OP_32_64_VEC(add):
            done = fold_add(&ctx, op);
            break;
        case INDEX_op_add2_i32:
            done = fold_add2_i32(&ctx, op);
            break;
        CASE_OP_32_64_VEC(and):
            done = fold_and(&ctx, op);
            break;
        CASE_OP_32_64_VEC(andc):
            done = fold_andc(&ctx, op);
            break;
        CASE_OP_32_64(brcond):
            done = fold_brcond(&ctx, op);
            break;
        case INDEX_op_brcond2_i32:
            done = fold_brcond2(&ctx, op);
            break;
        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
            done = fold_bswap(&ctx, op);
            break;
        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            done = fold_count_zeros(&ctx, op);
            break;
        CASE_OP_32_64(ctpop):
            done = fold_ctpop(&ctx, op);
            break;
        CASE_OP_32_64(deposit):
            done = fold_deposit(&ctx, op);
            break;
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
            done = fold_divide(&ctx, op);
            break;
        case INDEX_op_dup_vec:
            done = fold_dup(&ctx, op);
            break;
        case INDEX_op_dup2_vec:
            done = fold_dup2(&ctx, op);
            break;
        CASE_OP_32_64(eqv):
            done = fold_eqv(&ctx, op);
            break;
        CASE_OP_32_64(extract):
            done = fold_extract(&ctx, op);
            break;
        CASE_OP_32_64(extract2):
            done = fold_extract2(&ctx, op);
            break;
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext16s):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            done = fold_exts(&ctx, op);
            break;
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            done = fold_extu(&ctx, op);
            break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
        CASE_OP_32_64_VEC(mov):
            done = fold_mov(&ctx, op);
            break;
        CASE_OP_32_64(movcond):
            done = fold_movcond(&ctx, op);
            break;
        CASE_OP_32_64(mul):
            done = fold_mul(&ctx, op);
            break;
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(muluh):
            done = fold_mul_highpart(&ctx, op);
            break;
        case INDEX_op_mulu2_i32:
            done = fold_mulu2_i32(&ctx, op);
            break;
        CASE_OP_32_64(nand):
            done = fold_nand(&ctx, op);
            break;
        CASE_OP_32_64(neg):
            done = fold_neg(&ctx, op);
            break;
        CASE_OP_32_64(nor):
            done = fold_nor(&ctx, op);
            break;
        CASE_OP_32_64_VEC(not):
            done = fold_not(&ctx, op);
            break;
        CASE_OP_32_64_VEC(or):
            done = fold_or(&ctx, op);
            break;
        CASE_OP_32_64_VEC(orc):
            done = fold_orc(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i32:
        case INDEX_op_qemu_ld_i64:
            done = fold_qemu_ld(&ctx, op);
            break;
        case INDEX_op_qemu_st_i32:
        case INDEX_op_qemu_st8_i32:
        case INDEX_op_qemu_st_i64:
            done = fold_qemu_st(&ctx, op);
            break;
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            done = fold_remainder(&ctx, op);
            break;
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
            done = fold_shift(&ctx, op);
            break;
        CASE_OP_32_64(setcond):
            done = fold_setcond(&ctx, op);
            break;
        case INDEX_op_setcond2_i32:
            done = fold_setcond2(&ctx, op);
            break;
        CASE_OP_32_64(sextract):
            done = fold_sextract(&ctx, op);
            break;
        CASE_OP_32_64_VEC(sub):
            done = fold_sub(&ctx, op);
            break;
        case INDEX_op_sub2_i32:
            done = fold_sub2_i32(&ctx, op);
            break;
        CASE_OP_32_64_VEC(xor):
            done = fold_xor(&ctx, op);
            break;
        default:
            break;
        }

        if (!done) {
            finish_folding(&ctx, op);
        }
    }
}