/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "tcg/tcg-op.h"
#include "tcg-internal.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)

typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
} TempOptInfo;

typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    /* In flight values from optimization. */
    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
    uint64_t s_mask;  /* mask of clrsb(value) bits */
    TCGType type;
} OptContext;

/* Calculate the smask for a specific value. */
static uint64_t smask_from_value(uint64_t value)
{
    int rep = clrsb64(value);
    return ~(~0ull >> rep);
}

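/*
 * For illustration: a value such as 0xffffffff80000000 has bits 63..31
 * all equal, so clrsb64() returns 32 and the smask computed above is
 * 0xffffffff00000000, i.e. the top 32 bits are known sign repetitions.
 */
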
/*
 * Calculate the smask for a given set of known-zeros.
 * If there are lots of zeros on the left, we can consider the remainder
 * an unsigned field, and thus the corresponding signed field is one bit
 * larger.
 */
static uint64_t smask_from_zmask(uint64_t zmask)
{
    /*
     * Only the 0 bits are significant for zmask, thus the msb itself
     * must be zero, else we have no sign information.
     */
    int rep = clz64(zmask);
    if (rep == 0) {
        return 0;
    }
    rep -= 1;
    return ~(~0ull >> rep);
}

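/*
 * For illustration: zmask == 0xff (the value fits in 8 unsigned bits)
 * has clz64() == 56, so rep == 55 and the result is 0xfffffffffffffe00:
 * bits 63..9 are guaranteed copies of the (zero) sign bit.
 */
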
/*
 * Recreate a properly left-aligned smask after manipulation.
 * Some bit-shuffling, particularly shifts and rotates, may
 * retain sign bits on the left, but may scatter disconnected
 * sign bits on the right.  Retain only what remains to the left.
 */
static uint64_t smask_from_smask(int64_t smask)
{
    /* Only the 1 bits are significant for smask */
    return smask_from_zmask(~smask);
}

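/*
 * For illustration: a rotate might leave smask == 0xffff000000008000,
 * 16 contiguous sign bits plus a stray bit 15.  The round trip through
 * smask_from_zmask(~smask) keeps only the left-aligned run, giving
 * 0xfffe000000000000.
 */
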
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

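/*
 * Note: copies are tracked as a circular doubly-linked list threaded
 * through TempOptInfo.prev_copy/next_copy.  A temp outside any list
 * points back at itself, hence the next_copy != ts test above.
 */
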
/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TempOptInfo *pi = ts_info(ti->prev_copy);
    TempOptInfo *ni = ts_info(ti->next_copy);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
    ti->s_mask = 0;
}

static void reset_temp(TCGArg arg)
{
    reset_ts(arg_temp(arg));
}

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        ti->s_mask = smask_from_value(ts->val);
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
        ti->s_mask = 0;
    }
}

static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts)
{
    TCGTemp *i, *g, *l;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    g = l = NULL;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        if (temp_readonly(i)) {
            return i;
        } else if (i->kind > ts->kind) {
            if (i->kind == TEMP_GLOBAL) {
                g = i;
            } else if (i->kind == TEMP_TB) {
                l = i;
            }
        }
    }

    /* If we didn't find a better representation, return the same temp. */
    return g ? g : l ? l : ts;
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
        new_op = INDEX_op_mov_i32;
        break;
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;
    }
    return true;
}

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    TCGTemp *tv;

    if (ctx->type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    /* Convert movi to mov with constant temp. */
    tv = tcg_constant_internal(ctx->type, val);
    init_ts_info(ctx, tv);
    return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}

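/*
 * For illustration: with ctx->type == TCG_TYPE_I32, a request for the
 * constant 0x80000000 is first canonicalized to the sign-extended
 * 0xffffffff80000000, so equal 32-bit constants always share a single
 * constant temp no matter how they were originally written.
 */
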
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64_VEC(and):
        return x & y;

    CASE_OP_32_64_VEC(or):
        return x | y;

    CASE_OP_32_64_VEC(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64_VEC(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64_VEC(andc):
        return x & ~y;

    CASE_OP_32_64_VEC(orc):
        return x | ~y;

    CASE_OP_32_64_VEC(eqv):
        return ~(x ^ y);

    CASE_OP_32_64_VEC(nand):
        return ~(x & y);

    CASE_OP_32_64_VEC(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}

static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}

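/*
 * For illustration: do_constant_folding(INDEX_op_add_i32, TCG_TYPE_I32,
 * 0x7fffffff, 1) yields 0x80000000 from do_constant_folding_2, which is
 * then sign-extended to 0xffffffff80000000 -- the canonical form for
 * 32-bit constants throughout this file.
 */
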
static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    default:
        tcg_abort();
    }
}

static bool do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    default:
        tcg_abort();
    }
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_info(x)->val;
        uint64_t yv = arg_info(y)->val;

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const(y) && arg_info(y)->val == 0) {
        switch (c) {
        case TCG_COND_LTU:
            return 0;
        case TCG_COND_GEU:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);
            return do_constant_folding_cond_64(a, b, c);
        }
        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }
    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST  temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}

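/*
 * For illustration: "add t0, $5, t1" becomes "add t0, t1, $5" (constant
 * moved to the second slot), and "add t0, t1, t0" becomes
 * "add t0, t0, t1" (destination matched to the first source).
 */
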
static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        init_ts_info(ctx, ts);
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    TCGContext *s = ctx->tcg;

    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(s, ts));
        }
    }
}

static void finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    /*
     * For an opcode that ends a BB, reset all temp data.
     * We do no cross-BB optimization.
     */
    if (def->flags & TCG_OPF_BB_END) {
        memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
        ctx->prev_mb = NULL;
        return;
    }

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ts);
        /*
         * Save the corresponding known-zero/sign bits mask for the
         * first output argument (only one supported so far).
         */
        if (i == 0) {
            ts_info(ts)->z_mask = ctx->z_mask;
            ts_info(ts)->s_mask = ctx->s_mask;
        }
    }
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

static bool fold_masks(OptContext *ctx, TCGOp *op)
{
    uint64_t a_mask = ctx->a_mask;
    uint64_t z_mask = ctx->z_mask;
    uint64_t s_mask = ctx->s_mask;

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended.  Certainly that's how we
     * represent our constants elsewhere.  Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (int32_t)a_mask;
        z_mask = (int32_t)z_mask;
        s_mask |= MAKE_64BIT_MASK(32, 32);
        ctx->z_mask = z_mask;
        ctx->s_mask = s_mask;
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

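/*
 * For illustration: an "and" of two values with disjoint z_masks, say
 * 0xf0 and 0x0f, arrives here with z_mask == 0 and is folded to the
 * constant 0; a_mask == 0 instead means no bit of the first input can
 * change, so the op is folded to a plain copy of that input.
 */
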
/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
        not_op = INDEX_op_not_i32;
        have_not = TCG_TARGET_HAS_not_i32;
        break;
    case TCG_TYPE_I64:
        not_op = INDEX_op_not_i64;
        have_not = TCG_TARGET_HAS_not_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return false;
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_add_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
        arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }
    return false;
}

static bool fold_add2(OptContext *ctx, TCGOp *op)
{
    /* Note that the high and low parts may be independently swapped. */
    swap_commutative(op->args[0], &op->args[2], &op->args[4]);
    swap_commutative(op->args[1], &op->args[3], &op->args[5]);

    return fold_addsub2(ctx, op, true);
}

static bool fold_and(OptContext *ctx, TCGOp *op)
{
    uint64_t z1, z2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;
    z2 = arg_info(op->args[2])->z_mask;
    ctx->z_mask = z1 & z2;

    /*
     * Sign repetitions are perforce all identical, whether they are 1 or 0.
     * Bitwise operations preserve the relative quantity of the repetitions.
     */
    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer affected bits from it.
     */
    if (arg_is_const(op->args[2])) {
        ctx->a_mask = z1 & ~z2;
    }

    return fold_masks(ctx, op);
}

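/*
 * For illustration: "and t0, t1, $0x0f" where t1 has z_mask == 0x33
 * produces z_mask == 0x03 and a_mask == 0x33 & ~0x0f == 0x30; the
 * nonzero a_mask records that bits 4 and 5 of t1 may still be cleared,
 * so fold_masks cannot reduce the op to a copy.
 */
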
static bool fold_andc(OptContext *ctx, TCGOp *op)
{
    uint64_t z1;

    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_ix_to_not(ctx, op, -1)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer anything from it.
     */
    if (arg_is_const(op->args[2])) {
        uint64_t z2 = ~arg_info(op->args[2])->z_mask;
        ctx->a_mask = z1 & ~z2;
        z1 &= z2;
    }
    ctx->z_mask = z1;

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return fold_masks(ctx, op);
}

static bool fold_brcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[2];
    int i;

    if (swap_commutative(NO_DEST, &op->args[0], &op->args[1])) {
        op->args[2] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond);
    if (i == 0) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }
    if (i > 0) {
        op->opc = INDEX_op_br;
        op->args[0] = op->args[3];
    }
    return false;
}

static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[4];
    TCGArg label = op->args[5];
    int i, inv = 0;

    if (swap_commutative2(&op->args[0], &op->args[2])) {
        op->args[4] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
    if (i >= 0) {
        goto do_brcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
            arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
            goto do_brcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
                                     op->args[2], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            op->opc = INDEX_op_brcond_i32;
            op->args[1] = op->args[2];
            op->args[2] = cond;
            op->args[3] = label;
            break;
        }
        break;

    default:
        break;

    do_brcond_high:
        op->opc = INDEX_op_brcond_i32;
        op->args[0] = op->args[1];
        op->args[1] = op->args[3];
        op->args[2] = cond;
        op->args[3] = label;
        break;

    do_brcond_const:
        if (i == 0) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
        op->opc = INDEX_op_br;
        op->args[0] = label;
        break;
    }
    return false;
}

static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask, sign;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        z_mask = bswap16(z_mask);
        sign = INT16_MIN;
        break;
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        z_mask = bswap32(z_mask);
        sign = INT32_MIN;
        break;
    case INDEX_op_bswap64_i64:
        z_mask = bswap64(z_mask);
        sign = INT64_MIN;
        break;
    default:
        g_assert_not_reached();
    }
    s_mask = smask_from_zmask(z_mask);

    switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
    case TCG_BSWAP_OZ:
        break;
    case TCG_BSWAP_OS:
        /* If the sign bit may be 1, force all the bits above to 1. */
        if (z_mask & sign) {
            z_mask |= sign;
            s_mask = sign << 1;
        }
        break;
    default:
        /* The high bits are undefined: force all bits above the sign to 1. */
        z_mask |= sign << 1;
        s_mask = 0;
        break;
    }
    ctx->z_mask = z_mask;
    ctx->s_mask = s_mask;

    return fold_masks(ctx, op);
}

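/*
 * For illustration: bswap16 of a value with z_mask == 0x00ff yields
 * z_mask == 0xff00; with TCG_BSWAP_OS the (possibly set) bit 15 is the
 * sign, so bit 15 and everything above it are forced on as well.
 */
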
static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
    int nb_oargs = TCGOP_CALLO(op);
    int nb_iargs = TCGOP_CALLI(op);
    int flags, i;

    init_arguments(ctx, op, nb_oargs + nb_iargs);
    copy_propagate(ctx, op, nb_oargs, nb_iargs);

    /* If the function reads or writes globals, reset temp data. */
    flags = tcg_call_flags(op);
    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
        int nb_globals = s->nb_globals;

        for (i = 0; i < nb_globals; i++) {
            if (test_bit(i, ctx->temps_used.l)) {
                reset_ts(&ctx->tcg->temps[i]);
            }
        }
    }

    /* Reset temp data for outputs. */
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
    }

    /* Stop optimizing MB across calls. */
    ctx->prev_mb = NULL;
    return true;
}

static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        if (t != 0) {
            t = do_constant_folding(op->opc, ctx->type, t, 0);
            return tcg_opt_gen_movi(ctx, op, op->args[0], t);
        }
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        z_mask = 31;
        break;
    case TCG_TYPE_I64:
        z_mask = 63;
        break;
    default:
        g_assert_not_reached();
    }
    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
    ctx->s_mask = smask_from_zmask(ctx->z_mask);
    return false;
}

static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        ctx->z_mask = 32 | 31;
        break;
    case TCG_TYPE_I64:
        ctx->z_mask = 64 | 63;
        break;
    default:
        g_assert_not_reached();
    }
    ctx->s_mask = smask_from_zmask(ctx->z_mask);
    return false;
}

static bool fold_deposit(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = deposit64(t1, op->args[3], op->args[4], t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }

    ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask,
                            op->args[3], op->args[4],
                            arg_info(op->args[2])->z_mask);
    return false;
}

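/*
 * For illustration: "deposit t0, t1, t2, 8, 8" inserts the low byte of
 * t2 into bits 15..8 of t1; with constants t1 == 0x1234 and t2 == 0xab
 * this folds to the constant 0xab34.
 */
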
static bool fold_divide(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_x(ctx, op, 1)) {
        return true;
    }
    return false;
}

static bool fold_dup(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;
        t = dup_const(TCGOP_VECE(op), t);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_dup2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32,
                               arg_info(op->args[2])->val);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    if (args_are_copies(op->args[1], op->args[2])) {
        op->opc = INDEX_op_dup_vec;
        TCGOP_VECE(op) = MO_32;
    }
    return false;
}

static bool fold_eqv(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xi_to_not(ctx, op, 0)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return false;
}

static bool fold_extract(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;
    int pos = op->args[2];
    int len = op->args[3];

    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = extract64(t, pos, len);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask_old = arg_info(op->args[1])->z_mask;
    z_mask = extract64(z_mask_old, pos, len);
    if (pos == 0) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    ctx->z_mask = z_mask;
    ctx->s_mask = smask_from_zmask(z_mask);

    return fold_masks(ctx, op);
}

static bool fold_extract2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t v1 = arg_info(op->args[1])->val;
        uint64_t v2 = arg_info(op->args[2])->val;
        int shr = op->args[3];

        if (op->opc == INDEX_op_extract2_i64) {
            v1 >>= shr;
            v2 <<= 64 - shr;
        } else {
            v1 = (uint32_t)v1 >> shr;
            v2 = (uint64_t)((int32_t)v2 << (32 - shr));
        }
        return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2);
    }
    return false;
}

static bool fold_exts(OptContext *ctx, TCGOp *op)
{
    uint64_t s_mask_old, s_mask, z_mask, sign;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask = arg_info(op->args[1])->z_mask;
    s_mask = arg_info(op->args[1])->s_mask;
    s_mask_old = s_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8s):
        sign = INT8_MIN;
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16s):
        sign = INT16_MIN;
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_ext_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32s_i64:
        sign = INT32_MIN;
        z_mask = (uint32_t)z_mask;
        break;
    default:
        g_assert_not_reached();
    }

    if (z_mask & sign) {
        z_mask |= sign;
    }
    s_mask |= sign << 1;

    ctx->z_mask = z_mask;
    ctx->s_mask = s_mask;
    if (!type_change) {
        ctx->a_mask = s_mask & ~s_mask_old;
    }

    return fold_masks(ctx, op);
}

static bool fold_extu(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8u):
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16u):
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extu_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32u_i64:
        z_mask = (uint32_t)z_mask;
        break;
    case INDEX_op_extrh_i64_i32:
        type_change = true;
        z_mask >>= 32;
        break;
    default:
        g_assert_not_reached();
    }

    ctx->z_mask = z_mask;
    ctx->s_mask = smask_from_zmask(z_mask);
    if (!type_change) {
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    return fold_masks(ctx, op);
}

static bool fold_mb(OptContext *ctx, TCGOp *op)
{
    /* Eliminate duplicate and redundant fence instructions. */
    if (ctx->prev_mb) {
        /*
         * Merge two barriers of the same type into one,
         * or a weaker barrier into a stronger one,
         * or two weaker barriers into a stronger one.
         *   mb X; mb Y => mb X|Y
         *   mb; strl => mb; st
         *   ldaq; mb => ld; mb
         *   ldaq; strl => ld; mb; st
         * Other combinations are also merged into a strong
         * barrier.  This is stricter than specified but for
         * the purposes of TCG is better than not optimizing.
         */
        ctx->prev_mb->args[0] |= op->args[0];
        tcg_op_remove(ctx->tcg, op);
    } else {
        ctx->prev_mb = op;
    }
    return true;
}

static bool fold_mov(OptContext *ctx, TCGOp *op)
{
    return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}

static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[5];
    int i;

    if (swap_commutative(NO_DEST, &op->args[1], &op->args[2])) {
        op->args[5] = cond = tcg_swap_cond(cond);
    }
    /*
     * Canonicalize the "false" input reg to match the destination reg so
     * that the tcg backend can implement a "move if true" operation.
     */
    if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) {
        op->args[5] = cond = tcg_invert_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
    if (i >= 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
    }

    ctx->z_mask = arg_info(op->args[3])->z_mask
                | arg_info(op->args[4])->z_mask;
    ctx->s_mask = arg_info(op->args[3])->s_mask
                & arg_info(op->args[4])->s_mask;

    if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
        uint64_t tv = arg_info(op->args[3])->val;
        uint64_t fv = arg_info(op->args[4])->val;
        TCGOpcode opc;

        switch (ctx->type) {
        case TCG_TYPE_I32:
            opc = INDEX_op_setcond_i32;
            break;
        case TCG_TYPE_I64:
            opc = INDEX_op_setcond_i64;
            break;
        default:
            g_assert_not_reached();
        }

        if (tv == 1 && fv == 0) {
            op->opc = opc;
            op->args[3] = cond;
        } else if (fv == 1 && tv == 0) {
            op->opc = opc;
            op->args[3] = tcg_invert_cond(cond);
        }
    }
    return false;
}

static bool fold_mul(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 1)) {
        return true;
    }
    return false;
}

static bool fold_mul_highpart(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_multiply2(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[2], &op->args[3]);

    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
        uint64_t a = arg_info(op->args[2])->val;
        uint64_t b = arg_info(op->args[3])->val;
        uint64_t h, l;
        TCGArg rl, rh;
        TCGOp *op2;

        switch (op->opc) {
        case INDEX_op_mulu2_i32:
            l = (uint64_t)(uint32_t)a * (uint32_t)b;
            h = (int32_t)(l >> 32);
            l = (int32_t)l;
            break;
        case INDEX_op_muls2_i32:
            l = (int64_t)(int32_t)a * (int32_t)b;
            h = l >> 32;
            l = (int32_t)l;
            break;
        case INDEX_op_mulu2_i64:
            mulu64(&l, &h, a, b);
            break;
        case INDEX_op_muls2_i64:
            muls64(&l, &h, a, b);
            break;
        default:
            g_assert_not_reached();
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, l);
        tcg_opt_gen_movi(ctx, op2, rh, h);
        return true;
    }
    return false;
}

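/*
 * For illustration: mulu2_i32 with constant inputs 0x80000000 and 4
 * folds to the pair (rl, rh) == (0, 2), the 64-bit product 0x200000000
 * split into sign-extended 32-bit halves.
 */
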
static bool fold_nand(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return false;
}

static bool fold_neg(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (fold_const1(ctx, op)) {
        return true;
    }

    /* Set to 1 all bits at and above the rightmost possibly-set bit. */
    z_mask = arg_info(op->args[1])->z_mask;
    ctx->z_mask = -(z_mask & -z_mask);

    /*
     * Because of fold_sub_to_neg, we want to always return true,
     * via finish_folding.
     */
    finish_folding(ctx, op);
    return true;
}

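/*
 * Worked example for the z_mask above: if the input z_mask is 0x8,
 * the input is 0 or 8 and the result is 0 or 0xfffffffffffffff8.
 * z_mask & -z_mask isolates the lowest possibly-set bit (0x8), and
 * negating it marks that bit and everything above it as unknown,
 * while the bits below it remain known zero.
 */
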
static bool fold_nor(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_not(ctx, op, 0)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return false;
}

static bool fold_not(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask;

    /* Because of fold_to_not, we want to always return true, via finish. */
    finish_folding(ctx, op);
    return true;
}

static bool fold_or(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    ctx->z_mask = arg_info(op->args[1])->z_mask
                | arg_info(op->args[2])->z_mask;
    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return fold_masks(ctx, op);
}

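/*
 * For illustration, the folds above implement
 *     or x, 0  ->  x        (fold_xi_to_x)
 *     or x, x  ->  x        (fold_xx_to_x)
 * A result bit can be nonzero only if it can be nonzero in either
 * input, hence the union of the two z_masks.
 */
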
static bool fold_orc(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, -1) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_ix_to_not(ctx, op, 0)) {
        return true;
    }

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return false;
}

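/*
 * For illustration, with orc computing x | ~y, the folds above are:
 *     orc x, x   ->  -1         (fold_xx_to_i)
 *     orc x, -1  ->  x          (fold_xi_to_x)
 *     orc 0, y   ->  not y      (fold_ix_to_not)
 */
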
static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs];
    MemOp mop = get_memop(oi);
    int width = 8 * memop_size(mop);

    if (width < 64) {
        ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width);
        if (!(mop & MO_SIGN)) {
            ctx->z_mask = MAKE_64BIT_MASK(0, width);
            ctx->s_mask <<= 1;
        }
    }

    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}

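/*
 * Example of the mask bookkeeping above: an 8-bit unsigned load
 * (width 8, !MO_SIGN) yields z_mask = 0xff; since bit 7 may be set
 * without being a sign bit, one fewer sign repetition is guaranteed
 * than for the signed case, hence s_mask <<= 1.
 */
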
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}

static bool fold_remainder(OptContext *ctx, TCGOp *op)
{
    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_setcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[3];
    int i;

    if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) {
        op->args[3] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);
    if (i >= 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }

    ctx->z_mask = 1;
    ctx->s_mask = smask_from_zmask(1);
    return false;
}

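/*
 * For illustration: with both inputs constant the comparison is
 * evaluated at translation time, e.g.
 *     setcond_i32 dst, 5, 3, TCG_COND_LT  ->  dst <- 0
 * Otherwise only bit 0 of the result can be set, hence z_mask = 1.
 */
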
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[5];
    int i, inv = 0;

    if (swap_commutative2(&op->args[1], &op->args[3])) {
        op->args[5] = cond = tcg_swap_cond(cond);
    }

    i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
    if (i >= 0) {
        goto do_setcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
            arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
            goto do_setcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            goto do_setcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
                                     op->args[4], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            op->args[2] = op->args[3];
            op->args[3] = cond;
            op->opc = INDEX_op_setcond_i32;
            break;
        }
        break;

    default:
        break;

    do_setcond_high:
        op->args[1] = op->args[2];
        op->args[2] = op->args[4];
        op->args[3] = cond;
        op->opc = INDEX_op_setcond_i32;
        break;
    }

    ctx->z_mask = 1;
    ctx->s_mask = smask_from_zmask(1);
    return false;

 do_setcond_const:
    return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}

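/*
 * Example of the LT/GE simplification above: for a double-word
 * compare against zero, the sign lives entirely in the high word, so
 *     setcond2_i32 dst, xl, xh, 0, 0, TCG_COND_LT
 * reduces to
 *     setcond_i32 dst, xh, 0, TCG_COND_LT
 */
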
static bool fold_sextract(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask, s_mask_old;
    int pos = op->args[2];
    int len = op->args[3];

    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = sextract64(t, pos, len);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask = arg_info(op->args[1])->z_mask;
    z_mask = sextract64(z_mask, pos, len);
    ctx->z_mask = z_mask;

    s_mask_old = arg_info(op->args[1])->s_mask;
    s_mask = sextract64(s_mask_old, pos, len);
    s_mask |= MAKE_64BIT_MASK(len, 64 - len);
    ctx->s_mask = s_mask;

    if (pos == 0) {
        ctx->a_mask = s_mask & ~s_mask_old;
    }

    return fold_masks(ctx, op);
}

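/*
 * Worked constant example for the fold above:
 *     sextract(0x80, pos=0, len=8)  ->  0xffffffffffffff80
 * i.e. the 8-bit field is sign-extended and the op becomes a movi.
 */
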
static bool fold_shift(OptContext *ctx, TCGOp *op)
{
    uint64_t s_mask, z_mask, sign;

    if (fold_const2(ctx, op) ||
        fold_ix_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }

    s_mask = arg_info(op->args[1])->s_mask;
    z_mask = arg_info(op->args[1])->z_mask;

    if (arg_is_const(op->args[2])) {
        int sh = arg_info(op->args[2])->val;

        ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh);

        s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh);
        ctx->s_mask = smask_from_smask(s_mask);

        return fold_masks(ctx, op);
    }

    switch (op->opc) {
    CASE_OP_32_64(sar):
        /*
         * Arithmetic right shift will not reduce the number of
         * input sign repetitions.
         */
        ctx->s_mask = s_mask;
        break;
    CASE_OP_32_64(shr):
        /*
         * If the sign bit is known zero, then logical right shift
         * will not reduce the number of input sign repetitions.
         */
        sign = (s_mask & -s_mask) >> 1;
        if (!(z_mask & sign)) {
            ctx->s_mask = s_mask;
        }
        break;
    default:
        break;
    }

    return false;
}

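/*
 * For illustration, the folds above cover all five shift/rotate ops:
 *     shl 0, y  ->  0        (fold_ix_to_i)
 *     shl x, 0  ->  x        (fold_xi_to_x)
 * and likewise for shr, sar, rotl and rotr.
 */
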
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
{
    TCGOpcode neg_op;
    bool have_neg;

    if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
        return false;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        neg_op = INDEX_op_neg_i32;
        have_neg = TCG_TARGET_HAS_neg_i32;
        break;
    case TCG_TYPE_I64:
        neg_op = INDEX_op_neg_i64;
        have_neg = TCG_TARGET_HAS_neg_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        neg_op = INDEX_op_neg_vec;
        have_neg = (TCG_TARGET_HAS_neg_vec &&
                    tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
        break;
    default:
        g_assert_not_reached();
    }
    if (have_neg) {
        op->opc = neg_op;
        op->args[1] = op->args[2];
        return fold_neg(ctx, op);
    }
    return false;
}

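/*
 * Example of the transformation above, applied only when the backend
 * provides a neg opcode for the operation's type:
 *     sub dst, 0, x  ->  neg dst, x
 */
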
/* We cannot as yet do_constant_folding with vectors. */
static bool fold_sub_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_sub_to_neg(ctx, op)) {
        return true;
    }
    return false;
}

static bool fold_sub(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op) || fold_sub_vec(ctx, op);
}

static bool fold_sub2(OptContext *ctx, TCGOp *op)
{
    return fold_addsub2(ctx, op, false);
}

static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
{
    /* We can't do any folding with a load, but we can record bits. */
    switch (op->opc) {
    CASE_OP_32_64(ld8s):
        ctx->s_mask = MAKE_64BIT_MASK(8, 56);
        break;
    CASE_OP_32_64(ld8u):
        ctx->z_mask = MAKE_64BIT_MASK(0, 8);
        ctx->s_mask = MAKE_64BIT_MASK(9, 55);
        break;
    CASE_OP_32_64(ld16s):
        ctx->s_mask = MAKE_64BIT_MASK(16, 48);
        break;
    CASE_OP_32_64(ld16u):
        ctx->z_mask = MAKE_64BIT_MASK(0, 16);
        ctx->s_mask = MAKE_64BIT_MASK(17, 47);
        break;
    case INDEX_op_ld32s_i64:
        ctx->s_mask = MAKE_64BIT_MASK(32, 32);
        break;
    case INDEX_op_ld32u_i64:
        ctx->z_mask = MAKE_64BIT_MASK(0, 32);
        ctx->s_mask = MAKE_64BIT_MASK(33, 31);
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}

static bool fold_xor(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_xi_to_not(ctx, op, -1)) {
        return true;
    }

    ctx->z_mask = arg_info(op->args[1])->z_mask
                | arg_info(op->args[2])->z_mask;
    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return fold_masks(ctx, op);
}

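/*
 * For illustration, the folds above implement the identities
 *     xor x, x   ->  0          (fold_xx_to_i)
 *     xor x, 0   ->  x          (fold_xi_to_x)
 *     xor x, -1  ->  not x      (fold_xi_to_not)
 */
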
/* Propagate constants and copies, fold constant expressions. */
void tcg_optimize(TCGContext *s)
{
    int nb_temps, i;
    TCGOp *op, *op_next;
    OptContext ctx = { .tcg = s };

    /*
     * Array VALS has an element for each temp.
     * If this temp holds a constant, then its value is kept in the
     * corresponding VALS element.  If this temp is a copy of other
     * temps, then the other copies are available through the doubly
     * linked circular list.
     */

    nb_temps = s->nb_temps;
    for (i = 0; i < nb_temps; ++i) {
        s->temps[i].state_ptr = NULL;
    }

    QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) {
        TCGOpcode opc = op->opc;
        const TCGOpDef *def;
        bool done = false;

        /* Calls are special. */
        if (opc == INDEX_op_call) {
            fold_call(&ctx, op);
            continue;
        }

        def = &tcg_op_defs[opc];
        init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs);
        copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs);

        /* Pre-compute the type of the operation. */
        if (def->flags & TCG_OPF_VECTOR) {
            ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op);
        } else if (def->flags & TCG_OPF_64BIT) {
            ctx.type = TCG_TYPE_I64;
        } else {
            ctx.type = TCG_TYPE_I32;
        }

        /* Assume all bits affected, no bits known zero, no sign reps. */
        ctx.a_mask = -1;
        ctx.z_mask = -1;
        ctx.s_mask = 0;

        /*
         * Process each opcode.
         * Sorted alphabetically by opcode as much as possible.
         */
        switch (opc) {
        CASE_OP_32_64(add):
            done = fold_add(&ctx, op);
            break;
        case INDEX_op_add_vec:
            done = fold_add_vec(&ctx, op);
            break;
        CASE_OP_32_64(add2):
            done = fold_add2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(and):
            done = fold_and(&ctx, op);
            break;
        CASE_OP_32_64_VEC(andc):
            done = fold_andc(&ctx, op);
            break;
        CASE_OP_32_64(brcond):
            done = fold_brcond(&ctx, op);
            break;
        case INDEX_op_brcond2_i32:
            done = fold_brcond2(&ctx, op);
            break;
        CASE_OP_32_64(bswap16):
        CASE_OP_32_64(bswap32):
        case INDEX_op_bswap64_i64:
            done = fold_bswap(&ctx, op);
            break;
        CASE_OP_32_64(clz):
        CASE_OP_32_64(ctz):
            done = fold_count_zeros(&ctx, op);
            break;
        CASE_OP_32_64(ctpop):
            done = fold_ctpop(&ctx, op);
            break;
        CASE_OP_32_64(deposit):
            done = fold_deposit(&ctx, op);
            break;
        CASE_OP_32_64(div):
        CASE_OP_32_64(divu):
            done = fold_divide(&ctx, op);
            break;
        case INDEX_op_dup_vec:
            done = fold_dup(&ctx, op);
            break;
        case INDEX_op_dup2_vec:
            done = fold_dup2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(eqv):
            done = fold_eqv(&ctx, op);
            break;
        CASE_OP_32_64(extract):
            done = fold_extract(&ctx, op);
            break;
        CASE_OP_32_64(extract2):
            done = fold_extract2(&ctx, op);
            break;
        CASE_OP_32_64(ext8s):
        CASE_OP_32_64(ext16s):
        case INDEX_op_ext32s_i64:
        case INDEX_op_ext_i32_i64:
            done = fold_exts(&ctx, op);
            break;
        CASE_OP_32_64(ext8u):
        CASE_OP_32_64(ext16u):
        case INDEX_op_ext32u_i64:
        case INDEX_op_extu_i32_i64:
        case INDEX_op_extrl_i64_i32:
        case INDEX_op_extrh_i64_i32:
            done = fold_extu(&ctx, op);
            break;
        CASE_OP_32_64(ld8s):
        CASE_OP_32_64(ld8u):
        CASE_OP_32_64(ld16s):
        CASE_OP_32_64(ld16u):
        case INDEX_op_ld32s_i64:
        case INDEX_op_ld32u_i64:
            done = fold_tcg_ld(&ctx, op);
            break;
        case INDEX_op_mb:
            done = fold_mb(&ctx, op);
            break;
        CASE_OP_32_64_VEC(mov):
            done = fold_mov(&ctx, op);
            break;
        CASE_OP_32_64(movcond):
            done = fold_movcond(&ctx, op);
            break;
        CASE_OP_32_64(mul):
            done = fold_mul(&ctx, op);
            break;
        CASE_OP_32_64(mulsh):
        CASE_OP_32_64(muluh):
            done = fold_mul_highpart(&ctx, op);
            break;
        CASE_OP_32_64(muls2):
        CASE_OP_32_64(mulu2):
            done = fold_multiply2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(nand):
            done = fold_nand(&ctx, op);
            break;
        CASE_OP_32_64(neg):
            done = fold_neg(&ctx, op);
            break;
        CASE_OP_32_64_VEC(nor):
            done = fold_nor(&ctx, op);
            break;
        CASE_OP_32_64_VEC(not):
            done = fold_not(&ctx, op);
            break;
        CASE_OP_32_64_VEC(or):
            done = fold_or(&ctx, op);
            break;
        CASE_OP_32_64_VEC(orc):
            done = fold_orc(&ctx, op);
            break;
        case INDEX_op_qemu_ld_i32:
        case INDEX_op_qemu_ld_i64:
            done = fold_qemu_ld(&ctx, op);
            break;
        case INDEX_op_qemu_st_i32:
        case INDEX_op_qemu_st8_i32:
        case INDEX_op_qemu_st_i64:
            done = fold_qemu_st(&ctx, op);
            break;
        CASE_OP_32_64(rem):
        CASE_OP_32_64(remu):
            done = fold_remainder(&ctx, op);
            break;
        CASE_OP_32_64(rotl):
        CASE_OP_32_64(rotr):
        CASE_OP_32_64(sar):
        CASE_OP_32_64(shl):
        CASE_OP_32_64(shr):
            done = fold_shift(&ctx, op);
            break;
        CASE_OP_32_64(setcond):
            done = fold_setcond(&ctx, op);
            break;
        case INDEX_op_setcond2_i32:
            done = fold_setcond2(&ctx, op);
            break;
        CASE_OP_32_64(sextract):
            done = fold_sextract(&ctx, op);
            break;
        CASE_OP_32_64(sub):
            done = fold_sub(&ctx, op);
            break;
        case INDEX_op_sub_vec:
            done = fold_sub_vec(&ctx, op);
            break;
        CASE_OP_32_64(sub2):
            done = fold_sub2(&ctx, op);
            break;
        CASE_OP_32_64_VEC(xor):
            done = fold_xor(&ctx, op);
            break;
        default:
            break;
        }

        if (!done) {
            finish_folding(&ctx, op);
        }
    }
}