/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
757e725b | 26 | #include "qemu/osdep.h" |
dcb32f1d | 27 | #include "tcg/tcg-op.h" |
90163900 | 28 | #include "tcg-internal.h" |
8f2e8c07 | 29 | |
/* Expand to matching case labels for the _i32 and _i64 variants of opcode x. */
#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

/* As CASE_OP_32_64, but additionally matching the _vec variant. */
#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)
38 | ||
/* Per-temporary state tracked by the optimizer within one basic block. */
typedef struct TempOptInfo {
    bool is_const;          /* true when the temp holds the constant 'val' */
    TCGTemp *prev_copy;     /* circular doubly-linked list of known copies; */
    TCGTemp *next_copy;     /* a singleton list means "no copies known" */
    uint64_t val;           /* constant value; meaningful only if is_const */
    uint64_t z_mask;        /* mask bit is 0 if and only if value bit is 0 */
} TempOptInfo;
22613af4 | 46 | |
/* State threaded through the optimization pass. */
typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;         /* NOTE(review): presumably the pending memory
                               barrier op; cleared at BB end — confirm */
    TCGTempSet temps_used;  /* temps whose TempOptInfo is initialized */

    /* In flight values from optimization. */
    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
    TCGType type;     /* type of the op currently being folded */
} OptContext;
57 | ||
/* Return the optimizer state attached to temp @ts. */
static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}
62 | ||
/* Return the optimizer state for the temp behind argument @arg. */
static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}
67 | ||
/* True if @ts is known to hold a constant value. */
static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}
72 | ||
/* True if argument @arg is known to hold a constant value. */
static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}
77 | ||
/* True if @ts is on a non-singleton copy list, i.e. has at least one copy. */
static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}
82 | ||
/* Reset TEMP's state, possibly removing the temp for the list of copies. */
static void reset_ts(TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TempOptInfo *pi = ts_info(ti->prev_copy);
    TempOptInfo *ni = ts_info(ti->next_copy);

    /* Unlink ts from the circular copy list ... */
    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    /* ... and make it a singleton list again. */
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;   /* nothing known: every bit may be set */
}
97 | ||
/* As reset_ts, but taking the temp via its TCGArg encoding. */
static void reset_temp(TCGArg arg)
{
    reset_ts(arg_temp(arg));
}
102 | ||
/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    /* Each temp is initialized at most once per pass. */
    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    /* Reuse a TempOptInfo left over from a previous translation, if any. */
    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    /* Start as a singleton copy list. */
    ti->next_copy = ts;
    ti->prev_copy = ts;
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        if (TCG_TARGET_REG_BITS > 32 && ts->type == TCG_TYPE_I32) {
            /* High bits of a 32-bit quantity are garbage. */
            ti->z_mask |= ~0xffffffffull;
        }
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
    }
}
135 | ||
6349039d | 136 | static TCGTemp *find_better_copy(TCGContext *s, TCGTemp *ts) |
e590d4e6 | 137 | { |
4c868ce6 | 138 | TCGTemp *i, *g, *l; |
e590d4e6 | 139 | |
4c868ce6 RH |
140 | /* If this is already readonly, we can't do better. */ |
141 | if (temp_readonly(ts)) { | |
6349039d | 142 | return ts; |
e590d4e6 AJ |
143 | } |
144 | ||
4c868ce6 | 145 | g = l = NULL; |
6349039d | 146 | for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) { |
4c868ce6 | 147 | if (temp_readonly(i)) { |
e590d4e6 | 148 | return i; |
4c868ce6 RH |
149 | } else if (i->kind > ts->kind) { |
150 | if (i->kind == TEMP_GLOBAL) { | |
151 | g = i; | |
152 | } else if (i->kind == TEMP_LOCAL) { | |
153 | l = i; | |
e590d4e6 AJ |
154 | } |
155 | } | |
156 | } | |
157 | ||
4c868ce6 RH |
158 | /* If we didn't find a better representation, return the same temp. */ |
159 | return g ? g : l ? l : ts; | |
e590d4e6 AJ |
160 | } |
161 | ||
6349039d | 162 | static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2) |
e590d4e6 | 163 | { |
6349039d | 164 | TCGTemp *i; |
e590d4e6 | 165 | |
6349039d | 166 | if (ts1 == ts2) { |
e590d4e6 AJ |
167 | return true; |
168 | } | |
169 | ||
6349039d | 170 | if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) { |
e590d4e6 AJ |
171 | return false; |
172 | } | |
173 | ||
6349039d RH |
174 | for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) { |
175 | if (i == ts2) { | |
e590d4e6 AJ |
176 | return true; |
177 | } | |
178 | } | |
179 | ||
180 | return false; | |
181 | } | |
182 | ||
/* As ts_are_copies, but taking the temps via their TCGArg encodings. */
static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}
187 | ||
/*
 * Rewrite @op as a mov from @src to @dst, or remove it entirely when the
 * two are already known copies.  Updates the copy lists and the known-zero
 * mask of the destination.  Always returns true ("processing complete").
 */
static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    uint64_t z_mask;
    TCGOpcode new_op;

    /* If the copy already exists, the op is redundant. */
    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    /* Note: must reset before reading di, as reset_ts rewrites the info. */
    reset_ts(dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    /* Select the mov opcode matching the current op type. */
    switch (ctx->type) {
    case TCG_TYPE_I32:
        new_op = INDEX_op_mov_i32;
        break;
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    /* The destination inherits the source's known-zero bits. */
    z_mask = si->z_mask;
    if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) {
        /* High bits of the destination are now garbage. */
        z_mask |= ~0xffffffffull;
    }
    di->z_mask = z_mask;

    /* Record the copy relationship only for same-typed temps. */
    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        /* Splice dst into src's circular copy list, right after src. */
        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;
    }
    return true;
}
245 | ||
/* Fold @op to a mov of the canonical constant temp holding @val. */
static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    TCGTemp *tv = tcg_constant_internal(ctx->type, val);

    init_ts_info(ctx, tv);
    return tcg_opt_gen_mov(ctx, op, dst, temp_arg(tv));
}
255 | ||
/*
 * Evaluate opcode @op applied to constant operands @x and @y at full
 * 64-bit width.  Truncation/sign-extension of 32-bit results is handled
 * by the caller (do_constant_folding).
 */
static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64(and):
        return x & y;

    CASE_OP_32_64(or):
        return x | y;

    CASE_OP_32_64(xor):
        return x ^ y;

    /* Shift counts are masked to the type width to avoid C UB. */
    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64(andc):
        return x & ~y;

    CASE_OP_32_64(orc):
        return x | ~y;

    CASE_OP_32_64(eqv):
        return ~(x ^ y);

    CASE_OP_32_64(nand):
        return ~(x & y);

    CASE_OP_32_64(nor):
        return ~(x | y);

    /* For clz/ctz, @y is the value to return when @x is zero. */
    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    /* For bswap, @y carries TCG_BSWAP_* flags for optional sign extension. */
    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        fprintf(stderr,
                "Unrecognized operation %d in do_constant_folding.\n", op);
        tcg_abort();
    }
}
420 | ||
67f84c96 RH |
421 | static uint64_t do_constant_folding(TCGOpcode op, TCGType type, |
422 | uint64_t x, uint64_t y) | |
53108fb5 | 423 | { |
54795544 | 424 | uint64_t res = do_constant_folding_2(op, x, y); |
67f84c96 | 425 | if (type == TCG_TYPE_I32) { |
29f3ff8d | 426 | res = (int32_t)res; |
53108fb5 | 427 | } |
53108fb5 KB |
428 | return res; |
429 | } | |
430 | ||
9519da7e RH |
431 | static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c) |
432 | { | |
433 | switch (c) { | |
434 | case TCG_COND_EQ: | |
435 | return x == y; | |
436 | case TCG_COND_NE: | |
437 | return x != y; | |
438 | case TCG_COND_LT: | |
439 | return (int32_t)x < (int32_t)y; | |
440 | case TCG_COND_GE: | |
441 | return (int32_t)x >= (int32_t)y; | |
442 | case TCG_COND_LE: | |
443 | return (int32_t)x <= (int32_t)y; | |
444 | case TCG_COND_GT: | |
445 | return (int32_t)x > (int32_t)y; | |
446 | case TCG_COND_LTU: | |
447 | return x < y; | |
448 | case TCG_COND_GEU: | |
449 | return x >= y; | |
450 | case TCG_COND_LEU: | |
451 | return x <= y; | |
452 | case TCG_COND_GTU: | |
453 | return x > y; | |
454 | default: | |
455 | tcg_abort(); | |
456 | } | |
457 | } | |
458 | ||
459 | static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c) | |
460 | { | |
461 | switch (c) { | |
462 | case TCG_COND_EQ: | |
463 | return x == y; | |
464 | case TCG_COND_NE: | |
465 | return x != y; | |
466 | case TCG_COND_LT: | |
467 | return (int64_t)x < (int64_t)y; | |
468 | case TCG_COND_GE: | |
469 | return (int64_t)x >= (int64_t)y; | |
470 | case TCG_COND_LE: | |
471 | return (int64_t)x <= (int64_t)y; | |
472 | case TCG_COND_GT: | |
473 | return (int64_t)x > (int64_t)y; | |
474 | case TCG_COND_LTU: | |
475 | return x < y; | |
476 | case TCG_COND_GEU: | |
477 | return x >= y; | |
478 | case TCG_COND_LEU: | |
479 | return x <= y; | |
480 | case TCG_COND_GTU: | |
481 | return x > y; | |
482 | default: | |
483 | tcg_abort(); | |
484 | } | |
485 | } | |
486 | ||
487 | static bool do_constant_folding_cond_eq(TCGCond c) | |
488 | { | |
489 | switch (c) { | |
490 | case TCG_COND_GT: | |
491 | case TCG_COND_LTU: | |
492 | case TCG_COND_LT: | |
493 | case TCG_COND_GTU: | |
494 | case TCG_COND_NE: | |
495 | return 0; | |
496 | case TCG_COND_GE: | |
497 | case TCG_COND_GEU: | |
498 | case TCG_COND_LE: | |
499 | case TCG_COND_LEU: | |
500 | case TCG_COND_EQ: | |
501 | return 1; | |
502 | default: | |
503 | tcg_abort(); | |
504 | } | |
505 | } | |
506 | ||
/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    /*
     * Note: val is read before checking is_const; the values are only
     * meaningful on the branches below that have verified constness.
     */
    uint64_t xv = arg_info(x)->val;
    uint64_t yv = arg_info(y)->val;

    if (arg_is_const(x) && arg_is_const(y)) {
        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const(y) && yv == 0) {
        switch (c) {
        case TCG_COND_LTU:
            /* Unsigned < 0 is never true. */
            return 0;
        case TCG_COND_GEU:
            /* Unsigned >= 0 is always true. */
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}
541 | ||
/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 * Operands are 64-bit values split into 32-bit lo/hi argument pairs.
 */
static int do_constant_folding_cond2(TCGArg *p1, TCGArg *p2, TCGCond c)
{
    TCGArg al = p1[0], ah = p1[1];
    TCGArg bl = p2[0], bh = p2[1];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);
            /* Both sides constant: evaluate directly. */
            return do_constant_folding_cond_64(a, b, c);
        }
        if (b == 0) {
            /* Comparisons against zero have fixed unsigned outcomes. */
            switch (c) {
            case TCG_COND_LTU:
                return 0;
            case TCG_COND_GEU:
                return 1;
            default:
                break;
            }
        }
    }
    /* Both halves copies of each other: operands are equal. */
    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        return do_constant_folding_cond_eq(c);
    }
    return -1;
}
578 | ||
24c9ae4e RH |
579 | static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2) |
580 | { | |
581 | TCGArg a1 = *p1, a2 = *p2; | |
582 | int sum = 0; | |
6349039d RH |
583 | sum += arg_is_const(a1); |
584 | sum -= arg_is_const(a2); | |
24c9ae4e RH |
585 | |
586 | /* Prefer the constant in second argument, and then the form | |
587 | op a, a, b, which is better handled on non-RISC hosts. */ | |
588 | if (sum > 0 || (sum == 0 && dest == a2)) { | |
589 | *p1 = a2; | |
590 | *p2 = a1; | |
591 | return true; | |
592 | } | |
593 | return false; | |
594 | } | |
595 | ||
0bfcb865 RH |
596 | static bool swap_commutative2(TCGArg *p1, TCGArg *p2) |
597 | { | |
598 | int sum = 0; | |
6349039d RH |
599 | sum += arg_is_const(p1[0]); |
600 | sum += arg_is_const(p1[1]); | |
601 | sum -= arg_is_const(p2[0]); | |
602 | sum -= arg_is_const(p2[1]); | |
0bfcb865 RH |
603 | if (sum > 0) { |
604 | TCGArg t; | |
605 | t = p1[0], p1[0] = p2[0], p2[0] = t; | |
606 | t = p1[1], p1[1] = p2[1], p2[1] = t; | |
607 | return true; | |
608 | } | |
609 | return false; | |
610 | } | |
611 | ||
e2577ea2 RH |
612 | static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args) |
613 | { | |
614 | for (int i = 0; i < nb_args; i++) { | |
615 | TCGTemp *ts = arg_temp(op->args[i]); | |
616 | if (ts) { | |
617 | init_ts_info(ctx, ts); | |
618 | } | |
619 | } | |
620 | } | |
621 | ||
8774dded RH |
622 | static void copy_propagate(OptContext *ctx, TCGOp *op, |
623 | int nb_oargs, int nb_iargs) | |
624 | { | |
625 | TCGContext *s = ctx->tcg; | |
626 | ||
627 | for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) { | |
628 | TCGTemp *ts = arg_temp(op->args[i]); | |
629 | if (ts && ts_is_copy(ts)) { | |
630 | op->args[i] = temp_arg(find_better_copy(s, ts)); | |
631 | } | |
632 | } | |
633 | } | |
634 | ||
/* Record post-op information for an op that was not folded away. */
static void finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    /*
     * For an opcode that ends a BB, reset all temp data.
     * We do no cross-BB optimization.
     */
    if (def->flags & TCG_OPF_BB_END) {
        memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
        ctx->prev_mb = NULL;
        return;
    }

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
        /*
         * Save the corresponding known-zero bits mask for the
         * first output argument (only one supported so far).
         */
        if (i == 0) {
            arg_info(op->args[i])->z_mask = ctx->z_mask;
        }
    }
}
662 | ||
2f9f08ba RH |
663 | /* |
664 | * The fold_* functions return true when processing is complete, | |
665 | * usually by folding the operation to a constant or to a copy, | |
666 | * and calling tcg_opt_gen_{mov,movi}. They may do other things, | |
667 | * like collect information about the value produced, for use in | |
668 | * optimizing a subsequent operation. | |
669 | * | |
670 | * These first fold_* functions are all helpers, used by other | |
671 | * folders for more specific operations. | |
672 | */ | |
673 | ||
674 | static bool fold_const1(OptContext *ctx, TCGOp *op) | |
675 | { | |
676 | if (arg_is_const(op->args[1])) { | |
677 | uint64_t t; | |
678 | ||
679 | t = arg_info(op->args[1])->val; | |
67f84c96 | 680 | t = do_constant_folding(op->opc, ctx->type, t, 0); |
2f9f08ba RH |
681 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); |
682 | } | |
683 | return false; | |
684 | } | |
685 | ||
686 | static bool fold_const2(OptContext *ctx, TCGOp *op) | |
687 | { | |
688 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | |
689 | uint64_t t1 = arg_info(op->args[1])->val; | |
690 | uint64_t t2 = arg_info(op->args[2])->val; | |
691 | ||
67f84c96 | 692 | t1 = do_constant_folding(op->opc, ctx->type, t1, t2); |
2f9f08ba RH |
693 | return tcg_opt_gen_movi(ctx, op, op->args[0], t1); |
694 | } | |
695 | return false; | |
696 | } | |
697 | ||
/*
 * Use the accumulated ctx->a_mask / ctx->z_mask to fold @op to a
 * constant zero or to a copy of its first input, when possible.
 */
static bool fold_masks(OptContext *ctx, TCGOp *op)
{
    uint64_t a_mask = ctx->a_mask;
    uint64_t z_mask = ctx->z_mask;

    /*
     * 32-bit ops generate 32-bit results.  For the result is zero test
     * below, we can ignore high bits, but for further optimizations we
     * need to record that the high bits contain garbage.
     */
    if (ctx->type == TCG_TYPE_I32) {
        ctx->z_mask |= MAKE_64BIT_MASK(32, 32);
        a_mask &= MAKE_64BIT_MASK(0, 32);
        z_mask &= MAKE_64BIT_MASK(0, 32);
    }

    if (z_mask == 0) {
        /* All result bits known zero: fold to constant 0. */
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }
    if (a_mask == 0) {
        /* No bit differs from the first input: fold to a copy. */
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}
722 | ||
/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    /* Pick the NOT opcode and availability flag for the current type. */
    switch (ctx->type) {
    case TCG_TYPE_I32:
        not_op = INDEX_op_not_i32;
        have_not = TCG_TARGET_HAS_not_i32;
        break;
    case TCG_TYPE_I64:
        not_op = INDEX_op_not_i64;
        have_not = TCG_TARGET_HAS_not_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        /* The surviving operand is at index @idx. */
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}
759 | ||
da48e272 RH |
760 | /* If the binary operation has first argument @i, fold to @i. */ |
761 | static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i) | |
762 | { | |
763 | if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) { | |
764 | return tcg_opt_gen_movi(ctx, op, op->args[0], i); | |
765 | } | |
766 | return false; | |
767 | } | |
768 | ||
0e0a32ba RH |
769 | /* If the binary operation has first argument @i, fold to NOT. */ |
770 | static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i) | |
771 | { | |
772 | if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) { | |
773 | return fold_to_not(ctx, op, 2); | |
774 | } | |
775 | return false; | |
776 | } | |
777 | ||
e8679955 RH |
778 | /* If the binary operation has second argument @i, fold to @i. */ |
779 | static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i) | |
780 | { | |
781 | if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { | |
782 | return tcg_opt_gen_movi(ctx, op, op->args[0], i); | |
783 | } | |
784 | return false; | |
785 | } | |
786 | ||
a63ce0e9 RH |
787 | /* If the binary operation has second argument @i, fold to identity. */ |
788 | static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i) | |
789 | { | |
790 | if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { | |
791 | return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); | |
792 | } | |
793 | return false; | |
794 | } | |
795 | ||
0e0a32ba RH |
796 | /* If the binary operation has second argument @i, fold to NOT. */ |
797 | static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i) | |
798 | { | |
799 | if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) { | |
800 | return fold_to_not(ctx, op, 1); | |
801 | } | |
802 | return false; | |
803 | } | |
804 | ||
cbe42fb2 RH |
805 | /* If the binary operation has both arguments equal, fold to @i. */ |
806 | static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i) | |
807 | { | |
808 | if (args_are_copies(op->args[1], op->args[2])) { | |
809 | return tcg_opt_gen_movi(ctx, op, op->args[0], i); | |
810 | } | |
811 | return false; | |
812 | } | |
813 | ||
ca7bb049 RH |
814 | /* If the binary operation has both arguments equal, fold to identity. */ |
815 | static bool fold_xx_to_x(OptContext *ctx, TCGOp *op) | |
816 | { | |
817 | if (args_are_copies(op->args[1], op->args[2])) { | |
818 | return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); | |
819 | } | |
820 | return false; | |
821 | } | |
822 | ||
2f9f08ba RH |
823 | /* |
824 | * These outermost fold_<op> functions are sorted alphabetically. | |
ca7bb049 RH |
825 | * |
826 | * The ordering of the transformations should be: | |
827 | * 1) those that produce a constant | |
828 | * 2) those that produce a copy | |
829 | * 3) those that produce information about the result value. | |
2f9f08ba RH |
830 | */ |
831 | ||
832 | static bool fold_add(OptContext *ctx, TCGOp *op) | |
833 | { | |
a63ce0e9 RH |
834 | if (fold_const2(ctx, op) || |
835 | fold_xi_to_x(ctx, op, 0)) { | |
836 | return true; | |
837 | } | |
838 | return false; | |
2f9f08ba RH |
839 | } |
840 | ||
/*
 * Fold a double-word 32-bit add/sub (args: rl, rh, al, ah, bl, bh)
 * when all four input halves are constant.  Emits a second mov op
 * for the high half of the result.
 */
static bool fold_addsub2_i32(OptContext *ctx, TCGOp *op, bool add)
{
    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) &&
        arg_is_const(op->args[4]) && arg_is_const(op->args[5])) {
        uint32_t al = arg_info(op->args[2])->val;
        uint32_t ah = arg_info(op->args[3])->val;
        uint32_t bl = arg_info(op->args[4])->val;
        uint32_t bh = arg_info(op->args[5])->val;
        uint64_t a = ((uint64_t)ah << 32) | al;
        uint64_t b = ((uint64_t)bh << 32) | bl;
        TCGArg rl, rh;
        /* Second op carries the high half; inserted before @op. */
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_mov_i32);

        if (add) {
            a += b;
        } else {
            a -= b;
        }

        rl = op->args[0];
        rh = op->args[1];
        tcg_opt_gen_movi(ctx, op, rl, (int32_t)a);
        tcg_opt_gen_movi(ctx, op2, rh, (int32_t)(a >> 32));
        return true;
    }
    return false;
}
868 | ||
/* Fold add2_i32 when all four input halves are constant. */
static bool fold_add2_i32(OptContext *ctx, TCGOp *op)
{
    return fold_addsub2_i32(ctx, op, true);
}
873 | ||
2f9f08ba RH |
/* Fold and: constants, x & 0 -> 0, x & -1 -> x, x & x -> x, plus masks. */
static bool fold_and(OptContext *ctx, TCGOp *op)
{
    uint64_t z1, z2;

    if (fold_const2(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    /* Result can only have bits set where both inputs may. */
    z1 = arg_info(op->args[1])->z_mask;
    z2 = arg_info(op->args[2])->z_mask;
    ctx->z_mask = z1 & z2;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer affected bits from it.
     */
    if (arg_is_const(op->args[2])) {
        ctx->a_mask = z1 & ~z2;
    }

    return fold_masks(ctx, op);
}
899 | ||
/* Fold andc: constants, x &~ x -> 0, x &~ 0 -> x, -1 &~ y -> not, masks. */
static bool fold_andc(OptContext *ctx, TCGOp *op)
{
    uint64_t z1;

    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_ix_to_not(ctx, op, -1)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer anything from it.
     */
    if (arg_is_const(op->args[2])) {
        uint64_t z2 = ~arg_info(op->args[2])->z_mask;
        ctx->a_mask = z1 & ~z2;
        z1 &= z2;
    }
    ctx->z_mask = z1;

    return fold_masks(ctx, op);
}
926 | ||
079b0804 RH |
927 | static bool fold_brcond(OptContext *ctx, TCGOp *op) |
928 | { | |
929 | TCGCond cond = op->args[2]; | |
67f84c96 | 930 | int i = do_constant_folding_cond(ctx->type, op->args[0], op->args[1], cond); |
079b0804 RH |
931 | |
932 | if (i == 0) { | |
933 | tcg_op_remove(ctx->tcg, op); | |
934 | return true; | |
935 | } | |
936 | if (i > 0) { | |
937 | op->opc = INDEX_op_br; | |
938 | op->args[0] = op->args[3]; | |
939 | } | |
940 | return false; | |
941 | } | |
942 | ||
764d2aba RH |
/*
 * Fold a double-word conditional branch (brcond2_i32).
 * args: al, ah, bl, bh, cond, label.
 * Returns true only when the opcode is removed outright.
 */
static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[4];
    int i = do_constant_folding_cond2(&op->args[0], &op->args[2], cond);
    TCGArg label = op->args[5];
    int inv = 0;

    if (i >= 0) {
        /* Entire comparison folded to a constant. */
        goto do_brcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == 0 &&
            arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0) {
            goto do_brcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
                                     op->args[2], cond);
        switch (i ^ inv) {
        case 0:
            /* Low halves differ (EQ) / equal (NE): result is constant. */
            goto do_brcond_const;
        case 1:
            /* Low halves decided nothing: test the high halves. */
            goto do_brcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            /* High halves equal: compare only the low halves. */
            op->opc = INDEX_op_brcond_i32;
            op->args[1] = op->args[2];
            op->args[2] = cond;
            op->args[3] = label;
            break;
        }
        break;

    default:
        break;

    do_brcond_high:
        /* Reduce to a single-word compare of the high halves. */
        op->opc = INDEX_op_brcond_i32;
        op->args[0] = op->args[1];
        op->args[1] = op->args[3];
        op->args[2] = cond;
        op->args[3] = label;
        break;

    do_brcond_const:
        if (i == 0) {
            /* Never taken: remove the branch. */
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
        /* Always taken: convert to an unconditional branch. */
        op->opc = INDEX_op_br;
        op->args[0] = label;
        break;
    }
    return false;
}
1020 | ||
09bacdc2 RH |
/*
 * Fold bswap16/32/64.  Constant-fold if the input is known; otherwise
 * permute the known-zero mask and apply the TCG_BSWAP_OZ/OS flags to
 * the bits above the swapped width.
 */
static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, sign;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    /* The swap permutes bytes, so permute the known-zero mask too. */
    z_mask = arg_info(op->args[1])->z_mask;
    switch (op->opc) {
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        z_mask = bswap16(z_mask);
        sign = INT16_MIN;
        break;
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        z_mask = bswap32(z_mask);
        sign = INT32_MIN;
        break;
    case INDEX_op_bswap64_i64:
        z_mask = bswap64(z_mask);
        sign = INT64_MIN;
        break;
    default:
        g_assert_not_reached();
    }

    switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
    case TCG_BSWAP_OZ:
        /* High bits are zero-extended: z_mask is already correct. */
        break;
    case TCG_BSWAP_OS:
        /* If the sign bit may be 1, force all the bits above to 1. */
        if (z_mask & sign) {
            z_mask |= sign;
        }
        break;
    default:
        /* The high bits are undefined: force all bits above the sign to 1. */
        z_mask |= sign << 1;
        break;
    }
    ctx->z_mask = z_mask;

    return fold_masks(ctx, op);
}
1070 | ||
5cf32be7 RH |
/*
 * Handle a call: propagate constants/copies into the arguments and
 * invalidate everything the call may clobber.  Always returns true,
 * as the call op itself is never folded away.
 */
static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
    int nb_oargs = TCGOP_CALLO(op);
    int nb_iargs = TCGOP_CALLI(op);
    int flags, i;

    init_arguments(ctx, op, nb_oargs + nb_iargs);
    copy_propagate(ctx, op, nb_oargs, nb_iargs);

    /* If the function reads or writes globals, reset temp data. */
    flags = tcg_call_flags(op);
    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
        int nb_globals = s->nb_globals;

        for (i = 0; i < nb_globals; i++) {
            if (test_bit(i, ctx->temps_used.l)) {
                reset_ts(&ctx->tcg->temps[i]);
            }
        }
    }

    /* Reset temp data for outputs. */
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(op->args[i]);
    }

    /* Stop optimizing MB across calls. */
    ctx->prev_mb = NULL;
    return true;
}
1102 | ||
30dd0bfe RH |
/*
 * Fold clz/ctz.  args: dest, input, value-if-input-is-zero.
 */
static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        if (t != 0) {
            t = do_constant_folding(op->opc, ctx->type, t, 0);
            return tcg_opt_gen_movi(ctx, op, op->args[0], t);
        }
        /* Input known zero: the result is the fallback operand. */
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }

    /* The result is either the fallback value or a count in [0, width-1]. */
    switch (ctx->type) {
    case TCG_TYPE_I32:
        z_mask = 31;
        break;
    case TCG_TYPE_I64:
        z_mask = 63;
        break;
    default:
        g_assert_not_reached();
    }
    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;

    return false;
}
1131 | ||
2f9f08ba RH |
/* Fold ctpop: the count lies in [0, width], so record known-zero bits. */
static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        ctx->z_mask = 32 | 31;   /* result in [0, 32] */
        break;
    case TCG_TYPE_I64:
        ctx->z_mask = 64 | 63;   /* result in [0, 64] */
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}
1150 | ||
1b1907b8 RH |
1151 | static bool fold_deposit(OptContext *ctx, TCGOp *op) |
1152 | { | |
1153 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | |
1154 | uint64_t t1 = arg_info(op->args[1])->val; | |
1155 | uint64_t t2 = arg_info(op->args[2])->val; | |
1156 | ||
1157 | t1 = deposit64(t1, op->args[3], op->args[4], t2); | |
1158 | return tcg_opt_gen_movi(ctx, op, op->args[0], t1); | |
1159 | } | |
fae450ba RH |
1160 | |
1161 | ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask, | |
1162 | op->args[3], op->args[4], | |
1163 | arg_info(op->args[2])->z_mask); | |
1b1907b8 RH |
1164 | return false; |
1165 | } | |
1166 | ||
2f9f08ba RH |
/* Fold div/divu: only constant operands can be simplified. */
static bool fold_divide(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}
1171 | ||
8cdb3fcb RH |
1172 | static bool fold_dup(OptContext *ctx, TCGOp *op) |
1173 | { | |
1174 | if (arg_is_const(op->args[1])) { | |
1175 | uint64_t t = arg_info(op->args[1])->val; | |
1176 | t = dup_const(TCGOP_VECE(op), t); | |
1177 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | |
1178 | } | |
1179 | return false; | |
1180 | } | |
1181 | ||
1182 | static bool fold_dup2(OptContext *ctx, TCGOp *op) | |
1183 | { | |
1184 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | |
1185 | uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32, | |
1186 | arg_info(op->args[2])->val); | |
1187 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | |
1188 | } | |
1189 | ||
1190 | if (args_are_copies(op->args[1], op->args[2])) { | |
1191 | op->opc = INDEX_op_dup_vec; | |
1192 | TCGOP_VECE(op) = MO_32; | |
1193 | } | |
1194 | return false; | |
1195 | } | |
1196 | ||
2f9f08ba RH |
1197 | static bool fold_eqv(OptContext *ctx, TCGOp *op) |
1198 | { | |
0e0a32ba | 1199 | if (fold_const2(ctx, op) || |
a63ce0e9 | 1200 | fold_xi_to_x(ctx, op, -1) || |
0e0a32ba RH |
1201 | fold_xi_to_not(ctx, op, 0)) { |
1202 | return true; | |
1203 | } | |
1204 | return false; | |
2f9f08ba RH |
1205 | } |
1206 | ||
b6617c88 RH |
1207 | static bool fold_extract(OptContext *ctx, TCGOp *op) |
1208 | { | |
fae450ba RH |
1209 | uint64_t z_mask_old, z_mask; |
1210 | ||
b6617c88 RH |
1211 | if (arg_is_const(op->args[1])) { |
1212 | uint64_t t; | |
1213 | ||
1214 | t = arg_info(op->args[1])->val; | |
1215 | t = extract64(t, op->args[2], op->args[3]); | |
1216 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | |
1217 | } | |
fae450ba RH |
1218 | |
1219 | z_mask_old = arg_info(op->args[1])->z_mask; | |
1220 | z_mask = extract64(z_mask_old, op->args[2], op->args[3]); | |
1221 | if (op->args[2] == 0) { | |
1222 | ctx->a_mask = z_mask_old ^ z_mask; | |
1223 | } | |
1224 | ctx->z_mask = z_mask; | |
1225 | ||
1226 | return fold_masks(ctx, op); | |
b6617c88 RH |
1227 | } |
1228 | ||
dcd08996 RH |
1229 | static bool fold_extract2(OptContext *ctx, TCGOp *op) |
1230 | { | |
1231 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | |
1232 | uint64_t v1 = arg_info(op->args[1])->val; | |
1233 | uint64_t v2 = arg_info(op->args[2])->val; | |
1234 | int shr = op->args[3]; | |
1235 | ||
1236 | if (op->opc == INDEX_op_extract2_i64) { | |
1237 | v1 >>= shr; | |
1238 | v2 <<= 64 - shr; | |
1239 | } else { | |
1240 | v1 = (uint32_t)v1 >> shr; | |
1241 | v2 = (int32_t)v2 << (32 - shr); | |
1242 | } | |
1243 | return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2); | |
1244 | } | |
1245 | return false; | |
1246 | } | |
1247 | ||
2f9f08ba RH |
/*
 * Fold sign-extension opcodes: constant-fold, then compute the
 * resulting known-zero mask.  When the extension cannot change the
 * value (sign bit known zero, same type), mark the affected bits.
 */
static bool fold_exts(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask, sign;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8s):
        sign = INT8_MIN;
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16s):
        sign = INT16_MIN;
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_ext_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32s_i64:
        sign = INT32_MIN;
        z_mask = (uint32_t)z_mask;
        break;
    default:
        g_assert_not_reached();
    }

    if (z_mask & sign) {
        /* The sign bit may be set: all the replicated bits may be ones. */
        z_mask |= sign;
    } else if (!type_change) {
        /* Extension is a no-op: the old high bits are unneeded. */
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    ctx->z_mask = z_mask;

    return fold_masks(ctx, op);
}
1288 | ||
/*
 * Fold zero-extension (and high-half extraction) opcodes:
 * constant-fold, then narrow the known-zero mask accordingly.
 */
static bool fold_extu(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask_old, z_mask;
    bool type_change = false;

    if (fold_const1(ctx, op)) {
        return true;
    }

    z_mask_old = z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    CASE_OP_32_64(ext8u):
        z_mask = (uint8_t)z_mask;
        break;
    CASE_OP_32_64(ext16u):
        z_mask = (uint16_t)z_mask;
        break;
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extu_i32_i64:
        type_change = true;
        QEMU_FALLTHROUGH;
    case INDEX_op_ext32u_i64:
        z_mask = (uint32_t)z_mask;
        break;
    case INDEX_op_extrh_i64_i32:
        type_change = true;
        z_mask >>= 32;
        break;
    default:
        g_assert_not_reached();
    }

    ctx->z_mask = z_mask;
    if (!type_change) {
        /* Same type: the bits cleared by the extension were unneeded. */
        ctx->a_mask = z_mask_old ^ z_mask;
    }
    return fold_masks(ctx, op);
}
1328 | ||
3eefdf2b RH |
/* Eliminate duplicate and redundant fence instructions. */
static bool fold_mb(OptContext *ctx, TCGOp *op)
{
    if (ctx->prev_mb) {
        /*
         * Merge two barriers of the same type into one,
         * or a weaker barrier into a stronger one,
         * or two weaker barriers into a stronger one.
         *   mb X; mb Y => mb X|Y
         *   mb; strl => mb; st
         *   ldaq; mb => ld; mb
         *   ldaq; strl => ld; mb; st
         * Other combinations are also merged into a strong
         * barrier.  This is stricter than specified but for
         * the purposes of TCG is better than not optimizing.
         */
        ctx->prev_mb->args[0] |= op->args[0];
        tcg_op_remove(ctx->tcg, op);
    } else {
        /* Remember this barrier so a later one can merge into it. */
        ctx->prev_mb = op;
    }
    return true;
}
1352 | ||
2cfac7fa RH |
/* A mov is pure copy propagation. */
static bool fold_mov(OptContext *ctx, TCGOp *op)
{
    return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
}
1357 | ||
0c310a30 RH |
/*
 * Fold movcond.  args: dest, c1, c2, vtrue, vfalse, cond.
 */
static bool fold_movcond(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[5];
    int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond);

    if (i >= 0) {
        /* Condition known: select vtrue (i == 1) or vfalse (i == 0). */
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]);
    }

    /* The result is one of the two data inputs. */
    ctx->z_mask = arg_info(op->args[3])->z_mask
                | arg_info(op->args[4])->z_mask;

    /* With constant 1/0 data inputs, movcond degenerates to setcond. */
    if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) {
        uint64_t tv = arg_info(op->args[3])->val;
        uint64_t fv = arg_info(op->args[4])->val;
        TCGOpcode opc;

        switch (ctx->type) {
        case TCG_TYPE_I32:
            opc = INDEX_op_setcond_i32;
            break;
        case TCG_TYPE_I64:
            opc = INDEX_op_setcond_i64;
            break;
        default:
            g_assert_not_reached();
        }

        if (tv == 1 && fv == 0) {
            op->opc = opc;
            op->args[3] = cond;
        } else if (fv == 1 && tv == 0) {
            op->opc = opc;
            op->args[3] = tcg_invert_cond(cond);
        }
    }
    return false;
}
1396 | ||
2f9f08ba RH |
1397 | static bool fold_mul(OptContext *ctx, TCGOp *op) |
1398 | { | |
e8679955 RH |
1399 | if (fold_const2(ctx, op) || |
1400 | fold_xi_to_i(ctx, op, 0)) { | |
1401 | return true; | |
1402 | } | |
1403 | return false; | |
2f9f08ba RH |
1404 | } |
1405 | ||
1406 | static bool fold_mul_highpart(OptContext *ctx, TCGOp *op) | |
1407 | { | |
e8679955 RH |
1408 | if (fold_const2(ctx, op) || |
1409 | fold_xi_to_i(ctx, op, 0)) { | |
1410 | return true; | |
1411 | } | |
1412 | return false; | |
2f9f08ba RH |
1413 | } |
1414 | ||
/*
 * Fold mulu2/muls2 with two constant inputs into a pair of movi,
 * inserting a second op for the high half.  args: rl, rh, a, b.
 */
static bool fold_multiply2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) {
        uint64_t a = arg_info(op->args[2])->val;
        uint64_t b = arg_info(op->args[3])->val;
        uint64_t h, l;
        TCGArg rl, rh;
        TCGOp *op2;

        switch (op->opc) {
        case INDEX_op_mulu2_i32:
            l = (uint64_t)(uint32_t)a * (uint32_t)b;
            h = (int32_t)(l >> 32);
            l = (int32_t)l;
            break;
        case INDEX_op_muls2_i32:
            l = (int64_t)(int32_t)a * (int32_t)b;
            h = l >> 32;
            l = (int32_t)l;
            break;
        case INDEX_op_mulu2_i64:
            mulu64(&l, &h, a, b);
            break;
        case INDEX_op_muls2_i64:
            muls64(&l, &h, a, b);
            break;
        default:
            g_assert_not_reached();
        }

        /* Fetch the outputs before tcg_opt_gen_movi rewrites op. */
        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0);

        tcg_opt_gen_movi(ctx, op, rl, l);
        tcg_opt_gen_movi(ctx, op2, rh, h);
        return true;
    }
    return false;
}
1457 | ||
2f9f08ba RH |
1458 | static bool fold_nand(OptContext *ctx, TCGOp *op) |
1459 | { | |
0e0a32ba RH |
1460 | if (fold_const2(ctx, op) || |
1461 | fold_xi_to_not(ctx, op, -1)) { | |
1462 | return true; | |
1463 | } | |
1464 | return false; | |
2f9f08ba RH |
1465 | } |
1466 | ||
1467 | static bool fold_neg(OptContext *ctx, TCGOp *op) | |
1468 | { | |
fae450ba RH |
1469 | uint64_t z_mask; |
1470 | ||
9caca88a RH |
1471 | if (fold_const1(ctx, op)) { |
1472 | return true; | |
1473 | } | |
fae450ba RH |
1474 | |
1475 | /* Set to 1 all bits to the left of the rightmost. */ | |
1476 | z_mask = arg_info(op->args[1])->z_mask; | |
1477 | ctx->z_mask = -(z_mask & -z_mask); | |
1478 | ||
9caca88a RH |
1479 | /* |
1480 | * Because of fold_sub_to_neg, we want to always return true, | |
1481 | * via finish_folding. | |
1482 | */ | |
1483 | finish_folding(ctx, op); | |
1484 | return true; | |
2f9f08ba RH |
1485 | } |
1486 | ||
1487 | static bool fold_nor(OptContext *ctx, TCGOp *op) | |
1488 | { | |
0e0a32ba RH |
1489 | if (fold_const2(ctx, op) || |
1490 | fold_xi_to_not(ctx, op, 0)) { | |
1491 | return true; | |
1492 | } | |
1493 | return false; | |
2f9f08ba RH |
1494 | } |
1495 | ||
/* Fold not: constant-fold only. */
static bool fold_not(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    /* Because of fold_to_not, we want to always return true, via finish. */
    finish_folding(ctx, op);
    return true;
}
1506 | ||
1507 | static bool fold_or(OptContext *ctx, TCGOp *op) | |
1508 | { | |
ca7bb049 | 1509 | if (fold_const2(ctx, op) || |
a63ce0e9 | 1510 | fold_xi_to_x(ctx, op, 0) || |
ca7bb049 RH |
1511 | fold_xx_to_x(ctx, op)) { |
1512 | return true; | |
1513 | } | |
fae450ba RH |
1514 | |
1515 | ctx->z_mask = arg_info(op->args[1])->z_mask | |
1516 | | arg_info(op->args[2])->z_mask; | |
1517 | return fold_masks(ctx, op); | |
2f9f08ba RH |
1518 | } |
1519 | ||
1520 | static bool fold_orc(OptContext *ctx, TCGOp *op) | |
1521 | { | |
0e0a32ba | 1522 | if (fold_const2(ctx, op) || |
a63ce0e9 | 1523 | fold_xi_to_x(ctx, op, -1) || |
0e0a32ba RH |
1524 | fold_ix_to_not(ctx, op, 0)) { |
1525 | return true; | |
1526 | } | |
1527 | return false; | |
2f9f08ba RH |
1528 | } |
1529 | ||
3eefdf2b RH |
1530 | static bool fold_qemu_ld(OptContext *ctx, TCGOp *op) |
1531 | { | |
fae450ba RH |
1532 | const TCGOpDef *def = &tcg_op_defs[op->opc]; |
1533 | MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs]; | |
1534 | MemOp mop = get_memop(oi); | |
1535 | int width = 8 * memop_size(mop); | |
1536 | ||
1537 | if (!(mop & MO_SIGN) && width < 64) { | |
1538 | ctx->z_mask = MAKE_64BIT_MASK(0, width); | |
1539 | } | |
1540 | ||
3eefdf2b RH |
1541 | /* Opcodes that touch guest memory stop the mb optimization. */ |
1542 | ctx->prev_mb = NULL; | |
1543 | return false; | |
1544 | } | |
1545 | ||
/* Guest store: nothing to fold, but it fences the mb optimization. */
static bool fold_qemu_st(OptContext *ctx, TCGOp *op)
{
    /* Opcodes that touch guest memory stop the mb optimization. */
    ctx->prev_mb = NULL;
    return false;
}
1552 | ||
2f9f08ba RH |
/* Fold rem/remu: only constant operands can be simplified. */
static bool fold_remainder(OptContext *ctx, TCGOp *op)
{
    return fold_const2(ctx, op);
}
1557 | ||
c63ff55c RH |
1558 | static bool fold_setcond(OptContext *ctx, TCGOp *op) |
1559 | { | |
1560 | TCGCond cond = op->args[3]; | |
67f84c96 | 1561 | int i = do_constant_folding_cond(ctx->type, op->args[1], op->args[2], cond); |
c63ff55c RH |
1562 | |
1563 | if (i >= 0) { | |
1564 | return tcg_opt_gen_movi(ctx, op, op->args[0], i); | |
1565 | } | |
fae450ba RH |
1566 | |
1567 | ctx->z_mask = 1; | |
c63ff55c RH |
1568 | return false; |
1569 | } | |
1570 | ||
bc47b1aa RH |
/*
 * Fold a double-word setcond (setcond2_i32).
 * args: dest, al, ah, bl, bh, cond.
 */
static bool fold_setcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond = op->args[5];
    int i = do_constant_folding_cond2(&op->args[1], &op->args[3], cond);
    int inv = 0;

    if (i >= 0) {
        /* Entire comparison folded to a constant. */
        goto do_setcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const(op->args[3]) && arg_info(op->args[3])->val == 0 &&
            arg_is_const(op->args[4]) && arg_info(op->args[4])->val == 0) {
            goto do_setcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            /* Low halves decide the result. */
            goto do_setcond_const;
        case 1:
            /* Low halves decided nothing: test the high halves. */
            goto do_setcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2],
                                     op->args[4], cond);
        switch (i ^ inv) {
        case 0:
            goto do_setcond_const;
        case 1:
            /* High halves equal: compare only the low halves. */
            op->args[2] = op->args[3];
            op->args[3] = cond;
            op->opc = INDEX_op_setcond_i32;
            break;
        }
        break;

    default:
        break;

    do_setcond_high:
        /* Reduce to a single-word compare of the high halves. */
        op->args[1] = op->args[2];
        op->args[2] = op->args[4];
        op->args[3] = cond;
        op->opc = INDEX_op_setcond_i32;
        break;
    }

    /* The result of setcond is always 0 or 1. */
    ctx->z_mask = 1;
    return false;

 do_setcond_const:
    return tcg_opt_gen_movi(ctx, op, op->args[0], i);
}
1641 | ||
b6617c88 RH |
1642 | static bool fold_sextract(OptContext *ctx, TCGOp *op) |
1643 | { | |
fae450ba RH |
1644 | int64_t z_mask_old, z_mask; |
1645 | ||
b6617c88 RH |
1646 | if (arg_is_const(op->args[1])) { |
1647 | uint64_t t; | |
1648 | ||
1649 | t = arg_info(op->args[1])->val; | |
1650 | t = sextract64(t, op->args[2], op->args[3]); | |
1651 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | |
1652 | } | |
fae450ba RH |
1653 | |
1654 | z_mask_old = arg_info(op->args[1])->z_mask; | |
1655 | z_mask = sextract64(z_mask_old, op->args[2], op->args[3]); | |
1656 | if (op->args[2] == 0 && z_mask >= 0) { | |
1657 | ctx->a_mask = z_mask_old ^ z_mask; | |
1658 | } | |
1659 | ctx->z_mask = z_mask; | |
1660 | ||
1661 | return fold_masks(ctx, op); | |
b6617c88 RH |
1662 | } |
1663 | ||
2f9f08ba RH |
1664 | static bool fold_shift(OptContext *ctx, TCGOp *op) |
1665 | { | |
a63ce0e9 | 1666 | if (fold_const2(ctx, op) || |
da48e272 | 1667 | fold_ix_to_i(ctx, op, 0) || |
a63ce0e9 RH |
1668 | fold_xi_to_x(ctx, op, 0)) { |
1669 | return true; | |
1670 | } | |
fae450ba RH |
1671 | |
1672 | if (arg_is_const(op->args[2])) { | |
1673 | ctx->z_mask = do_constant_folding(op->opc, ctx->type, | |
1674 | arg_info(op->args[1])->z_mask, | |
1675 | arg_info(op->args[2])->val); | |
1676 | return fold_masks(ctx, op); | |
1677 | } | |
a63ce0e9 | 1678 | return false; |
2f9f08ba RH |
1679 | } |
1680 | ||
9caca88a RH |
/*
 * Convert "sub 0, x" into "neg x" when the backend supports a neg
 * opcode for the current type.  Called from fold_sub.
 */
static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op)
{
    TCGOpcode neg_op;
    bool have_neg;

    if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) {
        return false;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        neg_op = INDEX_op_neg_i32;
        have_neg = TCG_TARGET_HAS_neg_i32;
        break;
    case TCG_TYPE_I64:
        neg_op = INDEX_op_neg_i64;
        have_neg = TCG_TARGET_HAS_neg_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        neg_op = INDEX_op_neg_vec;
        have_neg = (TCG_TARGET_HAS_neg_vec &&
                    tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0);
        break;
    default:
        g_assert_not_reached();
    }
    if (have_neg) {
        op->opc = neg_op;
        op->args[1] = op->args[2];
        /* fold_neg always returns true via finish_folding. */
        return fold_neg(ctx, op);
    }
    return false;
}
1716 | ||
2f9f08ba RH |
1717 | static bool fold_sub(OptContext *ctx, TCGOp *op) |
1718 | { | |
cbe42fb2 | 1719 | if (fold_const2(ctx, op) || |
9caca88a | 1720 | fold_xx_to_i(ctx, op, 0) || |
a63ce0e9 | 1721 | fold_xi_to_x(ctx, op, 0) || |
9caca88a | 1722 | fold_sub_to_neg(ctx, op)) { |
cbe42fb2 RH |
1723 | return true; |
1724 | } | |
1725 | return false; | |
2f9f08ba RH |
1726 | } |
1727 | ||
e3f7dc21 RH |
/* Fold sub2_i32 via the shared double-word add/sub folder (!is_add). */
static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
{
    return fold_addsub2_i32(ctx, op, false);
}
1732 | ||
fae450ba RH |
/* We can't do any folding with a load, but we can record bits. */
static bool fold_tcg_ld(OptContext *ctx, TCGOp *op)
{
    switch (op->opc) {
    CASE_OP_32_64(ld8u):
        ctx->z_mask = MAKE_64BIT_MASK(0, 8);
        break;
    CASE_OP_32_64(ld16u):
        ctx->z_mask = MAKE_64BIT_MASK(0, 16);
        break;
    case INDEX_op_ld32u_i64:
        ctx->z_mask = MAKE_64BIT_MASK(0, 32);
        break;
    default:
        g_assert_not_reached();
    }
    return false;
}
1751 | ||
2f9f08ba RH |
1752 | static bool fold_xor(OptContext *ctx, TCGOp *op) |
1753 | { | |
cbe42fb2 | 1754 | if (fold_const2(ctx, op) || |
0e0a32ba | 1755 | fold_xx_to_i(ctx, op, 0) || |
a63ce0e9 | 1756 | fold_xi_to_x(ctx, op, 0) || |
0e0a32ba | 1757 | fold_xi_to_not(ctx, op, -1)) { |
cbe42fb2 RH |
1758 | return true; |
1759 | } | |
fae450ba RH |
1760 | |
1761 | ctx->z_mask = arg_info(op->args[1])->z_mask | |
1762 | | arg_info(op->args[2])->z_mask; | |
1763 | return fold_masks(ctx, op); | |
2f9f08ba RH |
1764 | } |
1765 | ||
22613af4 | 1766 | /* Propagate constants and copies, fold constant expressions. */ |
36e60ef6 | 1767 | void tcg_optimize(TCGContext *s) |
8f2e8c07 | 1768 | { |
5cf32be7 | 1769 | int nb_temps, i; |
d0ed5151 | 1770 | TCGOp *op, *op_next; |
dc84988a | 1771 | OptContext ctx = { .tcg = s }; |
5d8f5363 | 1772 | |
22613af4 KB |
1773 | /* Array VALS has an element for each temp. |
1774 | If this temp holds a constant then its value is kept in VALS' element. | |
e590d4e6 AJ |
1775 | If this temp is a copy of other ones then the other copies are |
1776 | available through the doubly linked circular list. */ | |
8f2e8c07 KB |
1777 | |
1778 | nb_temps = s->nb_temps; | |
8f17a975 RH |
1779 | for (i = 0; i < nb_temps; ++i) { |
1780 | s->temps[i].state_ptr = NULL; | |
1781 | } | |
8f2e8c07 | 1782 | |
15fa08f8 | 1783 | QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { |
c45cb8bb | 1784 | TCGOpcode opc = op->opc; |
5cf32be7 | 1785 | const TCGOpDef *def; |
404a148d | 1786 | bool done = false; |
c45cb8bb | 1787 | |
5cf32be7 | 1788 | /* Calls are special. */ |
c45cb8bb | 1789 | if (opc == INDEX_op_call) { |
5cf32be7 RH |
1790 | fold_call(&ctx, op); |
1791 | continue; | |
cf066674 | 1792 | } |
5cf32be7 RH |
1793 | |
1794 | def = &tcg_op_defs[opc]; | |
ec5d4cbe RH |
1795 | init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs); |
1796 | copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs); | |
22613af4 | 1797 | |
67f84c96 RH |
1798 | /* Pre-compute the type of the operation. */ |
1799 | if (def->flags & TCG_OPF_VECTOR) { | |
1800 | ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op); | |
1801 | } else if (def->flags & TCG_OPF_64BIT) { | |
1802 | ctx.type = TCG_TYPE_I64; | |
1803 | } else { | |
1804 | ctx.type = TCG_TYPE_I32; | |
1805 | } | |
1806 | ||
53108fb5 | 1807 | /* For commutative operations make constant second argument */ |
c45cb8bb | 1808 | switch (opc) { |
170ba88f RH |
1809 | CASE_OP_32_64_VEC(add): |
1810 | CASE_OP_32_64_VEC(mul): | |
1811 | CASE_OP_32_64_VEC(and): | |
1812 | CASE_OP_32_64_VEC(or): | |
1813 | CASE_OP_32_64_VEC(xor): | |
cb25c80a RH |
1814 | CASE_OP_32_64(eqv): |
1815 | CASE_OP_32_64(nand): | |
1816 | CASE_OP_32_64(nor): | |
03271524 RH |
1817 | CASE_OP_32_64(muluh): |
1818 | CASE_OP_32_64(mulsh): | |
acd93701 | 1819 | swap_commutative(op->args[0], &op->args[1], &op->args[2]); |
53108fb5 | 1820 | break; |
65a7cce1 | 1821 | CASE_OP_32_64(brcond): |
acd93701 RH |
1822 | if (swap_commutative(-1, &op->args[0], &op->args[1])) { |
1823 | op->args[2] = tcg_swap_cond(op->args[2]); | |
65a7cce1 AJ |
1824 | } |
1825 | break; | |
1826 | CASE_OP_32_64(setcond): | |
acd93701 RH |
1827 | if (swap_commutative(op->args[0], &op->args[1], &op->args[2])) { |
1828 | op->args[3] = tcg_swap_cond(op->args[3]); | |
65a7cce1 AJ |
1829 | } |
1830 | break; | |
fa01a208 | 1831 | CASE_OP_32_64(movcond): |
acd93701 RH |
1832 | if (swap_commutative(-1, &op->args[1], &op->args[2])) { |
1833 | op->args[5] = tcg_swap_cond(op->args[5]); | |
5d8f5363 RH |
1834 | } |
1835 | /* For movcond, we canonicalize the "false" input reg to match | |
1836 | the destination reg so that the tcg backend can implement | |
1837 | a "move if true" operation. */ | |
acd93701 RH |
1838 | if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { |
1839 | op->args[5] = tcg_invert_cond(op->args[5]); | |
fa01a208 | 1840 | } |
1e484e61 | 1841 | break; |
d7156f7c | 1842 | CASE_OP_32_64(add2): |
acd93701 RH |
1843 | swap_commutative(op->args[0], &op->args[2], &op->args[4]); |
1844 | swap_commutative(op->args[1], &op->args[3], &op->args[5]); | |
1e484e61 | 1845 | break; |
d7156f7c | 1846 | CASE_OP_32_64(mulu2): |
4d3203fd | 1847 | CASE_OP_32_64(muls2): |
acd93701 | 1848 | swap_commutative(op->args[0], &op->args[2], &op->args[3]); |
1414968a | 1849 | break; |
0bfcb865 | 1850 | case INDEX_op_brcond2_i32: |
acd93701 RH |
1851 | if (swap_commutative2(&op->args[0], &op->args[2])) { |
1852 | op->args[4] = tcg_swap_cond(op->args[4]); | |
0bfcb865 RH |
1853 | } |
1854 | break; | |
1855 | case INDEX_op_setcond2_i32: | |
acd93701 RH |
1856 | if (swap_commutative2(&op->args[1], &op->args[3])) { |
1857 | op->args[5] = tcg_swap_cond(op->args[5]); | |
0bfcb865 RH |
1858 | } |
1859 | break; | |
53108fb5 KB |
1860 | default: |
1861 | break; | |
1862 | } | |
1863 | ||
fae450ba RH |
1864 | /* Assume all bits affected, and no bits known zero. */ |
1865 | ctx.a_mask = -1; | |
1866 | ctx.z_mask = -1; | |
633f6502 | 1867 | |
2cfac7fa RH |
1868 | /* |
1869 | * Process each opcode. | |
1870 | * Sorted alphabetically by opcode as much as possible. | |
1871 | */ | |
c45cb8bb | 1872 | switch (opc) { |
2f9f08ba RH |
1873 | CASE_OP_32_64_VEC(add): |
1874 | done = fold_add(&ctx, op); | |
1875 | break; | |
e3f7dc21 RH |
1876 | case INDEX_op_add2_i32: |
1877 | done = fold_add2_i32(&ctx, op); | |
1878 | break; | |
2f9f08ba RH |
1879 | CASE_OP_32_64_VEC(and): |
1880 | done = fold_and(&ctx, op); | |
1881 | break; | |
1882 | CASE_OP_32_64_VEC(andc): | |
1883 | done = fold_andc(&ctx, op); | |
1884 | break; | |
079b0804 RH |
1885 | CASE_OP_32_64(brcond): |
1886 | done = fold_brcond(&ctx, op); | |
1887 | break; | |
764d2aba RH |
1888 | case INDEX_op_brcond2_i32: |
1889 | done = fold_brcond2(&ctx, op); | |
1890 | break; | |
09bacdc2 RH |
1891 | CASE_OP_32_64(bswap16): |
1892 | CASE_OP_32_64(bswap32): | |
1893 | case INDEX_op_bswap64_i64: | |
1894 | done = fold_bswap(&ctx, op); | |
1895 | break; | |
30dd0bfe RH |
1896 | CASE_OP_32_64(clz): |
1897 | CASE_OP_32_64(ctz): | |
1898 | done = fold_count_zeros(&ctx, op); | |
1899 | break; | |
2f9f08ba RH |
1900 | CASE_OP_32_64(ctpop): |
1901 | done = fold_ctpop(&ctx, op); | |
1902 | break; | |
1b1907b8 RH |
1903 | CASE_OP_32_64(deposit): |
1904 | done = fold_deposit(&ctx, op); | |
1905 | break; | |
2f9f08ba RH |
1906 | CASE_OP_32_64(div): |
1907 | CASE_OP_32_64(divu): | |
1908 | done = fold_divide(&ctx, op); | |
1909 | break; | |
8cdb3fcb RH |
1910 | case INDEX_op_dup_vec: |
1911 | done = fold_dup(&ctx, op); | |
1912 | break; | |
1913 | case INDEX_op_dup2_vec: | |
1914 | done = fold_dup2(&ctx, op); | |
1915 | break; | |
2f9f08ba RH |
1916 | CASE_OP_32_64(eqv): |
1917 | done = fold_eqv(&ctx, op); | |
1918 | break; | |
b6617c88 RH |
1919 | CASE_OP_32_64(extract): |
1920 | done = fold_extract(&ctx, op); | |
1921 | break; | |
dcd08996 RH |
1922 | CASE_OP_32_64(extract2): |
1923 | done = fold_extract2(&ctx, op); | |
1924 | break; | |
2f9f08ba RH |
1925 | CASE_OP_32_64(ext8s): |
1926 | CASE_OP_32_64(ext16s): | |
1927 | case INDEX_op_ext32s_i64: | |
1928 | case INDEX_op_ext_i32_i64: | |
1929 | done = fold_exts(&ctx, op); | |
1930 | break; | |
1931 | CASE_OP_32_64(ext8u): | |
1932 | CASE_OP_32_64(ext16u): | |
1933 | case INDEX_op_ext32u_i64: | |
1934 | case INDEX_op_extu_i32_i64: | |
1935 | case INDEX_op_extrl_i64_i32: | |
1936 | case INDEX_op_extrh_i64_i32: | |
1937 | done = fold_extu(&ctx, op); | |
1938 | break; | |
fae450ba RH |
1939 | CASE_OP_32_64(ld8u): |
1940 | CASE_OP_32_64(ld16u): | |
1941 | case INDEX_op_ld32u_i64: | |
1942 | done = fold_tcg_ld(&ctx, op); | |
1943 | break; | |
3eefdf2b RH |
1944 | case INDEX_op_mb: |
1945 | done = fold_mb(&ctx, op); | |
0c310a30 | 1946 | break; |
2cfac7fa RH |
1947 | CASE_OP_32_64_VEC(mov): |
1948 | done = fold_mov(&ctx, op); | |
1949 | break; | |
0c310a30 RH |
1950 | CASE_OP_32_64(movcond): |
1951 | done = fold_movcond(&ctx, op); | |
3eefdf2b | 1952 | break; |
2f9f08ba RH |
1953 | CASE_OP_32_64(mul): |
1954 | done = fold_mul(&ctx, op); | |
1955 | break; | |
1956 | CASE_OP_32_64(mulsh): | |
1957 | CASE_OP_32_64(muluh): | |
1958 | done = fold_mul_highpart(&ctx, op); | |
1959 | break; | |
407112b0 RH |
1960 | CASE_OP_32_64(muls2): |
1961 | CASE_OP_32_64(mulu2): | |
1962 | done = fold_multiply2(&ctx, op); | |
6b8ac0d1 | 1963 | break; |
2f9f08ba RH |
1964 | CASE_OP_32_64(nand): |
1965 | done = fold_nand(&ctx, op); | |
1966 | break; | |
1967 | CASE_OP_32_64(neg): | |
1968 | done = fold_neg(&ctx, op); | |
1969 | break; | |
1970 | CASE_OP_32_64(nor): | |
1971 | done = fold_nor(&ctx, op); | |
1972 | break; | |
1973 | CASE_OP_32_64_VEC(not): | |
1974 | done = fold_not(&ctx, op); | |
1975 | break; | |
1976 | CASE_OP_32_64_VEC(or): | |
1977 | done = fold_or(&ctx, op); | |
1978 | break; | |
1979 | CASE_OP_32_64_VEC(orc): | |
1980 | done = fold_orc(&ctx, op); | |
1981 | break; | |
3eefdf2b RH |
1982 | case INDEX_op_qemu_ld_i32: |
1983 | case INDEX_op_qemu_ld_i64: | |
1984 | done = fold_qemu_ld(&ctx, op); | |
1985 | break; | |
1986 | case INDEX_op_qemu_st_i32: | |
1987 | case INDEX_op_qemu_st8_i32: | |
1988 | case INDEX_op_qemu_st_i64: | |
1989 | done = fold_qemu_st(&ctx, op); | |
1990 | break; | |
2f9f08ba RH |
1991 | CASE_OP_32_64(rem): |
1992 | CASE_OP_32_64(remu): | |
1993 | done = fold_remainder(&ctx, op); | |
1994 | break; | |
1995 | CASE_OP_32_64(rotl): | |
1996 | CASE_OP_32_64(rotr): | |
1997 | CASE_OP_32_64(sar): | |
1998 | CASE_OP_32_64(shl): | |
1999 | CASE_OP_32_64(shr): | |
2000 | done = fold_shift(&ctx, op); | |
2001 | break; | |
c63ff55c RH |
2002 | CASE_OP_32_64(setcond): |
2003 | done = fold_setcond(&ctx, op); | |
2004 | break; | |
bc47b1aa RH |
2005 | case INDEX_op_setcond2_i32: |
2006 | done = fold_setcond2(&ctx, op); | |
2007 | break; | |
b6617c88 RH |
2008 | CASE_OP_32_64(sextract): |
2009 | done = fold_sextract(&ctx, op); | |
2010 | break; | |
2f9f08ba RH |
2011 | CASE_OP_32_64_VEC(sub): |
2012 | done = fold_sub(&ctx, op); | |
2013 | break; | |
e3f7dc21 RH |
2014 | case INDEX_op_sub2_i32: |
2015 | done = fold_sub2_i32(&ctx, op); | |
2016 | break; | |
2f9f08ba RH |
2017 | CASE_OP_32_64_VEC(xor): |
2018 | done = fold_xor(&ctx, op); | |
b10f3833 | 2019 | break; |
2cfac7fa RH |
2020 | default: |
2021 | break; | |
b10f3833 RH |
2022 | } |
2023 | ||
404a148d RH |
2024 | if (!done) { |
2025 | finish_folding(&ctx, op); | |
2026 | } | |
8f2e8c07 | 2027 | } |
8f2e8c07 | 2028 | } |