/*
 * Optimizations for Tiny Code Generator for QEMU
 *
 * Copyright (c) 2010 Samsung Electronics.
 * Contributed by Kirill Batuzov <batuzovk@ispras.ru>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "qemu/interval-tree.h"
#include "tcg/tcg-op-common.h"
#include "tcg-internal.h"

#define CASE_OP_32_64(x)                        \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64)

#define CASE_OP_32_64_VEC(x)                    \
        glue(glue(case INDEX_op_, x), _i32):    \
        glue(glue(case INDEX_op_, x), _i64):    \
        glue(glue(case INDEX_op_, x), _vec)
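/*
 * Illustration: CASE_OP_32_64(add) expands to
 *     case INDEX_op_add_i32: case INDEX_op_add_i64
 * and the _VEC variant additionally covers INDEX_op_add_vec.
 */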

typedef struct MemCopyInfo {
    IntervalTreeNode itree;
    QSIMPLEQ_ENTRY (MemCopyInfo) next;
    TCGTemp *ts;
    TCGType type;
} MemCopyInfo;

typedef struct TempOptInfo {
    bool is_const;
    TCGTemp *prev_copy;
    TCGTemp *next_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_copy;
    uint64_t val;
    uint64_t z_mask;  /* mask bit is 0 if and only if value bit is 0 */
    uint64_t s_mask;  /* a left-aligned mask of clrsb(value) bits. */
} TempOptInfo;

typedef struct OptContext {
    TCGContext *tcg;
    TCGOp *prev_mb;
    TCGTempSet temps_used;

    IntervalTreeRoot mem_copy;
    QSIMPLEQ_HEAD(, MemCopyInfo) mem_free;

    /* In flight values from optimization. */
    uint64_t a_mask;  /* mask bit is 0 iff value identical to first input */
    uint64_t z_mask;  /* mask bit is 0 iff value bit is 0 */
    uint64_t s_mask;  /* mask of clrsb(value) bits */
    TCGType type;
} OptContext;
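/*
 * Illustrative example of the in-flight masks: for extract_i64 dst, src, 0, 16
 * (see fold_extract below), z_mask is confined to 0xffff, since only the low
 * 16 bits of the result can be nonzero.  If src was already known to fit in
 * 16 bits (its own z_mask within 0xffff), a_mask is 0 and the extract folds
 * to a plain copy of src.
 */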

/* Calculate the smask for a specific value. */
static uint64_t smask_from_value(uint64_t value)
{
    int rep = clrsb64(value);
    return ~(~0ull >> rep);
}
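/*
 * E.g. smask_from_value(0xff): clrsb64 counts 55 redundant copies of the
 * (zero) sign bit below bit 63, so the result has the top 55 bits set
 * (bits 63..9).
 */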

/*
 * Calculate the smask for a given set of known-zeros.
 * If there are lots of zeros on the left, we can consider the remainder
 * an unsigned field, and thus the corresponding signed field is one bit
 * larger.
 */
static uint64_t smask_from_zmask(uint64_t zmask)
{
    /*
     * Only the 0 bits are significant for zmask, thus the msb itself
     * must be zero, else we have no sign information.
     */
    int rep = clz64(zmask);
    if (rep == 0) {
        return 0;
    }
    rep -= 1;
    return ~(~0ull >> rep);
}
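/*
 * E.g. smask_from_zmask(0xffff): 48 leading known-zero bits mean the value
 * fits in an unsigned 16-bit field, hence a signed 17-bit field, leaving 47
 * guaranteed sign-repetition bits (bits 63..17 of the result).
 */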

/*
 * Recreate a properly left-aligned smask after manipulation.
 * Some bit-shuffling, particularly shifts and rotates, may
 * retain sign bits on the left, but may scatter disconnected
 * sign bits on the right.  Retain only what remains to the left.
 */
static uint64_t smask_from_smask(int64_t smask)
{
    /* Only the 1 bits are significant for smask */
    return smask_from_zmask(~smask);
}

static inline TempOptInfo *ts_info(TCGTemp *ts)
{
    return ts->state_ptr;
}

static inline TempOptInfo *arg_info(TCGArg arg)
{
    return ts_info(arg_temp(arg));
}

static inline bool ts_is_const(TCGTemp *ts)
{
    return ts_info(ts)->is_const;
}

static inline bool ts_is_const_val(TCGTemp *ts, uint64_t val)
{
    TempOptInfo *ti = ts_info(ts);
    return ti->is_const && ti->val == val;
}

static inline bool arg_is_const(TCGArg arg)
{
    return ts_is_const(arg_temp(arg));
}

static inline bool arg_is_const_val(TCGArg arg, uint64_t val)
{
    return ts_is_const_val(arg_temp(arg), val);
}

static inline bool ts_is_copy(TCGTemp *ts)
{
    return ts_info(ts)->next_copy != ts;
}

static TCGTemp *cmp_better_copy(TCGTemp *a, TCGTemp *b)
{
    return a->kind < b->kind ? b : a;
}
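/*
 * "Better" here means the copy whose TCGTempKind sorts higher, on the
 * assumption that kinds are ordered from most transient to most persistent
 * (e.g. a TEMP_CONST is preferred over an ordinary temp).
 */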

/* Initialize and activate a temporary. */
static void init_ts_info(OptContext *ctx, TCGTemp *ts)
{
    size_t idx = temp_idx(ts);
    TempOptInfo *ti;

    if (test_bit(idx, ctx->temps_used.l)) {
        return;
    }
    set_bit(idx, ctx->temps_used.l);

    ti = ts->state_ptr;
    if (ti == NULL) {
        ti = tcg_malloc(sizeof(TempOptInfo));
        ts->state_ptr = ti;
    }

    ti->next_copy = ts;
    ti->prev_copy = ts;
    QSIMPLEQ_INIT(&ti->mem_copy);
    if (ts->kind == TEMP_CONST) {
        ti->is_const = true;
        ti->val = ts->val;
        ti->z_mask = ts->val;
        ti->s_mask = smask_from_value(ts->val);
    } else {
        ti->is_const = false;
        ti->z_mask = -1;
        ti->s_mask = 0;
    }
}

static MemCopyInfo *mem_copy_first(OptContext *ctx, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_first(&ctx->mem_copy, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static MemCopyInfo *mem_copy_next(MemCopyInfo *mem, intptr_t s, intptr_t l)
{
    IntervalTreeNode *r = interval_tree_iter_next(&mem->itree, s, l);
    return r ? container_of(r, MemCopyInfo, itree) : NULL;
}

static void remove_mem_copy(OptContext *ctx, MemCopyInfo *mc)
{
    TCGTemp *ts = mc->ts;
    TempOptInfo *ti = ts_info(ts);

    interval_tree_remove(&mc->itree, &ctx->mem_copy);
    QSIMPLEQ_REMOVE(&ti->mem_copy, mc, MemCopyInfo, next);
    QSIMPLEQ_INSERT_TAIL(&ctx->mem_free, mc, next);
}

static void remove_mem_copy_in(OptContext *ctx, intptr_t s, intptr_t l)
{
    while (true) {
        MemCopyInfo *mc = mem_copy_first(ctx, s, l);
        if (!mc) {
            break;
        }
        remove_mem_copy(ctx, mc);
    }
}

static void remove_mem_copy_all(OptContext *ctx)
{
    remove_mem_copy_in(ctx, 0, -1);
    tcg_debug_assert(interval_tree_is_empty(&ctx->mem_copy));
}

static TCGTemp *find_better_copy(TCGTemp *ts)
{
    TCGTemp *i, *ret;

    /* If this is already readonly, we can't do better. */
    if (temp_readonly(ts)) {
        return ts;
    }

    ret = ts;
    for (i = ts_info(ts)->next_copy; i != ts; i = ts_info(i)->next_copy) {
        ret = cmp_better_copy(ret, i);
    }
    return ret;
}

static void move_mem_copies(TCGTemp *dst_ts, TCGTemp *src_ts)
{
    TempOptInfo *si = ts_info(src_ts);
    TempOptInfo *di = ts_info(dst_ts);
    MemCopyInfo *mc;

    QSIMPLEQ_FOREACH(mc, &si->mem_copy, next) {
        tcg_debug_assert(mc->ts == src_ts);
        mc->ts = dst_ts;
    }
    QSIMPLEQ_CONCAT(&di->mem_copy, &si->mem_copy);
}

/* Reset TEMP's state, possibly removing the temp from the list of copies. */
static void reset_ts(OptContext *ctx, TCGTemp *ts)
{
    TempOptInfo *ti = ts_info(ts);
    TCGTemp *pts = ti->prev_copy;
    TCGTemp *nts = ti->next_copy;
    TempOptInfo *pi = ts_info(pts);
    TempOptInfo *ni = ts_info(nts);

    ni->prev_copy = ti->prev_copy;
    pi->next_copy = ti->next_copy;
    ti->next_copy = ts;
    ti->prev_copy = ts;
    ti->is_const = false;
    ti->z_mask = -1;
    ti->s_mask = 0;

    if (!QSIMPLEQ_EMPTY(&ti->mem_copy)) {
        if (ts == nts) {
            /* Last temp copy being removed, the mem copies die. */
            MemCopyInfo *mc;
            QSIMPLEQ_FOREACH(mc, &ti->mem_copy, next) {
                interval_tree_remove(&mc->itree, &ctx->mem_copy);
            }
            QSIMPLEQ_CONCAT(&ctx->mem_free, &ti->mem_copy);
        } else {
            move_mem_copies(find_better_copy(nts), ts);
        }
    }
}

static void reset_temp(OptContext *ctx, TCGArg arg)
{
    reset_ts(ctx, arg_temp(arg));
}

static void record_mem_copy(OptContext *ctx, TCGType type,
                            TCGTemp *ts, intptr_t start, intptr_t last)
{
    MemCopyInfo *mc;
    TempOptInfo *ti;

    mc = QSIMPLEQ_FIRST(&ctx->mem_free);
    if (mc) {
        QSIMPLEQ_REMOVE_HEAD(&ctx->mem_free, next);
    } else {
        mc = tcg_malloc(sizeof(*mc));
    }

    memset(mc, 0, sizeof(*mc));
    mc->itree.start = start;
    mc->itree.last = last;
    mc->type = type;
    interval_tree_insert(&mc->itree, &ctx->mem_copy);

    ts = find_better_copy(ts);
    ti = ts_info(ts);
    mc->ts = ts;
    QSIMPLEQ_INSERT_TAIL(&ti->mem_copy, mc, next);
}

static bool ts_are_copies(TCGTemp *ts1, TCGTemp *ts2)
{
    TCGTemp *i;

    if (ts1 == ts2) {
        return true;
    }

    if (!ts_is_copy(ts1) || !ts_is_copy(ts2)) {
        return false;
    }

    for (i = ts_info(ts1)->next_copy; i != ts1; i = ts_info(i)->next_copy) {
        if (i == ts2) {
            return true;
        }
    }

    return false;
}

static bool args_are_copies(TCGArg arg1, TCGArg arg2)
{
    return ts_are_copies(arg_temp(arg1), arg_temp(arg2));
}

static TCGTemp *find_mem_copy_for(OptContext *ctx, TCGType type, intptr_t s)
{
    MemCopyInfo *mc;

    for (mc = mem_copy_first(ctx, s, s); mc; mc = mem_copy_next(mc, s, s)) {
        if (mc->itree.start == s && mc->type == type) {
            return find_better_copy(mc->ts);
        }
    }
    return NULL;
}
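/*
 * Taken together, record_mem_copy() and find_mem_copy_for() let a later
 * load from the same offset and type reuse the temp whose value was last
 * stored there, instead of re-reading memory.  The interval tree records
 * the byte range each tracked store covered, so an overlapping store can
 * invalidate stale entries via remove_mem_copy_in().
 */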

static TCGArg arg_new_constant(OptContext *ctx, uint64_t val)
{
    TCGType type = ctx->type;
    TCGTemp *ts;

    if (type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    ts = tcg_constant_internal(type, val);
    init_ts_info(ctx, ts);

    return temp_arg(ts);
}

static TCGArg arg_new_temp(OptContext *ctx)
{
    TCGTemp *ts = tcg_temp_new_internal(ctx->type, TEMP_EBB);
    init_ts_info(ctx, ts);
    return temp_arg(ts);
}

static bool tcg_opt_gen_mov(OptContext *ctx, TCGOp *op, TCGArg dst, TCGArg src)
{
    TCGTemp *dst_ts = arg_temp(dst);
    TCGTemp *src_ts = arg_temp(src);
    TempOptInfo *di;
    TempOptInfo *si;
    TCGOpcode new_op;

    if (ts_are_copies(dst_ts, src_ts)) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }

    reset_ts(ctx, dst_ts);
    di = ts_info(dst_ts);
    si = ts_info(src_ts);

    switch (ctx->type) {
    case TCG_TYPE_I32:
        new_op = INDEX_op_mov_i32;
        break;
    case TCG_TYPE_I64:
        new_op = INDEX_op_mov_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        /* TCGOP_VECL and TCGOP_VECE remain unchanged. */
        new_op = INDEX_op_mov_vec;
        break;
    default:
        g_assert_not_reached();
    }
    op->opc = new_op;
    op->args[0] = dst;
    op->args[1] = src;

    di->z_mask = si->z_mask;
    di->s_mask = si->s_mask;

    if (src_ts->type == dst_ts->type) {
        TempOptInfo *ni = ts_info(si->next_copy);

        di->next_copy = si->next_copy;
        di->prev_copy = src_ts;
        ni->prev_copy = dst_ts;
        si->next_copy = dst_ts;
        di->is_const = si->is_const;
        di->val = si->val;

        if (!QSIMPLEQ_EMPTY(&si->mem_copy)
            && cmp_better_copy(src_ts, dst_ts) == dst_ts) {
            move_mem_copies(dst_ts, src_ts);
        }
    }
    return true;
}
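/*
 * Note that the four pointer assignments above splice dst directly after src
 * in the circular copy list.  E.g. if t1 and t2 were already copies of each
 * other, a mov t3,t1 links the list as t1 -> t3 -> t2 -> t1, so
 * ts_are_copies() reaches any member from any other.
 */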

static bool tcg_opt_gen_movi(OptContext *ctx, TCGOp *op,
                             TCGArg dst, uint64_t val)
{
    /* Convert movi to mov with constant temp. */
    return tcg_opt_gen_mov(ctx, op, dst, arg_new_constant(ctx, val));
}

static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y)
{
    uint64_t l64, h64;

    switch (op) {
    CASE_OP_32_64(add):
        return x + y;

    CASE_OP_32_64(sub):
        return x - y;

    CASE_OP_32_64(mul):
        return x * y;

    CASE_OP_32_64_VEC(and):
        return x & y;

    CASE_OP_32_64_VEC(or):
        return x | y;

    CASE_OP_32_64_VEC(xor):
        return x ^ y;

    case INDEX_op_shl_i32:
        return (uint32_t)x << (y & 31);

    case INDEX_op_shl_i64:
        return (uint64_t)x << (y & 63);

    case INDEX_op_shr_i32:
        return (uint32_t)x >> (y & 31);

    case INDEX_op_shr_i64:
        return (uint64_t)x >> (y & 63);

    case INDEX_op_sar_i32:
        return (int32_t)x >> (y & 31);

    case INDEX_op_sar_i64:
        return (int64_t)x >> (y & 63);

    case INDEX_op_rotr_i32:
        return ror32(x, y & 31);

    case INDEX_op_rotr_i64:
        return ror64(x, y & 63);

    case INDEX_op_rotl_i32:
        return rol32(x, y & 31);

    case INDEX_op_rotl_i64:
        return rol64(x, y & 63);

    CASE_OP_32_64_VEC(not):
        return ~x;

    CASE_OP_32_64(neg):
        return -x;

    CASE_OP_32_64_VEC(andc):
        return x & ~y;

    CASE_OP_32_64_VEC(orc):
        return x | ~y;

    CASE_OP_32_64_VEC(eqv):
        return ~(x ^ y);

    CASE_OP_32_64_VEC(nand):
        return ~(x & y);

    CASE_OP_32_64_VEC(nor):
        return ~(x | y);

    case INDEX_op_clz_i32:
        return (uint32_t)x ? clz32(x) : y;

    case INDEX_op_clz_i64:
        return x ? clz64(x) : y;

    case INDEX_op_ctz_i32:
        return (uint32_t)x ? ctz32(x) : y;

    case INDEX_op_ctz_i64:
        return x ? ctz64(x) : y;

    case INDEX_op_ctpop_i32:
        return ctpop32(x);

    case INDEX_op_ctpop_i64:
        return ctpop64(x);

    CASE_OP_32_64(ext8s):
        return (int8_t)x;

    CASE_OP_32_64(ext16s):
        return (int16_t)x;

    CASE_OP_32_64(ext8u):
        return (uint8_t)x;

    CASE_OP_32_64(ext16u):
        return (uint16_t)x;

    CASE_OP_32_64(bswap16):
        x = bswap16(x);
        return y & TCG_BSWAP_OS ? (int16_t)x : x;

    CASE_OP_32_64(bswap32):
        x = bswap32(x);
        return y & TCG_BSWAP_OS ? (int32_t)x : x;

    case INDEX_op_bswap64_i64:
        return bswap64(x);

    case INDEX_op_ext_i32_i64:
    case INDEX_op_ext32s_i64:
        return (int32_t)x;

    case INDEX_op_extu_i32_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext32u_i64:
        return (uint32_t)x;

    case INDEX_op_extrh_i64_i32:
        return (uint64_t)x >> 32;

    case INDEX_op_muluh_i32:
        return ((uint64_t)(uint32_t)x * (uint32_t)y) >> 32;
    case INDEX_op_mulsh_i32:
        return ((int64_t)(int32_t)x * (int32_t)y) >> 32;

    case INDEX_op_muluh_i64:
        mulu64(&l64, &h64, x, y);
        return h64;
    case INDEX_op_mulsh_i64:
        muls64(&l64, &h64, x, y);
        return h64;

    case INDEX_op_div_i32:
        /* Avoid crashing on divide by zero, otherwise undefined. */
        return (int32_t)x / ((int32_t)y ? : 1);
    case INDEX_op_divu_i32:
        return (uint32_t)x / ((uint32_t)y ? : 1);
    case INDEX_op_div_i64:
        return (int64_t)x / ((int64_t)y ? : 1);
    case INDEX_op_divu_i64:
        return (uint64_t)x / ((uint64_t)y ? : 1);

    case INDEX_op_rem_i32:
        return (int32_t)x % ((int32_t)y ? : 1);
    case INDEX_op_remu_i32:
        return (uint32_t)x % ((uint32_t)y ? : 1);
    case INDEX_op_rem_i64:
        return (int64_t)x % ((int64_t)y ? : 1);
    case INDEX_op_remu_i64:
        return (uint64_t)x % ((uint64_t)y ? : 1);

    default:
        g_assert_not_reached();
    }
}

static uint64_t do_constant_folding(TCGOpcode op, TCGType type,
                                    uint64_t x, uint64_t y)
{
    uint64_t res = do_constant_folding_2(op, x, y);
    if (type == TCG_TYPE_I32) {
        res = (int32_t)res;
    }
    return res;
}
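/*
 * E.g. folding add_i32 with x = 0x7fffffff and y = 1 yields 0x80000000,
 * which the (int32_t) cast stores sign-extended as 0xffffffff80000000;
 * 32-bit constants are kept in this sign-extended form throughout.
 */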

static bool do_constant_folding_cond_32(uint32_t x, uint32_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int32_t)x < (int32_t)y;
    case TCG_COND_GE:
        return (int32_t)x >= (int32_t)y;
    case TCG_COND_LE:
        return (int32_t)x <= (int32_t)y;
    case TCG_COND_GT:
        return (int32_t)x > (int32_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static bool do_constant_folding_cond_64(uint64_t x, uint64_t y, TCGCond c)
{
    switch (c) {
    case TCG_COND_EQ:
        return x == y;
    case TCG_COND_NE:
        return x != y;
    case TCG_COND_LT:
        return (int64_t)x < (int64_t)y;
    case TCG_COND_GE:
        return (int64_t)x >= (int64_t)y;
    case TCG_COND_LE:
        return (int64_t)x <= (int64_t)y;
    case TCG_COND_GT:
        return (int64_t)x > (int64_t)y;
    case TCG_COND_LTU:
        return x < y;
    case TCG_COND_GEU:
        return x >= y;
    case TCG_COND_LEU:
        return x <= y;
    case TCG_COND_GTU:
        return x > y;
    case TCG_COND_TSTEQ:
        return (x & y) == 0;
    case TCG_COND_TSTNE:
        return (x & y) != 0;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

static int do_constant_folding_cond_eq(TCGCond c)
{
    switch (c) {
    case TCG_COND_GT:
    case TCG_COND_LTU:
    case TCG_COND_LT:
    case TCG_COND_GTU:
    case TCG_COND_NE:
        return 0;
    case TCG_COND_GE:
    case TCG_COND_GEU:
    case TCG_COND_LE:
    case TCG_COND_LEU:
    case TCG_COND_EQ:
        return 1;
    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        return -1;
    case TCG_COND_ALWAYS:
    case TCG_COND_NEVER:
        break;
    }
    g_assert_not_reached();
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond(TCGType type, TCGArg x,
                                    TCGArg y, TCGCond c)
{
    if (arg_is_const(x) && arg_is_const(y)) {
        uint64_t xv = arg_info(x)->val;
        uint64_t yv = arg_info(y)->val;

        switch (type) {
        case TCG_TYPE_I32:
            return do_constant_folding_cond_32(xv, yv, c);
        case TCG_TYPE_I64:
            return do_constant_folding_cond_64(xv, yv, c);
        default:
            /* Only scalar comparisons are optimizable */
            return -1;
        }
    } else if (args_are_copies(x, y)) {
        return do_constant_folding_cond_eq(c);
    } else if (arg_is_const_val(y, 0)) {
        switch (c) {
        case TCG_COND_LTU:
        case TCG_COND_TSTNE:
            return 0;
        case TCG_COND_GEU:
        case TCG_COND_TSTEQ:
            return 1;
        default:
            return -1;
        }
    }
    return -1;
}

/**
 * swap_commutative:
 * @dest: TCGArg of the destination argument, or NO_DEST.
 * @p1: first paired argument
 * @p2: second paired argument
 *
 * If *@p1 is a constant and *@p2 is not, swap.
 * If *@p2 matches @dest, swap.
 * Return true if a swap was performed.
 */

#define NO_DEST  temp_arg(NULL)

static bool swap_commutative(TCGArg dest, TCGArg *p1, TCGArg *p2)
{
    TCGArg a1 = *p1, a2 = *p2;
    int sum = 0;
    sum += arg_is_const(a1);
    sum -= arg_is_const(a2);

    /* Prefer the constant in second argument, and then the form
       op a, a, b, which is better handled on non-RISC hosts. */
    if (sum > 0 || (sum == 0 && dest == a2)) {
        *p1 = a2;
        *p2 = a1;
        return true;
    }
    return false;
}
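/*
 * E.g. add_i32 t0, $5, t1 is rewritten as add_i32 t0, t1, $5, and
 * add_i32 t0, t1, t0 becomes add_i32 t0, t0, t1.
 */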

static bool swap_commutative2(TCGArg *p1, TCGArg *p2)
{
    int sum = 0;
    sum += arg_is_const(p1[0]);
    sum += arg_is_const(p1[1]);
    sum -= arg_is_const(p2[0]);
    sum -= arg_is_const(p2[1]);
    if (sum > 0) {
        TCGArg t;
        t = p1[0], p1[0] = p2[0], p2[0] = t;
        t = p1[1], p1[1] = p2[1], p2[1] = t;
        return true;
    }
    return false;
}

/*
 * Return -1 if the condition can't be simplified,
 * and the result of the condition (0 or 1) if it can.
 */
static int do_constant_folding_cond1(OptContext *ctx, TCGOp *op, TCGArg dest,
                                     TCGArg *p1, TCGArg *p2, TCGArg *pcond)
{
    TCGCond cond;
    bool swap;
    int r;

    swap = swap_commutative(dest, p1, p2);
    cond = *pcond;
    if (swap) {
        *pcond = cond = tcg_swap_cond(cond);
    }

    r = do_constant_folding_cond(ctx->type, *p1, *p2, cond);
    if (r >= 0) {
        return r;
    }
    if (!is_tst_cond(cond)) {
        return -1;
    }

    /*
     * TSTNE x,x -> NE x,0
     * TSTNE x,-1 -> NE x,0
     */
    if (args_are_copies(*p1, *p2) || arg_is_const_val(*p2, -1)) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
        return -1;
    }

    /* TSTNE x,sign -> LT x,0 */
    if (arg_is_const_val(*p2, (ctx->type == TCG_TYPE_I32
                               ? INT32_MIN : INT64_MIN))) {
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_ltge_cond(cond);
        return -1;
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst) {
        TCGOpcode and_opc = (ctx->type == TCG_TYPE_I32
                             ? INDEX_op_and_i32 : INDEX_op_and_i64);
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, and_opc, 3);
        TCGArg tmp = arg_new_temp(ctx);

        op2->args[0] = tmp;
        op2->args[1] = *p1;
        op2->args[2] = *p2;

        *p1 = tmp;
        *p2 = arg_new_constant(ctx, 0);
        *pcond = tcg_tst_eqne_cond(cond);
    }
    return -1;
}

static int do_constant_folding_cond2(OptContext *ctx, TCGOp *op, TCGArg *args)
{
    TCGArg al, ah, bl, bh;
    TCGCond c;
    bool swap;
    int r;

    swap = swap_commutative2(args, args + 2);
    c = args[4];
    if (swap) {
        args[4] = c = tcg_swap_cond(c);
    }

    al = args[0];
    ah = args[1];
    bl = args[2];
    bh = args[3];

    if (arg_is_const(bl) && arg_is_const(bh)) {
        tcg_target_ulong blv = arg_info(bl)->val;
        tcg_target_ulong bhv = arg_info(bh)->val;
        uint64_t b = deposit64(blv, 32, 32, bhv);

        if (arg_is_const(al) && arg_is_const(ah)) {
            tcg_target_ulong alv = arg_info(al)->val;
            tcg_target_ulong ahv = arg_info(ah)->val;
            uint64_t a = deposit64(alv, 32, 32, ahv);

            r = do_constant_folding_cond_64(a, b, c);
            if (r >= 0) {
                return r;
            }
        }

        if (b == 0) {
            switch (c) {
            case TCG_COND_LTU:
            case TCG_COND_TSTNE:
                return 0;
            case TCG_COND_GEU:
            case TCG_COND_TSTEQ:
                return 1;
            default:
                break;
            }
        }

        /* TSTNE x,-1 -> NE x,0 */
        if (b == -1 && is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }

        /* TSTNE x,sign -> LT x,0 */
        if (b == INT64_MIN && is_tst_cond(c)) {
            /* bl must be 0, so copy that to bh */
            args[3] = bl;
            args[4] = tcg_tst_ltge_cond(c);
            return -1;
        }
    }

    if (args_are_copies(al, bl) && args_are_copies(ah, bh)) {
        r = do_constant_folding_cond_eq(c);
        if (r >= 0) {
            return r;
        }

        /* TSTNE x,x -> NE x,0 */
        if (is_tst_cond(c)) {
            args[3] = args[2] = arg_new_constant(ctx, 0);
            args[4] = tcg_tst_eqne_cond(c);
            return -1;
        }
    }

    /* Expand to AND with a temporary if no backend support. */
    if (!TCG_TARGET_HAS_tst && is_tst_cond(c)) {
        TCGOp *op1 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
        TCGOp *op2 = tcg_op_insert_before(ctx->tcg, op, INDEX_op_and_i32, 3);
        TCGArg t1 = arg_new_temp(ctx);
        TCGArg t2 = arg_new_temp(ctx);

        op1->args[0] = t1;
        op1->args[1] = al;
        op1->args[2] = bl;
        op2->args[0] = t2;
        op2->args[1] = ah;
        op2->args[2] = bh;

        args[0] = t1;
        args[1] = t2;
        args[3] = args[2] = arg_new_constant(ctx, 0);
        args[4] = tcg_tst_eqne_cond(c);
    }
    return -1;
}

static void init_arguments(OptContext *ctx, TCGOp *op, int nb_args)
{
    for (int i = 0; i < nb_args; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        init_ts_info(ctx, ts);
    }
}

static void copy_propagate(OptContext *ctx, TCGOp *op,
                           int nb_oargs, int nb_iargs)
{
    for (int i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        if (ts_is_copy(ts)) {
            op->args[i] = temp_arg(find_better_copy(ts));
        }
    }
}

static void finish_folding(OptContext *ctx, TCGOp *op)
{
    const TCGOpDef *def = &tcg_op_defs[op->opc];
    int i, nb_oargs;

    /*
     * We only optimize extended basic blocks.  If the opcode ends a BB
     * and is not a conditional branch, reset all temp data.
     */
    if (def->flags & TCG_OPF_BB_END) {
        ctx->prev_mb = NULL;
        if (!(def->flags & TCG_OPF_COND_BRANCH)) {
            memset(&ctx->temps_used, 0, sizeof(ctx->temps_used));
            remove_mem_copy_all(ctx);
        }
        return;
    }

    nb_oargs = def->nb_oargs;
    for (i = 0; i < nb_oargs; i++) {
        TCGTemp *ts = arg_temp(op->args[i]);
        reset_ts(ctx, ts);
        /*
         * Save the corresponding known-zero/sign bits mask for the
         * first output argument (only one supported so far).
         */
        if (i == 0) {
            ts_info(ts)->z_mask = ctx->z_mask;
            ts_info(ts)->s_mask = ctx->s_mask;
        }
    }
}

/*
 * The fold_* functions return true when processing is complete,
 * usually by folding the operation to a constant or to a copy,
 * and calling tcg_opt_gen_{mov,movi}.  They may do other things,
 * like collect information about the value produced, for use in
 * optimizing a subsequent operation.
 *
 * These first fold_* functions are all helpers, used by other
 * folders for more specific operations.
 */

static bool fold_const1(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1])) {
        uint64_t t;

        t = arg_info(op->args[1])->val;
        t = do_constant_folding(op->opc, ctx->type, t, 0);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }
    return false;
}

static bool fold_const2(OptContext *ctx, TCGOp *op)
{
    if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) {
        uint64_t t1 = arg_info(op->args[1])->val;
        uint64_t t2 = arg_info(op->args[2])->val;

        t1 = do_constant_folding(op->opc, ctx->type, t1, t2);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t1);
    }
    return false;
}

static bool fold_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return false;
}

static bool fold_const2_commutative(OptContext *ctx, TCGOp *op)
{
    swap_commutative(op->args[0], &op->args[1], &op->args[2]);
    return fold_const2(ctx, op);
}

static bool fold_masks(OptContext *ctx, TCGOp *op)
{
    uint64_t a_mask = ctx->a_mask;
    uint64_t z_mask = ctx->z_mask;
    uint64_t s_mask = ctx->s_mask;

    /*
     * 32-bit ops generate 32-bit results, which for the purpose of
     * simplifying tcg are sign-extended.  Certainly that's how we
     * represent our constants elsewhere.  Note that the bits will
     * be reset properly for a 64-bit value when encountering the
     * type changing opcodes.
     */
    if (ctx->type == TCG_TYPE_I32) {
        a_mask = (int32_t)a_mask;
        z_mask = (int32_t)z_mask;
        s_mask |= MAKE_64BIT_MASK(32, 32);
        ctx->z_mask = z_mask;
        ctx->s_mask = s_mask;
    }

    if (z_mask == 0) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], 0);
    }
    if (a_mask == 0) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}
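/*
 * Example: in fold_and (below), if arg1's z_mask is 0xff and arg2 is the
 * constant 0xff, then a_mask = z1 & ~z2 = 0, so the AND cannot change arg1
 * and fold_masks() turns it into a copy; had the combined z_mask been 0,
 * the result would instead fold to the constant 0.
 */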

/*
 * Convert @op to NOT, if NOT is supported by the host.
 * Return true if the conversion is successful, which will still
 * indicate that the processing is complete.
 */
static bool fold_not(OptContext *ctx, TCGOp *op);
static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
{
    TCGOpcode not_op;
    bool have_not;

    switch (ctx->type) {
    case TCG_TYPE_I32:
        not_op = INDEX_op_not_i32;
        have_not = TCG_TARGET_HAS_not_i32;
        break;
    case TCG_TYPE_I64:
        not_op = INDEX_op_not_i64;
        have_not = TCG_TARGET_HAS_not_i64;
        break;
    case TCG_TYPE_V64:
    case TCG_TYPE_V128:
    case TCG_TYPE_V256:
        not_op = INDEX_op_not_vec;
        have_not = TCG_TARGET_HAS_not_vec;
        break;
    default:
        g_assert_not_reached();
    }
    if (have_not) {
        op->opc = not_op;
        op->args[1] = op->args[idx];
        return fold_not(ctx, op);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to @i. */
static bool fold_ix_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has first argument @i, fold to NOT. */
static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[1], i)) {
        return fold_to_not(ctx, op, 2);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to @i. */
static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to identity. */
static bool fold_xi_to_x(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/* If the binary operation has second argument @i, fold to NOT. */
static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (arg_is_const_val(op->args[2], i)) {
        return fold_to_not(ctx, op, 1);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to @i. */
static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_movi(ctx, op, op->args[0], i);
    }
    return false;
}

/* If the binary operation has both arguments equal, fold to identity. */
static bool fold_xx_to_x(OptContext *ctx, TCGOp *op)
{
    if (args_are_copies(op->args[1], op->args[2])) {
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]);
    }
    return false;
}

/*
 * These outermost fold_<op> functions are sorted alphabetically.
 *
 * The ordering of the transformations should be:
 *   1) those that produce a constant
 *   2) those that produce a copy
 *   3) those that produce information about the result value.
 */

static bool fold_add(OptContext *ctx, TCGOp *op)
{
    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return false;
}

/* We cannot as yet do_constant_folding with vectors. */
static bool fold_add_vec(OptContext *ctx, TCGOp *op)
{
    if (fold_commutative(ctx, op) ||
        fold_xi_to_x(ctx, op, 0)) {
        return true;
    }
    return false;
}

static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add)
{
    bool a_const = arg_is_const(op->args[2]) && arg_is_const(op->args[3]);
    bool b_const = arg_is_const(op->args[4]) && arg_is_const(op->args[5]);

    if (a_const && b_const) {
        uint64_t al = arg_info(op->args[2])->val;
        uint64_t ah = arg_info(op->args[3])->val;
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;
        TCGArg rl, rh;
        TCGOp *op2;

        if (ctx->type == TCG_TYPE_I32) {
            uint64_t a = deposit64(al, 32, 32, ah);
            uint64_t b = deposit64(bl, 32, 32, bh);

            if (add) {
                a += b;
            } else {
                a -= b;
            }

            al = sextract64(a, 0, 32);
            ah = sextract64(a, 32, 32);
        } else {
            Int128 a = int128_make128(al, ah);
            Int128 b = int128_make128(bl, bh);

            if (add) {
                a = int128_add(a, b);
            } else {
                a = int128_sub(a, b);
            }

            al = int128_getlo(a);
            ah = int128_gethi(a);
        }

        rl = op->args[0];
        rh = op->args[1];

        /* The proper opcode is supplied by tcg_opt_gen_mov. */
        op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2);

        tcg_opt_gen_movi(ctx, op, rl, al);
        tcg_opt_gen_movi(ctx, op2, rh, ah);
        return true;
    }

    /* Fold sub2 r,x,i to add2 r,x,-i */
    if (!add && b_const) {
        uint64_t bl = arg_info(op->args[4])->val;
        uint64_t bh = arg_info(op->args[5])->val;

        /* Negate the two parts without assembling and disassembling. */
        bl = -bl;
        bh = ~bh + !bl;

        op->opc = (ctx->type == TCG_TYPE_I32
                   ? INDEX_op_add2_i32 : INDEX_op_add2_i64);
        op->args[4] = arg_new_constant(ctx, bl);
        op->args[5] = arg_new_constant(ctx, bh);
    }
    return false;
}
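/*
 * The two-word negation above follows two's complement: the low part is
 * negated, and the high part is complemented plus a carry only when the
 * (new) low part is zero.  E.g. negating bh:bl = 1:0 gives
 * 0xffffffffffffffff:0, i.e. -(1 << 64) in 128-bit arithmetic.
 */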

static bool fold_add2(OptContext *ctx, TCGOp *op)
{
    /* Note that the high and low parts may be independently swapped. */
    swap_commutative(op->args[0], &op->args[2], &op->args[4]);
    swap_commutative(op->args[1], &op->args[3], &op->args[5]);

    return fold_addsub2(ctx, op, true);
}

static bool fold_and(OptContext *ctx, TCGOp *op)
{
    uint64_t z1, z2;

    if (fold_const2_commutative(ctx, op) ||
        fold_xi_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, -1) ||
        fold_xx_to_x(ctx, op)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;
    z2 = arg_info(op->args[2])->z_mask;
    ctx->z_mask = z1 & z2;

    /*
     * Sign repetitions are perforce all identical, whether they are 1 or 0.
     * Bitwise operations preserve the relative quantity of the repetitions.
     */
    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer affected bits from it.
     */
    if (arg_is_const(op->args[2])) {
        ctx->a_mask = z1 & ~z2;
    }

    return fold_masks(ctx, op);
}

static bool fold_andc(OptContext *ctx, TCGOp *op)
{
    uint64_t z1;

    if (fold_const2(ctx, op) ||
        fold_xx_to_i(ctx, op, 0) ||
        fold_xi_to_x(ctx, op, 0) ||
        fold_ix_to_not(ctx, op, -1)) {
        return true;
    }

    z1 = arg_info(op->args[1])->z_mask;

    /*
     * Known-zeros does not imply known-ones.  Therefore unless
     * arg2 is constant, we can't infer anything from it.
     */
    if (arg_is_const(op->args[2])) {
        uint64_t z2 = ~arg_info(op->args[2])->z_mask;
        ctx->a_mask = z1 & ~z2;
        z1 &= z2;
    }
    ctx->z_mask = z1;

    ctx->s_mask = arg_info(op->args[1])->s_mask
                & arg_info(op->args[2])->s_mask;
    return fold_masks(ctx, op);
}

static bool fold_brcond(OptContext *ctx, TCGOp *op)
{
    int i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[0],
                                      &op->args[1], &op->args[2]);
    if (i == 0) {
        tcg_op_remove(ctx->tcg, op);
        return true;
    }
    if (i > 0) {
        op->opc = INDEX_op_br;
        op->args[0] = op->args[3];
    }
    return false;
}

static bool fold_brcond2(OptContext *ctx, TCGOp *op)
{
    TCGCond cond;
    TCGArg label;
    int i, inv = 0;

    i = do_constant_folding_cond2(ctx, op, &op->args[0]);
    cond = op->args[4];
    label = op->args[5];
    if (i >= 0) {
        goto do_brcond_const;
    }

    switch (cond) {
    case TCG_COND_LT:
    case TCG_COND_GE:
        /*
         * Simplify LT/GE comparisons vs zero to a single compare
         * vs the high word of the input.
         */
        if (arg_is_const_val(op->args[2], 0) &&
            arg_is_const_val(op->args[3], 0)) {
            goto do_brcond_high;
        }
        break;

    case TCG_COND_NE:
        inv = 1;
        QEMU_FALLTHROUGH;
    case TCG_COND_EQ:
        /*
         * Simplify EQ/NE comparisons where one of the pairs
         * can be simplified.
         */
        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[0],
                                     op->args[2], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_high;
        }

        i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1],
                                     op->args[3], cond);
        switch (i ^ inv) {
        case 0:
            goto do_brcond_const;
        case 1:
            goto do_brcond_low;
        }
        break;

    case TCG_COND_TSTEQ:
    case TCG_COND_TSTNE:
        if (arg_is_const_val(op->args[2], 0)) {
            goto do_brcond_high;
        }
        if (arg_is_const_val(op->args[3], 0)) {
            goto do_brcond_low;
        }
        break;

    default:
        break;

    do_brcond_low:
        op->opc = INDEX_op_brcond_i32;
        op->args[1] = op->args[2];
        op->args[2] = cond;
        op->args[3] = label;
        return fold_brcond(ctx, op);

    do_brcond_high:
        op->opc = INDEX_op_brcond_i32;
        op->args[0] = op->args[1];
        op->args[1] = op->args[3];
        op->args[2] = cond;
        op->args[3] = label;
        return fold_brcond(ctx, op);

    do_brcond_const:
        if (i == 0) {
            tcg_op_remove(ctx->tcg, op);
            return true;
        }
        op->opc = INDEX_op_br;
        op->args[0] = label;
        break;
    }
    return false;
}

static bool fold_bswap(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask, s_mask, sign;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        t = do_constant_folding(op->opc, ctx->type, t, op->args[2]);
        return tcg_opt_gen_movi(ctx, op, op->args[0], t);
    }

    z_mask = arg_info(op->args[1])->z_mask;

    switch (op->opc) {
    case INDEX_op_bswap16_i32:
    case INDEX_op_bswap16_i64:
        z_mask = bswap16(z_mask);
        sign = INT16_MIN;
        break;
    case INDEX_op_bswap32_i32:
    case INDEX_op_bswap32_i64:
        z_mask = bswap32(z_mask);
        sign = INT32_MIN;
        break;
    case INDEX_op_bswap64_i64:
        z_mask = bswap64(z_mask);
        sign = INT64_MIN;
        break;
    default:
        g_assert_not_reached();
    }
    s_mask = smask_from_zmask(z_mask);

    switch (op->args[2] & (TCG_BSWAP_OZ | TCG_BSWAP_OS)) {
    case TCG_BSWAP_OZ:
        break;
    case TCG_BSWAP_OS:
        /* If the sign bit may be 1, force all the bits above to 1. */
        if (z_mask & sign) {
            z_mask |= sign;
            s_mask = sign << 1;
        }
        break;
    default:
        /* The high bits are undefined: force all bits above the sign to 1. */
        z_mask |= sign << 1;
        s_mask = 0;
        break;
    }
    ctx->z_mask = z_mask;
    ctx->s_mask = s_mask;

    return fold_masks(ctx, op);
}

static bool fold_call(OptContext *ctx, TCGOp *op)
{
    TCGContext *s = ctx->tcg;
    int nb_oargs = TCGOP_CALLO(op);
    int nb_iargs = TCGOP_CALLI(op);
    int flags, i;

    init_arguments(ctx, op, nb_oargs + nb_iargs);
    copy_propagate(ctx, op, nb_oargs, nb_iargs);

    /* If the function reads or writes globals, reset temp data. */
    flags = tcg_call_flags(op);
    if (!(flags & (TCG_CALL_NO_READ_GLOBALS | TCG_CALL_NO_WRITE_GLOBALS))) {
        int nb_globals = s->nb_globals;

        for (i = 0; i < nb_globals; i++) {
            if (test_bit(i, ctx->temps_used.l)) {
                reset_ts(ctx, &ctx->tcg->temps[i]);
            }
        }
    }

    /* If the function has side effects, reset mem data. */
    if (!(flags & TCG_CALL_NO_SIDE_EFFECTS)) {
        remove_mem_copy_all(ctx);
    }

    /* Reset temp data for outputs. */
    for (i = 0; i < nb_oargs; i++) {
        reset_temp(ctx, op->args[i]);
    }

    /* Stop optimizing MB across calls. */
    ctx->prev_mb = NULL;
    return true;
}

static bool fold_count_zeros(OptContext *ctx, TCGOp *op)
{
    uint64_t z_mask;

    if (arg_is_const(op->args[1])) {
        uint64_t t = arg_info(op->args[1])->val;

        if (t != 0) {
            t = do_constant_folding(op->opc, ctx->type, t, 0);
            return tcg_opt_gen_movi(ctx, op, op->args[0], t);
        }
        return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[2]);
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        z_mask = 31;
        break;
    case TCG_TYPE_I64:
        z_mask = 63;
        break;
    default:
        g_assert_not_reached();
    }
    ctx->z_mask = arg_info(op->args[2])->z_mask | z_mask;
    ctx->s_mask = smask_from_zmask(ctx->z_mask);
    return false;
}

static bool fold_ctpop(OptContext *ctx, TCGOp *op)
{
    if (fold_const1(ctx, op)) {
        return true;
    }

    switch (ctx->type) {
    case TCG_TYPE_I32:
        ctx->z_mask = 32 | 31;
        break;
    case TCG_TYPE_I64:
        ctx->z_mask = 64 | 63;
        break;
    default:
        g_assert_not_reached();
    }
    ctx->s_mask = smask_from_zmask(ctx->z_mask);
    return false;
}
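/*
 * The z_mask values above encode the result range: ctpop of a 32-bit value
 * is at most 32, which fits in the 6 bits of mask 32 | 31 (0x3f); likewise
 * a 64-bit ctpop fits in the 7 bits of 64 | 63 (0x7f).
 */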
1591 | ||
1b1907b8 RH |
1592 | static bool fold_deposit(OptContext *ctx, TCGOp *op) |
1593 | { | |
8f7a840d RH |
1594 | TCGOpcode and_opc; |
1595 | ||
1b1907b8 RH |
1596 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { |
1597 | uint64_t t1 = arg_info(op->args[1])->val; | |
1598 | uint64_t t2 = arg_info(op->args[2])->val; | |
1599 | ||
1600 | t1 = deposit64(t1, op->args[3], op->args[4], t2); | |
1601 | return tcg_opt_gen_movi(ctx, op, op->args[0], t1); | |
1602 | } | |
fae450ba | 1603 | |
8f7a840d RH |
1604 | switch (ctx->type) { |
1605 | case TCG_TYPE_I32: | |
1606 | and_opc = INDEX_op_and_i32; | |
1607 | break; | |
1608 | case TCG_TYPE_I64: | |
1609 | and_opc = INDEX_op_and_i64; | |
1610 | break; | |
1611 | default: | |
1612 | g_assert_not_reached(); | |
1613 | } | |
1614 | ||
1615 | /* Inserting a value into zero at offset 0. */ | |
27cdb85d | 1616 | if (arg_is_const_val(op->args[1], 0) && op->args[3] == 0) { |
8f7a840d RH |
1617 | uint64_t mask = MAKE_64BIT_MASK(0, op->args[4]); |
1618 | ||
1619 | op->opc = and_opc; | |
1620 | op->args[1] = op->args[2]; | |
26aac97c | 1621 | op->args[2] = arg_new_constant(ctx, mask); |
8f7a840d RH |
1622 | ctx->z_mask = mask & arg_info(op->args[1])->z_mask; |
1623 | return false; | |
1624 | } | |
1625 | ||
1626 | /* Inserting zero into a value. */ | |
27cdb85d | 1627 | if (arg_is_const_val(op->args[2], 0)) { |
8f7a840d RH |
1628 | uint64_t mask = deposit64(-1, op->args[3], op->args[4], 0); |
1629 | ||
1630 | op->opc = and_opc; | |
26aac97c | 1631 | op->args[2] = arg_new_constant(ctx, mask); |
8f7a840d RH |
1632 | ctx->z_mask = mask & arg_info(op->args[1])->z_mask; |
1633 | return false; | |
1634 | } | |
1635 | ||
fae450ba RH |
1636 | ctx->z_mask = deposit64(arg_info(op->args[1])->z_mask, |
1637 | op->args[3], op->args[4], | |
1638 | arg_info(op->args[2])->z_mask); | |
1b1907b8 RH |
1639 | return false; |
1640 | } | |
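/*
 * Both special cases above reduce the deposit to a plain AND.  With
 * illustrative operand values:
 *   deposit dest, 0, x, ofs=0, len=8  ->  and dest, x, 0xff
 *   deposit dest, x, 0, ofs=8, len=4  ->  and dest, x, 0xfffffffffffff0ff
 * where the second mask is deposit64(-1, 8, 4, 0), i.e. ~0xf00.
 */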
1641 | ||
2f9f08ba RH |
1642 | static bool fold_divide(OptContext *ctx, TCGOp *op) |
1643 | { | |
2f9d9a34 RH |
1644 | if (fold_const2(ctx, op) || |
1645 | fold_xi_to_x(ctx, op, 1)) { | |
1646 | return true; | |
1647 | } | |
1648 | return false; | |
2f9f08ba RH |
1649 | } |
1650 | ||
8cdb3fcb RH |
1651 | static bool fold_dup(OptContext *ctx, TCGOp *op) |
1652 | { | |
1653 | if (arg_is_const(op->args[1])) { | |
1654 | uint64_t t = arg_info(op->args[1])->val; | |
1655 | t = dup_const(TCGOP_VECE(op), t); | |
1656 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | |
1657 | } | |
1658 | return false; | |
1659 | } | |
1660 | ||
1661 | static bool fold_dup2(OptContext *ctx, TCGOp *op) | |
1662 | { | |
1663 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | |
1664 | uint64_t t = deposit64(arg_info(op->args[1])->val, 32, 32, | |
1665 | arg_info(op->args[2])->val); | |
1666 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); | |
1667 | } | |
1668 | ||
1669 | if (args_are_copies(op->args[1], op->args[2])) { | |
1670 | op->opc = INDEX_op_dup_vec; | |
1671 | TCGOP_VECE(op) = MO_32; | |
1672 | } | |
1673 | return false; | |
1674 | } | |
1675 | ||
2f9f08ba RH |
1676 | static bool fold_eqv(OptContext *ctx, TCGOp *op) |
1677 | { | |
7a2f7084 | 1678 | if (fold_const2_commutative(ctx, op) || |
a63ce0e9 | 1679 | fold_xi_to_x(ctx, op, -1) || |
0e0a32ba RH |
1680 | fold_xi_to_not(ctx, op, 0)) { |
1681 | return true; | |
1682 | } | |
3f2b1f83 RH |
1683 | |
1684 | ctx->s_mask = arg_info(op->args[1])->s_mask | |
1685 | & arg_info(op->args[2])->s_mask; | |
0e0a32ba | 1686 | return false; |
2f9f08ba RH |
1687 | } |
1688 | ||
b6617c88 RH |
1689 | static bool fold_extract(OptContext *ctx, TCGOp *op) |
1690 | { | |
fae450ba | 1691 | uint64_t z_mask_old, z_mask; |
57fe5c6d RH |
1692 | int pos = op->args[2]; |
1693 | int len = op->args[3]; | |
fae450ba | 1694 | |
b6617c88 RH |
1695 | if (arg_is_const(op->args[1])) { |
1696 | uint64_t t; | |
1697 | ||
1698 | t = arg_info(op->args[1])->val; | |
57fe5c6d | 1699 | t = extract64(t, pos, len); |
b6617c88 RH |
1700 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); |
1701 | } | |
fae450ba RH |
1702 | |
1703 | z_mask_old = arg_info(op->args[1])->z_mask; | |
57fe5c6d RH |
1704 | z_mask = extract64(z_mask_old, pos, len); |
1705 | if (pos == 0) { | |
fae450ba RH |
1706 | ctx->a_mask = z_mask_old ^ z_mask; |
1707 | } | |
1708 | ctx->z_mask = z_mask; | |
57fe5c6d | 1709 | ctx->s_mask = smask_from_zmask(z_mask); |
fae450ba RH |
1710 | |
1711 | return fold_masks(ctx, op); | |
b6617c88 RH |
1712 | } |
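/*
 * The a_mask computed above lets fold_masks drop a redundant extract: if,
 * say, args[1] is already known to fit in 12 bits (z_mask_old == 0xfff),
 * then "extract dest, x, 0, 16" changes nothing, z_mask_old ^ z_mask == 0,
 * and the operation is converted into a mov of args[1].  The shortcut only
 * applies for pos == 0, since otherwise the extract also shifts.
 */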
1713 | ||
dcd08996 RH |
1714 | static bool fold_extract2(OptContext *ctx, TCGOp *op) |
1715 | { | |
1716 | if (arg_is_const(op->args[1]) && arg_is_const(op->args[2])) { | |
1717 | uint64_t v1 = arg_info(op->args[1])->val; | |
1718 | uint64_t v2 = arg_info(op->args[2])->val; | |
1719 | int shr = op->args[3]; | |
1720 | ||
1721 | if (op->opc == INDEX_op_extract2_i64) { | |
1722 | v1 >>= shr; | |
1723 | v2 <<= 64 - shr; | |
1724 | } else { | |
1725 | v1 = (uint32_t)v1 >> shr; | |
225bec0c | 1726 | v2 = (uint64_t)((int32_t)v2 << (32 - shr)); |
dcd08996 RH |
1727 | } |
1728 | return tcg_opt_gen_movi(ctx, op, op->args[0], v1 | v2); | |
1729 | } | |
1730 | return false; | |
1731 | } | |
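/*
 * For the 32-bit case the fold above computes bits [shr, shr+32) of the
 * 64-bit pair v2:v1.  As a standalone sketch of that arithmetic (the
 * helper name and values are illustrative only, not used by the optimizer):
 */
static inline uint32_t fold_extract2_i32_example(uint32_t v1, uint32_t v2,
                                                 unsigned shr)
{
    /* e.g. v1 = 0x11223344, v2 = 0xaabbccdd, shr = 8 -> 0xdd112233 */
    return (uint32_t)((((uint64_t)v2 << 32) | v1) >> shr);
}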
1732 | ||
2f9f08ba RH |
1733 | static bool fold_exts(OptContext *ctx, TCGOp *op) |
1734 | { | |
57fe5c6d | 1735 | uint64_t s_mask_old, s_mask, z_mask, sign; |
fae450ba RH |
1736 | bool type_change = false; |
1737 | ||
1738 | if (fold_const1(ctx, op)) { | |
1739 | return true; | |
1740 | } | |
1741 | ||
57fe5c6d RH |
1742 | z_mask = arg_info(op->args[1])->z_mask; |
1743 | s_mask = arg_info(op->args[1])->s_mask; | |
1744 | s_mask_old = s_mask; | |
fae450ba RH |
1745 | |
1746 | switch (op->opc) { | |
1747 | CASE_OP_32_64(ext8s): | |
1748 | sign = INT8_MIN; | |
1749 | z_mask = (uint8_t)z_mask; | |
1750 | break; | |
1751 | CASE_OP_32_64(ext16s): | |
1752 | sign = INT16_MIN; | |
1753 | z_mask = (uint16_t)z_mask; | |
1754 | break; | |
1755 | case INDEX_op_ext_i32_i64: | |
1756 | type_change = true; | |
1757 | QEMU_FALLTHROUGH; | |
1758 | case INDEX_op_ext32s_i64: | |
1759 | sign = INT32_MIN; | |
1760 | z_mask = (uint32_t)z_mask; | |
1761 | break; | |
1762 | default: | |
1763 | g_assert_not_reached(); | |
1764 | } | |
1765 | ||
1766 | if (z_mask & sign) { | |
1767 | z_mask |= sign; | |
fae450ba | 1768 | } |
57fe5c6d RH |
1769 | s_mask |= sign << 1; |
1770 | ||
fae450ba | 1771 | ctx->z_mask = z_mask; |
57fe5c6d RH |
1772 | ctx->s_mask = s_mask; |
1773 | if (!type_change) { | |
1774 | ctx->a_mask = s_mask & ~s_mask_old; | |
1775 | } | |
fae450ba RH |
1776 | |
1777 | return fold_masks(ctx, op); | |
2f9f08ba RH |
1778 | } |
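/*
 * Example for ext8s: sign == 0x...ff80, so s_mask gains bits 8..63
 * (sign << 1), and if bit 7 of z_mask is set the result may be negative,
 * forcing all high z_mask bits on.  When the input was already
 * sign-extended from bit 7 (its old s_mask covers bits 8..63), the new
 * s_mask adds nothing, a_mask == 0, and fold_masks turns the extension
 * into a mov.  That shortcut is skipped for ext_i32_i64 because the
 * output type differs from the input type.
 */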
1779 | ||
1780 | static bool fold_extu(OptContext *ctx, TCGOp *op) | |
1781 | { | |
fae450ba RH |
1782 | uint64_t z_mask_old, z_mask; |
1783 | bool type_change = false; | |
1784 | ||
1785 | if (fold_const1(ctx, op)) { | |
1786 | return true; | |
1787 | } | |
1788 | ||
1789 | z_mask_old = z_mask = arg_info(op->args[1])->z_mask; | |
1790 | ||
1791 | switch (op->opc) { | |
1792 | CASE_OP_32_64(ext8u): | |
1793 | z_mask = (uint8_t)z_mask; | |
1794 | break; | |
1795 | CASE_OP_32_64(ext16u): | |
1796 | z_mask = (uint16_t)z_mask; | |
1797 | break; | |
1798 | case INDEX_op_extrl_i64_i32: | |
1799 | case INDEX_op_extu_i32_i64: | |
1800 | type_change = true; | |
1801 | QEMU_FALLTHROUGH; | |
1802 | case INDEX_op_ext32u_i64: | |
1803 | z_mask = (uint32_t)z_mask; | |
1804 | break; | |
1805 | case INDEX_op_extrh_i64_i32: | |
1806 | type_change = true; | |
1807 | z_mask >>= 32; | |
1808 | break; | |
1809 | default: | |
1810 | g_assert_not_reached(); | |
1811 | } | |
1812 | ||
1813 | ctx->z_mask = z_mask; | |
57fe5c6d | 1814 | ctx->s_mask = smask_from_zmask(z_mask); |
fae450ba RH |
1815 | if (!type_change) { |
1816 | ctx->a_mask = z_mask_old ^ z_mask; | |
1817 | } | |
1818 | return fold_masks(ctx, op); | |
2f9f08ba RH |
1819 | } |
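/*
 * Example for ext16u: if the input's z_mask already fits in 0xffff, then
 * z_mask_old ^ z_mask == 0, a_mask == 0, and fold_masks replaces the
 * zero-extension with a mov; otherwise the result's z_mask is just the low
 * 16 bits of the old mask.  As with fold_exts, the a_mask shortcut is not
 * used when the operation changes type (extu_i32_i64, extrl/extrh).
 */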
1820 | ||
3eefdf2b RH |
1821 | static bool fold_mb(OptContext *ctx, TCGOp *op) |
1822 | { | |
1823 | /* Eliminate duplicate and redundant fence instructions. */ | |
1824 | if (ctx->prev_mb) { | |
1825 | /* | |
1826 | * Merge two barriers of the same type into one, | |
1827 | * or a weaker barrier into a stronger one, | |
1828 | * or two weaker barriers into a stronger one. | |
1829 | * mb X; mb Y => mb X|Y | |
1830 | * mb; strl => mb; st | |
1831 | * ldaq; mb => ld; mb | |
1832 | * ldaq; strl => ld; mb; st | |
1833 | * Other combinations are also merged into a strong | |
1834 | * barrier. This is stricter than specified but for | |
1835 | * the purposes of TCG is better than not optimizing. | |
1836 | */ | |
1837 | ctx->prev_mb->args[0] |= op->args[0]; | |
1838 | tcg_op_remove(ctx->tcg, op); | |
1839 | } else { | |
1840 | ctx->prev_mb = op; | |
1841 | } | |
1842 | return true; | |
1843 | } | |
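/*
 * For instance, back-to-back "mb TCG_MO_LD_LD | TCG_BAR_SC" and
 * "mb TCG_MO_ST_ST | TCG_BAR_SC" collapse into one barrier carrying the
 * OR of both argument sets; since each original flag set is a subset of
 * the union, the merged barrier is at least as strong as either one.
 */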
1844 | ||
2cfac7fa RH |
1845 | static bool fold_mov(OptContext *ctx, TCGOp *op) |
1846 | { | |
1847 | return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[1]); | |
1848 | } | |
1849 | ||
0c310a30 RH |
1850 | static bool fold_movcond(OptContext *ctx, TCGOp *op) |
1851 | { | |
7a2f7084 RH |
1852 | int i; |
1853 | ||
7a2f7084 RH |
1854 | /* |
1855 | * Canonicalize the "false" input reg to match the destination reg so | |
1856 | * that the tcg backend can implement a "move if true" operation. | |
1857 | */ | |
1858 | if (swap_commutative(op->args[0], &op->args[4], &op->args[3])) { | |
246c4b72 | 1859 | op->args[5] = tcg_invert_cond(op->args[5]); |
7a2f7084 | 1860 | } |
0c310a30 | 1861 | |
fb04ab7d | 1862 | i = do_constant_folding_cond1(ctx, op, NO_DEST, &op->args[1], |
246c4b72 | 1863 | &op->args[2], &op->args[5]); |
0c310a30 RH |
1864 | if (i >= 0) { |
1865 | return tcg_opt_gen_mov(ctx, op, op->args[0], op->args[4 - i]); | |
1866 | } | |
1867 | ||
fae450ba RH |
1868 | ctx->z_mask = arg_info(op->args[3])->z_mask |
1869 | | arg_info(op->args[4])->z_mask; | |
3f2b1f83 RH |
1870 | ctx->s_mask = arg_info(op->args[3])->s_mask |
1871 | & arg_info(op->args[4])->s_mask; | |
fae450ba | 1872 | |
0c310a30 RH |
1873 | if (arg_is_const(op->args[3]) && arg_is_const(op->args[4])) { |
1874 | uint64_t tv = arg_info(op->args[3])->val; | |
1875 | uint64_t fv = arg_info(op->args[4])->val; | |
3635502d | 1876 | TCGOpcode opc, negopc = 0; |
246c4b72 | 1877 | TCGCond cond = op->args[5]; |
0c310a30 | 1878 | |
67f84c96 RH |
1879 | switch (ctx->type) { |
1880 | case TCG_TYPE_I32: | |
1881 | opc = INDEX_op_setcond_i32; | |
3635502d RH |
1882 | if (TCG_TARGET_HAS_negsetcond_i32) { |
1883 | negopc = INDEX_op_negsetcond_i32; | |
1884 | } | |
1885 | tv = (int32_t)tv; | |
1886 | fv = (int32_t)fv; | |
67f84c96 RH |
1887 | break; |
1888 | case TCG_TYPE_I64: | |
1889 | opc = INDEX_op_setcond_i64; | |
3635502d RH |
1890 | if (TCG_TARGET_HAS_negsetcond_i64) { |
1891 | negopc = INDEX_op_negsetcond_i64; | |
1892 | } | |
67f84c96 RH |
1893 | break; |
1894 | default: | |
1895 | g_assert_not_reached(); | |
1896 | } | |
0c310a30 RH |
1897 | |
1898 | if (tv == 1 && fv == 0) { | |
1899 | op->opc = opc; | |
1900 | op->args[3] = cond; | |
1901 | } else if (fv == 1 && tv == 0) { | |
1902 | op->opc = opc; | |
1903 | op->args[3] = tcg_invert_cond(cond); | |
3635502d RH |
1904 | } else if (negopc) { |
1905 | if (tv == -1 && fv == 0) { | |
1906 | op->opc = negopc; | |
1907 | op->args[3] = cond; | |
1908 | } else if (fv == -1 && tv == 0) { | |
1909 | op->opc = negopc; | |
1910 | op->args[3] = tcg_invert_cond(cond); | |
1911 | } | |
0c310a30 RH |
1912 | } |
1913 | } | |
1914 | return false; | |
1915 | } | |
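/*
 * Illustrative rewrites performed by the constant-arm block above
 * (operand names are arbitrary):
 *   movcond dest, c1, c2, 1, 0, cond   ->  setcond dest, c1, c2, cond
 *   movcond dest, c1, c2, 0, 1, cond   ->  setcond dest, c1, c2, inv(cond)
 *   movcond dest, c1, c2, -1, 0, cond  ->  negsetcond dest, c1, c2, cond
 * with the negsetcond forms used only when the backend advertises
 * TCG_TARGET_HAS_negsetcond_* for the current type.
 */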
1916 | ||
2f9f08ba RH |
1917 | static bool fold_mul(OptContext *ctx, TCGOp *op) |
1918 | { | |
e8679955 | 1919 | if (fold_const2(ctx, op) || |
5b5cf479 RH |
1920 | fold_xi_to_i(ctx, op, 0) || |
1921 | fold_xi_to_x(ctx, op, 1)) { | |
e8679955 RH |
1922 | return true; |
1923 | } | |
1924 | return false; | |
2f9f08ba RH |
1925 | } |
1926 | ||
1927 | static bool fold_mul_highpart(OptContext *ctx, TCGOp *op) | |
1928 | { | |
7a2f7084 | 1929 | if (fold_const2_commutative(ctx, op) || |
e8679955 RH |
1930 | fold_xi_to_i(ctx, op, 0)) { |
1931 | return true; | |
1932 | } | |
1933 | return false; | |
2f9f08ba RH |
1934 | } |
1935 | ||
407112b0 | 1936 | static bool fold_multiply2(OptContext *ctx, TCGOp *op) |
6b8ac0d1 | 1937 | { |
7a2f7084 RH |
1938 | swap_commutative(op->args[0], &op->args[2], &op->args[3]); |
1939 | ||
6b8ac0d1 | 1940 | if (arg_is_const(op->args[2]) && arg_is_const(op->args[3])) { |
407112b0 RH |
1941 | uint64_t a = arg_info(op->args[2])->val; |
1942 | uint64_t b = arg_info(op->args[3])->val; | |
1943 | uint64_t h, l; | |
6b8ac0d1 | 1944 | TCGArg rl, rh; |
407112b0 RH |
1945 | TCGOp *op2; |
1946 | ||
1947 | switch (op->opc) { | |
1948 | case INDEX_op_mulu2_i32: | |
1949 | l = (uint64_t)(uint32_t)a * (uint32_t)b; | |
1950 | h = (int32_t)(l >> 32); | |
1951 | l = (int32_t)l; | |
1952 | break; | |
1953 | case INDEX_op_muls2_i32: | |
1954 | l = (int64_t)(int32_t)a * (int32_t)b; | |
1955 | h = l >> 32; | |
1956 | l = (int32_t)l; | |
1957 | break; | |
1958 | case INDEX_op_mulu2_i64: | |
1959 | mulu64(&l, &h, a, b); | |
1960 | break; | |
1961 | case INDEX_op_muls2_i64: | |
1962 | muls64(&l, &h, a, b); | |
1963 | break; | |
1964 | default: | |
1965 | g_assert_not_reached(); | |
1966 | } | |
6b8ac0d1 RH |
1967 | |
1968 | rl = op->args[0]; | |
1969 | rh = op->args[1]; | |
407112b0 RH |
1970 | |
1971 | /* The proper opcode is supplied by tcg_opt_gen_mov. */ | |
d4478943 | 1972 | op2 = tcg_op_insert_before(ctx->tcg, op, 0, 2); |
407112b0 RH |
1973 | |
1974 | tcg_opt_gen_movi(ctx, op, rl, l); | |
1975 | tcg_opt_gen_movi(ctx, op2, rh, h); | |
6b8ac0d1 RH |
1976 | return true; |
1977 | } | |
1978 | return false; | |
1979 | } | |
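/*
 * The 32-bit constant cases above are one widening multiply split into two
 * movi ops.  A standalone sketch of the unsigned variant (illustrative
 * helper, not used by the optimizer):
 */
static inline void fold_mulu2_i32_example(uint32_t a, uint32_t b,
                                          uint32_t *lo, uint32_t *hi)
{
    uint64_t prod = (uint64_t)a * b;  /* full 64-bit product */
    *lo = (uint32_t)prod;             /* value for the low-half movi */
    *hi = (uint32_t)(prod >> 32);     /* value for the high-half movi */
}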
1980 | ||
2f9f08ba RH |
1981 | static bool fold_nand(OptContext *ctx, TCGOp *op) |
1982 | { | |
7a2f7084 | 1983 | if (fold_const2_commutative(ctx, op) || |
0e0a32ba RH |
1984 | fold_xi_to_not(ctx, op, -1)) { |
1985 | return true; | |
1986 | } | |
3f2b1f83 RH |
1987 | |
1988 | ctx->s_mask = arg_info(op->args[1])->s_mask | |
1989 | & arg_info(op->args[2])->s_mask; | |
0e0a32ba | 1990 | return false; |
2f9f08ba RH |
1991 | } |
1992 | ||
1993 | static bool fold_neg(OptContext *ctx, TCGOp *op) | |
1994 | { | |
fae450ba RH |
1995 | uint64_t z_mask; |
1996 | ||
9caca88a RH |
1997 | if (fold_const1(ctx, op)) { |
1998 | return true; | |
1999 | } | |
fae450ba RH |
2000 | |
2001 | /* Set to 1 all bits at, and to the left of, the rightmost bit that may be set. */ | 
2002 | z_mask = arg_info(op->args[1])->z_mask; | |
2003 | ctx->z_mask = -(z_mask & -z_mask); | |
2004 | ||
9caca88a RH |
2005 | /* |
2006 | * Because of fold_sub_to_neg, we want to always return true, | |
2007 | * via finish_folding. | |
2008 | */ | |
2009 | finish_folding(ctx, op); | |
2010 | return true; | |
2f9f08ba RH |
2011 | } |
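/*
 * Worked example of the z_mask trick above: if the operand's z_mask is
 * 0x18 (value is one of 0, 8, 16, 24), then z_mask & -z_mask isolates
 * bit 3 and -(1 << 3) == 0x...fff8, so every bit from bit 3 upward may be
 * set in the negated result while bits 0-2 stay known zero.
 */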
2012 | ||
2013 | static bool fold_nor(OptContext *ctx, TCGOp *op) | |
2014 | { | |
7a2f7084 | 2015 | if (fold_const2_commutative(ctx, op) || |
0e0a32ba RH |
2016 | fold_xi_to_not(ctx, op, 0)) { |
2017 | return true; | |
2018 | } | |
3f2b1f83 RH |
2019 | |
2020 | ctx->s_mask = arg_info(op->args[1])->s_mask | |
2021 | & arg_info(op->args[2])->s_mask; | |
0e0a32ba | 2022 | return false; |
2f9f08ba RH |
2023 | } |
2024 | ||
2025 | static bool fold_not(OptContext *ctx, TCGOp *op) | |
2026 | { | |
0e0a32ba RH |
2027 | if (fold_const1(ctx, op)) { |
2028 | return true; | |
2029 | } | |
2030 | ||
3f2b1f83 RH |
2031 | ctx->s_mask = arg_info(op->args[1])->s_mask; |
2032 | ||
0e0a32ba RH |
2033 | /* Because of fold_to_not, we want to always return true, via finish. */ |
2034 | finish_folding(ctx, op); | |
2035 | return true; | |
2f9f08ba RH |
2036 | } |
2037 | ||
2038 | static bool fold_or(OptContext *ctx, TCGOp *op) | |
2039 | { | |
7a2f7084 | 2040 | if (fold_const2_commutative(ctx, op) || |
a63ce0e9 | 2041 | fold_xi_to_x(ctx, op, 0) || |
ca7bb049 RH |
2042 | fold_xx_to_x(ctx, op)) { |
2043 | return true; | |
2044 | } | |
fae450ba RH |
2045 | |
2046 | ctx->z_mask = arg_info(op->args[1])->z_mask | |
2047 | | arg_info(op->args[2])->z_mask; | |
3f2b1f83 RH |
2048 | ctx->s_mask = arg_info(op->args[1])->s_mask |
2049 | & arg_info(op->args[2])->s_mask; | |
fae450ba | 2050 | return fold_masks(ctx, op); |
2f9f08ba RH |
2051 | } |
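/*
 * The mask combination above follows the usual rule for bitwise ops: a
 * result bit can be 1 only if it may be 1 in either input (z_masks OR
 * together), while a bit is a guaranteed sign repetition only if it is one
 * in both inputs (s_masks AND together).  The same pattern appears in
 * fold_xor and in the inverting ops (eqv, nand, nor, orc).
 */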
2052 | ||
2053 | static bool fold_orc(OptContext *ctx, TCGOp *op) | |
2054 | { | |
0e0a32ba | 2055 | if (fold_const2(ctx, op) || |
4e858d96 | 2056 | fold_xx_to_i(ctx, op, -1) || |
a63ce0e9 | 2057 | fold_xi_to_x(ctx, op, -1) || |
0e0a32ba RH |
2058 | fold_ix_to_not(ctx, op, 0)) { |
2059 | return true; | |
2060 | } | |
3f2b1f83 RH |
2061 | |
2062 | ctx->s_mask = arg_info(op->args[1])->s_mask | |
2063 | & arg_info(op->args[2])->s_mask; | |
0e0a32ba | 2064 | return false; |
2f9f08ba RH |
2065 | } |
2066 | ||
3eefdf2b RH |
2067 | static bool fold_qemu_ld(OptContext *ctx, TCGOp *op) |
2068 | { | |
fae450ba RH |
2069 | const TCGOpDef *def = &tcg_op_defs[op->opc]; |
2070 | MemOpIdx oi = op->args[def->nb_oargs + def->nb_iargs]; | |
2071 | MemOp mop = get_memop(oi); | |
2072 | int width = 8 * memop_size(mop); | |
2073 | ||
57fe5c6d RH |
2074 | if (width < 64) { |
2075 | ctx->s_mask = MAKE_64BIT_MASK(width, 64 - width); | |
2076 | if (!(mop & MO_SIGN)) { | |
2077 | ctx->z_mask = MAKE_64BIT_MASK(0, width); | |
2078 | ctx->s_mask <<= 1; | |
2079 | } | |
fae450ba RH |
2080 | } |
2081 | ||
3eefdf2b RH |
2082 | /* Opcodes that touch guest memory stop the mb optimization. */ |
2083 | ctx->prev_mb = NULL; | |
2084 | return false; | |
2085 | } | |
2086 | ||
2087 | static bool fold_qemu_st(OptContext *ctx, TCGOp *op) | |
2088 | { | |
2089 | /* Opcodes that touch guest memory stop the mb optimization. */ | |
2090 | ctx->prev_mb = NULL; | |
2091 | return false; | |
2092 | } | |
2093 | ||
2f9f08ba RH |
2094 | static bool fold_remainder(OptContext *ctx, TCGOp *op) |
2095 | { | |
267c17e8 RH |
2096 | if (fold_const2(ctx, op) || |
2097 | fold_xx_to_i(ctx, op, 0)) { | |
2098 | return true; | |
2099 | } | |
2100 | return false; | |
2f9f08ba RH |
2101 | } |
2102 | ||
ceb9ee06 RH |
2103 | static void fold_setcond_tst_pow2(OptContext *ctx, TCGOp *op, bool neg) |
2104 | { | |
ff202817 PB |
2105 | TCGOpcode and_opc, sub_opc, xor_opc, neg_opc, shr_opc; |
2106 | TCGOpcode uext_opc = 0, sext_opc = 0; | |
ceb9ee06 RH |
2107 | TCGCond cond = op->args[3]; |
2108 | TCGArg ret, src1, src2; | |
2109 | TCGOp *op2; | |
2110 | uint64_t val; | |
2111 | int sh; | |
2112 | bool inv; | |
2113 | ||
2114 | if (!is_tst_cond(cond) || !arg_is_const(op->args[2])) { | |
2115 | return; | |
2116 | } | |
2117 | ||
2118 | src2 = op->args[2]; | |
2119 | val = arg_info(src2)->val; | |
2120 | if (!is_power_of_2(val)) { | |
2121 | return; | |
2122 | } | |
2123 | sh = ctz64(val); | |
2124 | ||
2125 | switch (ctx->type) { | |
2126 | case TCG_TYPE_I32: | |
2127 | and_opc = INDEX_op_and_i32; | |
2128 | sub_opc = INDEX_op_sub_i32; | |
2129 | xor_opc = INDEX_op_xor_i32; | |
2130 | shr_opc = INDEX_op_shr_i32; | |
2131 | neg_opc = INDEX_op_neg_i32; | |
2132 | if (TCG_TARGET_extract_i32_valid(sh, 1)) { | |
2133 | uext_opc = TCG_TARGET_HAS_extract_i32 ? INDEX_op_extract_i32 : 0; | |
2134 | sext_opc = TCG_TARGET_HAS_sextract_i32 ? INDEX_op_sextract_i32 : 0; | |
2135 | } | |
2136 | break; | |
2137 | case TCG_TYPE_I64: | |
2138 | and_opc = INDEX_op_and_i64; | |
2139 | sub_opc = INDEX_op_sub_i64; | |
2140 | xor_opc = INDEX_op_xor_i64; | |
2141 | shr_opc = INDEX_op_shr_i64; | |
2142 | neg_opc = INDEX_op_neg_i64; | |
2143 | if (TCG_TARGET_extract_i64_valid(sh, 1)) { | |
2144 | uext_opc = TCG_TARGET_HAS_extract_i64 ? INDEX_op_extract_i64 : 0; | |
2145 | sext_opc = TCG_TARGET_HAS_sextract_i64 ? INDEX_op_sextract_i64 : 0; | |
2146 | } | |
2147 | break; | |
2148 | default: | |
2149 | g_assert_not_reached(); | |
2150 | } | |
2151 | ||
2152 | ret = op->args[0]; | |
2153 | src1 = op->args[1]; | |
2154 | inv = cond == TCG_COND_TSTEQ; | |
2155 | ||
2156 | if (sh && sext_opc && neg && !inv) { | |
2157 | op->opc = sext_opc; | |
2158 | op->args[1] = src1; | |
2159 | op->args[2] = sh; | |
2160 | op->args[3] = 1; | |
2161 | return; | |
2162 | } else if (sh && uext_opc) { | |
2163 | op->opc = uext_opc; | |
2164 | op->args[1] = src1; | |
2165 | op->args[2] = sh; | |
2166 | op->args[3] = 1; | |
2167 | } else { | |
2168 | if (sh) { | |
2169 | op2 = tcg_op_insert_before(ctx->tcg, op, shr_opc, 3); | |
2170 | op2->args[0] = ret; | |
2171 | op2->args[1] = src1; | |
2172 | op2->args[2] = arg_new_constant(ctx, sh); | |
2173 | src1 = ret; | |
2174 | } | |
2175 | op->opc = and_opc; | |
2176 | op->args[1] = src1; | |
2177 | op->args[2] = arg_new_constant(ctx, 1); | |
2178 | } | |
2179 | ||
2180 | if (neg && inv) { | |
2181 | op2 = tcg_op_insert_after(ctx->tcg, op, sub_opc, 3); | |
2182 | op2->args[0] = ret; | |
2183 | op2->args[1] = ret; | |
2184 | op2->args[2] = arg_new_constant(ctx, 1); | |
2185 | } else if (inv) { | |
2186 | op2 = tcg_op_insert_after(ctx->tcg, op, xor_opc, 3); | |
2187 | op2->args[0] = ret; | |
2188 | op2->args[1] = ret; | |
2189 | op2->args[2] = arg_new_constant(ctx, 1); | |
2190 | } else if (neg) { | |
2191 | op2 = tcg_op_insert_after(ctx->tcg, op, neg_opc, 2); | |
2192 | op2->args[0] = ret; | |
2193 | op2->args[1] = ret; | |
2194 | } | |
2195 | } | |
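/*
 * Example of the transformation above for a power-of-two test constant
 * (register names are illustrative):
 *   setcond_i32 r, x, 0x10, TSTNE  ->  extract_i32 r, x, 4, 1
 *   setcond_i32 r, x, 0x10, TSTEQ  ->  extract_i32 r, x, 4, 1; xor_i32 r, r, 1
 * falling back to shr + and when no suitable extract exists, and using
 * sextract or a trailing neg/sub when the negsetcond variant needs -1/0.
 */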
2196 | ||
c63ff55c RH |
2197 | static bool fold_setcond(OptContext *ctx, TCGOp *op) |
2198 | { | |
fb04ab7d | 2199 | int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1], |
246c4b72 | 2200 | &op->args[2], &op->args[3]); |
c63ff55c RH |
2201 | if (i >= 0) { |
2202 | return tcg_opt_gen_movi(ctx, op, op->args[0], i); | |
2203 | } | |
ceb9ee06 | 2204 | fold_setcond_tst_pow2(ctx, op, false); |
fae450ba RH |
2205 | |
2206 | ctx->z_mask = 1; | |
275d7d8e | 2207 | ctx->s_mask = smask_from_zmask(1); |
c63ff55c RH |
2208 | return false; |
2209 | } | |
2210 | ||
3635502d RH |
2211 | static bool fold_negsetcond(OptContext *ctx, TCGOp *op) |
2212 | { | |
fb04ab7d | 2213 | int i = do_constant_folding_cond1(ctx, op, op->args[0], &op->args[1], |
246c4b72 | 2214 | &op->args[2], &op->args[3]); |
3635502d RH |
2215 | if (i >= 0) { |
2216 | return tcg_opt_gen_movi(ctx, op, op->args[0], -i); | |
2217 | } | |
ceb9ee06 | 2218 | fold_setcond_tst_pow2(ctx, op, true); |
3635502d RH |
2219 | |
2220 | /* Value is {0,-1} so all bits are repetitions of the sign. */ | |
2221 | ctx->s_mask = -1; | |
2222 | return false; | |
2223 | } | |
2224 | ||
bc47b1aa RH |
2225 | static bool fold_setcond2(OptContext *ctx, TCGOp *op) |
2226 | { | |
7e64b114 | 2227 | TCGCond cond; |
7a2f7084 | 2228 | int i, inv = 0; |
bc47b1aa | 2229 | |
fb04ab7d | 2230 | i = do_constant_folding_cond2(ctx, op, &op->args[1]); |
7e64b114 | 2231 | cond = op->args[5]; |
bc47b1aa RH |
2232 | if (i >= 0) { |
2233 | goto do_setcond_const; | |
2234 | } | |
2235 | ||
2236 | switch (cond) { | |
2237 | case TCG_COND_LT: | |
2238 | case TCG_COND_GE: | |
2239 | /* | |
2240 | * Simplify LT/GE comparisons vs zero to a single compare | |
2241 | * vs the high word of the input. | |
2242 | */ | |
27cdb85d RH |
2243 | if (arg_is_const_val(op->args[3], 0) && |
2244 | arg_is_const_val(op->args[4], 0)) { | |
bc47b1aa RH |
2245 | goto do_setcond_high; |
2246 | } | |
2247 | break; | |
2248 | ||
2249 | case TCG_COND_NE: | |
2250 | inv = 1; | |
2251 | QEMU_FALLTHROUGH; | |
2252 | case TCG_COND_EQ: | |
2253 | /* | |
2254 | * Simplify EQ/NE comparisons where one of the pairs | |
2255 | * can be simplified. | |
2256 | */ | |
67f84c96 | 2257 | i = do_constant_folding_cond(TCG_TYPE_I32, op->args[1], |
bc47b1aa RH |
2258 | op->args[3], cond); |
2259 | switch (i ^ inv) { | |
2260 | case 0: | |
2261 | goto do_setcond_const; | |
2262 | case 1: | |
2263 | goto do_setcond_high; | |
2264 | } | |
2265 | ||
67f84c96 | 2266 | i = do_constant_folding_cond(TCG_TYPE_I32, op->args[2], |
bc47b1aa RH |
2267 | op->args[4], cond); |
2268 | switch (i ^ inv) { | |
2269 | case 0: | |
2270 | goto do_setcond_const; | |
2271 | case 1: | |
ceb9ee06 RH |
2272 | goto do_setcond_low; |
2273 | } | |
2274 | break; | |
2275 | ||
2276 | case TCG_COND_TSTEQ: | |
2277 | case TCG_COND_TSTNE: | |
2278 | if (arg_is_const_val(op->args[2], 0)) { | |
2279 | goto do_setcond_high; | |
2280 | } | |
2281 | if (arg_is_const_val(op->args[4], 0)) { | |
2282 | goto do_setcond_low; | |
bc47b1aa RH |
2283 | } |
2284 | break; | |
2285 | ||
2286 | default: | |
2287 | break; | |
2288 | ||
ceb9ee06 RH |
2289 | do_setcond_low: |
2290 | op->args[2] = op->args[3]; | |
2291 | op->args[3] = cond; | |
2292 | op->opc = INDEX_op_setcond_i32; | |
2293 | return fold_setcond(ctx, op); | |
2294 | ||
bc47b1aa RH |
2295 | do_setcond_high: |
2296 | op->args[1] = op->args[2]; | |
2297 | op->args[2] = op->args[4]; | |
2298 | op->args[3] = cond; | |
2299 | op->opc = INDEX_op_setcond_i32; | |
ceb9ee06 | 2300 | return fold_setcond(ctx, op); |
bc47b1aa | 2301 | } |
fae450ba RH |
2302 | |
2303 | ctx->z_mask = 1; | |
275d7d8e | 2304 | ctx->s_mask = smask_from_zmask(1); |
bc47b1aa RH |
2305 | return false; |
2306 | ||
2307 | do_setcond_const: | |
2308 | return tcg_opt_gen_movi(ctx, op, op->args[0], i); | |
2309 | } | |
2310 | ||
b6617c88 RH |
2311 | static bool fold_sextract(OptContext *ctx, TCGOp *op) |
2312 | { | |
57fe5c6d RH |
2313 | uint64_t z_mask, s_mask, s_mask_old; |
2314 | int pos = op->args[2]; | |
2315 | int len = op->args[3]; | |
fae450ba | 2316 | |
b6617c88 RH |
2317 | if (arg_is_const(op->args[1])) { |
2318 | uint64_t t; | |
2319 | ||
2320 | t = arg_info(op->args[1])->val; | |
57fe5c6d | 2321 | t = sextract64(t, pos, len); |
b6617c88 RH |
2322 | return tcg_opt_gen_movi(ctx, op, op->args[0], t); |
2323 | } | |
fae450ba | 2324 | |
57fe5c6d RH |
2325 | z_mask = arg_info(op->args[1])->z_mask; |
2326 | z_mask = sextract64(z_mask, pos, len); | |
fae450ba RH |
2327 | ctx->z_mask = z_mask; |
2328 | ||
57fe5c6d RH |
2329 | s_mask_old = arg_info(op->args[1])->s_mask; |
2330 | s_mask = sextract64(s_mask_old, pos, len); | |
2331 | s_mask |= MAKE_64BIT_MASK(len, 64 - len); | |
2332 | ctx->s_mask = s_mask; | |
2333 | ||
2334 | if (pos == 0) { | |
2335 | ctx->a_mask = s_mask & ~s_mask_old; | |
2336 | } | |
2337 | ||
fae450ba | 2338 | return fold_masks(ctx, op); |
b6617c88 RH |
2339 | } |
2340 | ||
2f9f08ba RH |
2341 | static bool fold_shift(OptContext *ctx, TCGOp *op) |
2342 | { | |
93a967fb RH |
2343 | uint64_t s_mask, z_mask, sign; |
2344 | ||
a63ce0e9 | 2345 | if (fold_const2(ctx, op) || |
da48e272 | 2346 | fold_ix_to_i(ctx, op, 0) || |
a63ce0e9 RH |
2347 | fold_xi_to_x(ctx, op, 0)) { |
2348 | return true; | |
2349 | } | |
fae450ba | 2350 | |
93a967fb RH |
2351 | s_mask = arg_info(op->args[1])->s_mask; |
2352 | z_mask = arg_info(op->args[1])->z_mask; | |
2353 | ||
fae450ba | 2354 | if (arg_is_const(op->args[2])) { |
93a967fb RH |
2355 | int sh = arg_info(op->args[2])->val; |
2356 | ||
2357 | ctx->z_mask = do_constant_folding(op->opc, ctx->type, z_mask, sh); | |
2358 | ||
2359 | s_mask = do_constant_folding(op->opc, ctx->type, s_mask, sh); | |
2360 | ctx->s_mask = smask_from_smask(s_mask); | |
2361 | ||
fae450ba RH |
2362 | return fold_masks(ctx, op); |
2363 | } | |
93a967fb RH |
2364 | |
2365 | switch (op->opc) { | |
2366 | CASE_OP_32_64(sar): | |
2367 | /* | |
2368 | * Arithmetic right shift will not reduce the number of | |
2369 | * input sign repetitions. | |
2370 | */ | |
2371 | ctx->s_mask = s_mask; | |
2372 | break; | |
2373 | CASE_OP_32_64(shr): | |
2374 | /* | |
2375 | * If the sign bit is known zero, then logical right shift | |
2376 | * will not reduce the number of input sign repetitions. | 
2377 | */ | |
2378 | sign = (s_mask & -s_mask) >> 1; | |
2911e9b9 | 2379 | if (sign && !(z_mask & sign)) { |
93a967fb RH |
2380 | ctx->s_mask = s_mask; |
2381 | } | |
2382 | break; | |
2383 | default: | |
2384 | break; | |
2385 | } | |
2386 | ||
a63ce0e9 | 2387 | return false; |
2f9f08ba RH |
2388 | } |
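/*
 * Example for the constant-shift case above: with an input z_mask of 0xff
 * and a constant shl by 4, the folded z_mask is 0xff0.  For variable shift
 * counts only the sign information is tracked: sar never reduces the number
 * of sign repetitions, and shr preserves them only when the sign bit is
 * already known to be zero.
 */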
2389 | ||
9caca88a RH |
2390 | static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op) |
2391 | { | |
2392 | TCGOpcode neg_op; | |
2393 | bool have_neg; | |
2394 | ||
2395 | if (!arg_is_const(op->args[1]) || arg_info(op->args[1])->val != 0) { | |
2396 | return false; | |
2397 | } | |
2398 | ||
2399 | switch (ctx->type) { | |
2400 | case TCG_TYPE_I32: | |
2401 | neg_op = INDEX_op_neg_i32; | |
b701f195 | 2402 | have_neg = true; |
9caca88a RH |
2403 | break; |
2404 | case TCG_TYPE_I64: | |
2405 | neg_op = INDEX_op_neg_i64; | |
b701f195 | 2406 | have_neg = true; |
9caca88a RH |
2407 | break; |
2408 | case TCG_TYPE_V64: | |
2409 | case TCG_TYPE_V128: | |
2410 | case TCG_TYPE_V256: | |
2411 | neg_op = INDEX_op_neg_vec; | |
2412 | have_neg = (TCG_TARGET_HAS_neg_vec && | |
2413 | tcg_can_emit_vec_op(neg_op, ctx->type, TCGOP_VECE(op)) > 0); | |
2414 | break; | |
2415 | default: | |
2416 | g_assert_not_reached(); | |
2417 | } | |
2418 | if (have_neg) { | |
2419 | op->opc = neg_op; | |
2420 | op->args[1] = op->args[2]; | |
2421 | return fold_neg(ctx, op); | |
2422 | } | |
2423 | return false; | |
2424 | } | |
2425 | ||
c578ff18 RH |
2426 | /* We cannot yet use do_constant_folding with vectors. */ | 
2427 | static bool fold_sub_vec(OptContext *ctx, TCGOp *op) | |
2f9f08ba | 2428 | { |
c578ff18 | 2429 | if (fold_xx_to_i(ctx, op, 0) || |
a63ce0e9 | 2430 | fold_xi_to_x(ctx, op, 0) || |
9caca88a | 2431 | fold_sub_to_neg(ctx, op)) { |
cbe42fb2 RH |
2432 | return true; |
2433 | } | |
2434 | return false; | |
2f9f08ba RH |
2435 | } |
2436 | ||
c578ff18 RH |
2437 | static bool fold_sub(OptContext *ctx, TCGOp *op) |
2438 | { | |
6334a968 RH |
2439 | if (fold_const2(ctx, op) || fold_sub_vec(ctx, op)) { |
2440 | return true; | |
2441 | } | |
2442 | ||
2443 | /* Fold sub r,x,i to add r,x,-i */ | |
2444 | if (arg_is_const(op->args[2])) { | |
2445 | uint64_t val = arg_info(op->args[2])->val; | |
2446 | ||
2447 | op->opc = (ctx->type == TCG_TYPE_I32 | |
2448 | ? INDEX_op_add_i32 : INDEX_op_add_i64); | |
2449 | op->args[2] = arg_new_constant(ctx, -val); | |
2450 | } | |
2451 | return false; | |
c578ff18 RH |
2452 | } |
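/*
 * The rewrite above canonicalises subtraction of a constant, e.g.
 * "sub_i32 dest, x, 1" becomes "add_i32 dest, x, -1" (0xffffffff), so that
 * later folds and the backends see an ordinary add of an immediate.
 */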
2453 | ||
9531c078 | 2454 | static bool fold_sub2(OptContext *ctx, TCGOp *op) |
e3f7dc21 | 2455 | { |
9531c078 | 2456 | return fold_addsub2(ctx, op, false); |
e3f7dc21 RH |
2457 | } |
2458 | ||
fae450ba RH |
2459 | static bool fold_tcg_ld(OptContext *ctx, TCGOp *op) |
2460 | { | |
2461 | /* We can't do any folding with a load, but we can record bits. */ | |
2462 | switch (op->opc) { | |
57fe5c6d RH |
2463 | CASE_OP_32_64(ld8s): |
2464 | ctx->s_mask = MAKE_64BIT_MASK(8, 56); | |
2465 | break; | |
fae450ba RH |
2466 | CASE_OP_32_64(ld8u): |
2467 | ctx->z_mask = MAKE_64BIT_MASK(0, 8); | |
57fe5c6d RH |
2468 | ctx->s_mask = MAKE_64BIT_MASK(9, 55); |
2469 | break; | |
2470 | CASE_OP_32_64(ld16s): | |
2471 | ctx->s_mask = MAKE_64BIT_MASK(16, 48); | |
fae450ba RH |
2472 | break; |
2473 | CASE_OP_32_64(ld16u): | |
2474 | ctx->z_mask = MAKE_64BIT_MASK(0, 16); | |
57fe5c6d RH |
2475 | ctx->s_mask = MAKE_64BIT_MASK(17, 47); |
2476 | break; | |
2477 | case INDEX_op_ld32s_i64: | |
2478 | ctx->s_mask = MAKE_64BIT_MASK(32, 32); | |
fae450ba RH |
2479 | break; |
2480 | case INDEX_op_ld32u_i64: | |
2481 | ctx->z_mask = MAKE_64BIT_MASK(0, 32); | |
57fe5c6d | 2482 | ctx->s_mask = MAKE_64BIT_MASK(33, 31); |
fae450ba RH |
2483 | break; |
2484 | default: | |
2485 | g_assert_not_reached(); | |
2486 | } | |
2487 | return false; | |
2488 | } | |
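/*
 * Example of the masks recorded above: after ld8u the value lies in
 * [0, 0xff], so z_mask == 0xff and at least 55 bits below the sign bit are
 * redundant copies of it, recorded as s_mask == MAKE_64BIT_MASK(9, 55).
 * The signed loads provide no useful z_mask but guarantee the sign
 * repetitions instead, e.g. MAKE_64BIT_MASK(8, 56) for ld8s.
 */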
2489 | ||
ab84dc39 RH |
2490 | static bool fold_tcg_ld_memcopy(OptContext *ctx, TCGOp *op) |
2491 | { | |
2492 | TCGTemp *dst, *src; | |
2493 | intptr_t ofs; | |
2494 | TCGType type; | |
2495 | ||
2496 | if (op->args[1] != tcgv_ptr_arg(tcg_env)) { | |
2497 | return false; | |
2498 | } | |
2499 | ||
2500 | type = ctx->type; | |
2501 | ofs = op->args[2]; | |
2502 | dst = arg_temp(op->args[0]); | |
2503 | src = find_mem_copy_for(ctx, type, ofs); | |
2504 | if (src && src->base_type == type) { | |
2505 | return tcg_opt_gen_mov(ctx, op, temp_arg(dst), temp_arg(src)); | |
2506 | } | |
2507 | ||
2508 | reset_ts(ctx, dst); | |
2509 | record_mem_copy(ctx, type, dst, ofs, ofs + tcg_type_size(type) - 1); | |
2510 | return true; | |
2511 | } | |
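/*
 * In other words, a load from a fixed offset of env can reuse whichever
 * temp was last recorded as stored there with the same type, e.g. reading
 * back a guest register slot right after it was written.  When no such
 * copy is known, the load's destination becomes the recorded copy for
 * that range.
 */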
2512 | ||
2513 | static bool fold_tcg_st(OptContext *ctx, TCGOp *op) | |
2514 | { | |
2515 | intptr_t ofs = op->args[2]; | |
2516 | intptr_t lm1; | |
2517 | ||
2518 | if (op->args[1] != tcgv_ptr_arg(tcg_env)) { | |
2519 | remove_mem_copy_all(ctx); | |
2520 | return false; | |
2521 | } | |
2522 | ||
2523 | switch (op->opc) { | |
2524 | CASE_OP_32_64(st8): | |
2525 | lm1 = 0; | |
2526 | break; | |
2527 | CASE_OP_32_64(st16): | |
2528 | lm1 = 1; | |
2529 | break; | |
2530 | case INDEX_op_st32_i64: | |
2531 | case INDEX_op_st_i32: | |
2532 | lm1 = 3; | |
2533 | break; | |
2534 | case INDEX_op_st_i64: | |
2535 | lm1 = 7; | |
2536 | break; | |
2537 | case INDEX_op_st_vec: | |
2538 | lm1 = tcg_type_size(ctx->type) - 1; | |
2539 | break; | |
2540 | default: | |
2541 | g_assert_not_reached(); | |
2542 | } | |
2543 | remove_mem_copy_in(ctx, ofs, ofs + lm1); | |
2544 | return false; | |
2545 | } | |
2546 | ||
2547 | static bool fold_tcg_st_memcopy(OptContext *ctx, TCGOp *op) | |
2548 | { | |
2549 | TCGTemp *src; | |
2550 | intptr_t ofs, last; | |
2551 | TCGType type; | |
2552 | ||
2553 | if (op->args[1] != tcgv_ptr_arg(tcg_env)) { | |
2554 | fold_tcg_st(ctx, op); | |
2555 | return false; | |
2556 | } | |
2557 | ||
2558 | src = arg_temp(op->args[0]); | |
2559 | ofs = op->args[2]; | |
2560 | type = ctx->type; | |
3eaadaeb RH |
2561 | |
2562 | /* | |
2563 | * Eliminate duplicate stores of a constant. | |
2564 | * This happens frequently when the target ISA zero-extends. | |
2565 | */ | |
2566 | if (ts_is_const(src)) { | |
2567 | TCGTemp *prev = find_mem_copy_for(ctx, type, ofs); | |
2568 | if (src == prev) { | |
2569 | tcg_op_remove(ctx->tcg, op); | |
2570 | return true; | |
2571 | } | |
2572 | } | |
2573 | ||
ab84dc39 RH |
2574 | last = ofs + tcg_type_size(type) - 1; |
2575 | remove_mem_copy_in(ctx, ofs, last); | |
2576 | record_mem_copy(ctx, type, src, ofs, last); | |
2577 | return false; | |
2578 | } | |
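/*
 * Example of the constant-store elimination above: if the translator emits
 * the same "st_i32 $0, env, ofs" twice in a row (common when a target ISA
 * zero-extends into the same register slot), the second store finds the
 * identical constant temp already recorded for that offset and is removed.
 */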
2579 | ||
2f9f08ba RH |
2580 | static bool fold_xor(OptContext *ctx, TCGOp *op) |
2581 | { | |
7a2f7084 | 2582 | if (fold_const2_commutative(ctx, op) || |
0e0a32ba | 2583 | fold_xx_to_i(ctx, op, 0) || |
a63ce0e9 | 2584 | fold_xi_to_x(ctx, op, 0) || |
0e0a32ba | 2585 | fold_xi_to_not(ctx, op, -1)) { |
cbe42fb2 RH |
2586 | return true; |
2587 | } | |
fae450ba RH |
2588 | |
2589 | ctx->z_mask = arg_info(op->args[1])->z_mask | |
2590 | | arg_info(op->args[2])->z_mask; | |
3f2b1f83 RH |
2591 | ctx->s_mask = arg_info(op->args[1])->s_mask |
2592 | & arg_info(op->args[2])->s_mask; | |
fae450ba | 2593 | return fold_masks(ctx, op); |
2f9f08ba RH |
2594 | } |
2595 | ||
22613af4 | 2596 | /* Propagate constants and copies, fold constant expressions. */ |
36e60ef6 | 2597 | void tcg_optimize(TCGContext *s) |
8f2e8c07 | 2598 | { |
5cf32be7 | 2599 | int nb_temps, i; |
d0ed5151 | 2600 | TCGOp *op, *op_next; |
dc84988a | 2601 | OptContext ctx = { .tcg = s }; |
5d8f5363 | 2602 | |
ab84dc39 RH |
2603 | QSIMPLEQ_INIT(&ctx.mem_free); |
2604 | ||
22613af4 KB |
2605 | /* Each temp has optimization state (TempOptInfo) reached via state_ptr. | 
2606 | If the temp holds a constant then its value is kept there. | 
e590d4e6 AJ |
2607 | If the temp is a copy of other temps then the other copies are | 
2608 | available through the doubly linked circular list. */ | 
8f2e8c07 KB |
2609 | |
2610 | nb_temps = s->nb_temps; | |
8f17a975 RH |
2611 | for (i = 0; i < nb_temps; ++i) { |
2612 | s->temps[i].state_ptr = NULL; | |
2613 | } | |
8f2e8c07 | 2614 | |
15fa08f8 | 2615 | QTAILQ_FOREACH_SAFE(op, &s->ops, link, op_next) { |
c45cb8bb | 2616 | TCGOpcode opc = op->opc; |
5cf32be7 | 2617 | const TCGOpDef *def; |
404a148d | 2618 | bool done = false; |
c45cb8bb | 2619 | |
5cf32be7 | 2620 | /* Calls are special. */ |
c45cb8bb | 2621 | if (opc == INDEX_op_call) { |
5cf32be7 RH |
2622 | fold_call(&ctx, op); |
2623 | continue; | |
cf066674 | 2624 | } |
5cf32be7 RH |
2625 | |
2626 | def = &tcg_op_defs[opc]; | |
ec5d4cbe RH |
2627 | init_arguments(&ctx, op, def->nb_oargs + def->nb_iargs); |
2628 | copy_propagate(&ctx, op, def->nb_oargs, def->nb_iargs); | |
22613af4 | 2629 | |
67f84c96 RH |
2630 | /* Pre-compute the type of the operation. */ |
2631 | if (def->flags & TCG_OPF_VECTOR) { | |
2632 | ctx.type = TCG_TYPE_V64 + TCGOP_VECL(op); | |
2633 | } else if (def->flags & TCG_OPF_64BIT) { | |
2634 | ctx.type = TCG_TYPE_I64; | |
2635 | } else { | |
2636 | ctx.type = TCG_TYPE_I32; | |
2637 | } | |
2638 | ||
57fe5c6d | 2639 | /* Assume all bits affected, no bits known zero, no sign reps. */ |
fae450ba RH |
2640 | ctx.a_mask = -1; |
2641 | ctx.z_mask = -1; | |
57fe5c6d | 2642 | ctx.s_mask = 0; |
633f6502 | 2643 | |
2cfac7fa RH |
2644 | /* |
2645 | * Process each opcode. | |
2646 | * Sorted alphabetically by opcode as much as possible. | |
2647 | */ | |
c45cb8bb | 2648 | switch (opc) { |
c578ff18 | 2649 | CASE_OP_32_64(add): |
2f9f08ba RH |
2650 | done = fold_add(&ctx, op); |
2651 | break; | |
c578ff18 RH |
2652 | case INDEX_op_add_vec: |
2653 | done = fold_add_vec(&ctx, op); | |
2654 | break; | |
9531c078 RH |
2655 | CASE_OP_32_64(add2): |
2656 | done = fold_add2(&ctx, op); | |
e3f7dc21 | 2657 | break; |
2f9f08ba RH |
2658 | CASE_OP_32_64_VEC(and): |
2659 | done = fold_and(&ctx, op); | |
2660 | break; | |
2661 | CASE_OP_32_64_VEC(andc): | |
2662 | done = fold_andc(&ctx, op); | |
2663 | break; | |
079b0804 RH |
2664 | CASE_OP_32_64(brcond): |
2665 | done = fold_brcond(&ctx, op); | |
2666 | break; | |
764d2aba RH |
2667 | case INDEX_op_brcond2_i32: |
2668 | done = fold_brcond2(&ctx, op); | |
2669 | break; | |
09bacdc2 RH |
2670 | CASE_OP_32_64(bswap16): |
2671 | CASE_OP_32_64(bswap32): | |
2672 | case INDEX_op_bswap64_i64: | |
2673 | done = fold_bswap(&ctx, op); | |
2674 | break; | |
30dd0bfe RH |
2675 | CASE_OP_32_64(clz): |
2676 | CASE_OP_32_64(ctz): | |
2677 | done = fold_count_zeros(&ctx, op); | |
2678 | break; | |
2f9f08ba RH |
2679 | CASE_OP_32_64(ctpop): |
2680 | done = fold_ctpop(&ctx, op); | |
2681 | break; | |
1b1907b8 RH |
2682 | CASE_OP_32_64(deposit): |
2683 | done = fold_deposit(&ctx, op); | |
2684 | break; | |
2f9f08ba RH |
2685 | CASE_OP_32_64(div): |
2686 | CASE_OP_32_64(divu): | |
2687 | done = fold_divide(&ctx, op); | |
2688 | break; | |
8cdb3fcb RH |
2689 | case INDEX_op_dup_vec: |
2690 | done = fold_dup(&ctx, op); | |
2691 | break; | |
2692 | case INDEX_op_dup2_vec: | |
2693 | done = fold_dup2(&ctx, op); | |
2694 | break; | |
ed523473 | 2695 | CASE_OP_32_64_VEC(eqv): |
2f9f08ba RH |
2696 | done = fold_eqv(&ctx, op); |
2697 | break; | |
b6617c88 RH |
2698 | CASE_OP_32_64(extract): |
2699 | done = fold_extract(&ctx, op); | |
2700 | break; | |
dcd08996 RH |
2701 | CASE_OP_32_64(extract2): |
2702 | done = fold_extract2(&ctx, op); | |
2703 | break; | |
2f9f08ba RH |
2704 | CASE_OP_32_64(ext8s): |
2705 | CASE_OP_32_64(ext16s): | |
2706 | case INDEX_op_ext32s_i64: | |
2707 | case INDEX_op_ext_i32_i64: | |
2708 | done = fold_exts(&ctx, op); | |
2709 | break; | |
2710 | CASE_OP_32_64(ext8u): | |
2711 | CASE_OP_32_64(ext16u): | |
2712 | case INDEX_op_ext32u_i64: | |
2713 | case INDEX_op_extu_i32_i64: | |
2714 | case INDEX_op_extrl_i64_i32: | |
2715 | case INDEX_op_extrh_i64_i32: | |
2716 | done = fold_extu(&ctx, op); | |
2717 | break; | |
57fe5c6d | 2718 | CASE_OP_32_64(ld8s): |
fae450ba | 2719 | CASE_OP_32_64(ld8u): |
57fe5c6d | 2720 | CASE_OP_32_64(ld16s): |
fae450ba | 2721 | CASE_OP_32_64(ld16u): |
57fe5c6d | 2722 | case INDEX_op_ld32s_i64: |
fae450ba RH |
2723 | case INDEX_op_ld32u_i64: |
2724 | done = fold_tcg_ld(&ctx, op); | |
2725 | break; | |
ab84dc39 RH |
2726 | case INDEX_op_ld_i32: |
2727 | case INDEX_op_ld_i64: | |
2728 | case INDEX_op_ld_vec: | |
2729 | done = fold_tcg_ld_memcopy(&ctx, op); | |
2730 | break; | |
2731 | CASE_OP_32_64(st8): | |
2732 | CASE_OP_32_64(st16): | |
2733 | case INDEX_op_st32_i64: | |
2734 | done = fold_tcg_st(&ctx, op); | |
2735 | break; | |
2736 | case INDEX_op_st_i32: | |
2737 | case INDEX_op_st_i64: | |
2738 | case INDEX_op_st_vec: | |
2739 | done = fold_tcg_st_memcopy(&ctx, op); | |
2740 | break; | |
3eefdf2b RH |
2741 | case INDEX_op_mb: |
2742 | done = fold_mb(&ctx, op); | |
0c310a30 | 2743 | break; |
2cfac7fa RH |
2744 | CASE_OP_32_64_VEC(mov): |
2745 | done = fold_mov(&ctx, op); | |
2746 | break; | |
0c310a30 RH |
2747 | CASE_OP_32_64(movcond): |
2748 | done = fold_movcond(&ctx, op); | |
3eefdf2b | 2749 | break; |
2f9f08ba RH |
2750 | CASE_OP_32_64(mul): |
2751 | done = fold_mul(&ctx, op); | |
2752 | break; | |
2753 | CASE_OP_32_64(mulsh): | |
2754 | CASE_OP_32_64(muluh): | |
2755 | done = fold_mul_highpart(&ctx, op); | |
2756 | break; | |
407112b0 RH |
2757 | CASE_OP_32_64(muls2): |
2758 | CASE_OP_32_64(mulu2): | |
2759 | done = fold_multiply2(&ctx, op); | |
6b8ac0d1 | 2760 | break; |
ed523473 | 2761 | CASE_OP_32_64_VEC(nand): |
2f9f08ba RH |
2762 | done = fold_nand(&ctx, op); |
2763 | break; | |
2764 | CASE_OP_32_64(neg): | |
2765 | done = fold_neg(&ctx, op); | |
2766 | break; | |
ed523473 | 2767 | CASE_OP_32_64_VEC(nor): |
2f9f08ba RH |
2768 | done = fold_nor(&ctx, op); |
2769 | break; | |
2770 | CASE_OP_32_64_VEC(not): | |
2771 | done = fold_not(&ctx, op); | |
2772 | break; | |
2773 | CASE_OP_32_64_VEC(or): | |
2774 | done = fold_or(&ctx, op); | |
2775 | break; | |
2776 | CASE_OP_32_64_VEC(orc): | |
2777 | done = fold_orc(&ctx, op); | |
2778 | break; | |
fecccfcc RH |
2779 | case INDEX_op_qemu_ld_a32_i32: |
2780 | case INDEX_op_qemu_ld_a64_i32: | |
2781 | case INDEX_op_qemu_ld_a32_i64: | |
2782 | case INDEX_op_qemu_ld_a64_i64: | |
2783 | case INDEX_op_qemu_ld_a32_i128: | |
2784 | case INDEX_op_qemu_ld_a64_i128: | |
3eefdf2b RH |
2785 | done = fold_qemu_ld(&ctx, op); |
2786 | break; | |
fecccfcc RH |
2787 | case INDEX_op_qemu_st8_a32_i32: |
2788 | case INDEX_op_qemu_st8_a64_i32: | |
2789 | case INDEX_op_qemu_st_a32_i32: | |
2790 | case INDEX_op_qemu_st_a64_i32: | |
2791 | case INDEX_op_qemu_st_a32_i64: | |
2792 | case INDEX_op_qemu_st_a64_i64: | |
2793 | case INDEX_op_qemu_st_a32_i128: | |
2794 | case INDEX_op_qemu_st_a64_i128: | |
3eefdf2b RH |
2795 | done = fold_qemu_st(&ctx, op); |
2796 | break; | |
2f9f08ba RH |
2797 | CASE_OP_32_64(rem): |
2798 | CASE_OP_32_64(remu): | |
2799 | done = fold_remainder(&ctx, op); | |
2800 | break; | |
2801 | CASE_OP_32_64(rotl): | |
2802 | CASE_OP_32_64(rotr): | |
2803 | CASE_OP_32_64(sar): | |
2804 | CASE_OP_32_64(shl): | |
2805 | CASE_OP_32_64(shr): | |
2806 | done = fold_shift(&ctx, op); | |
2807 | break; | |
c63ff55c RH |
2808 | CASE_OP_32_64(setcond): |
2809 | done = fold_setcond(&ctx, op); | |
2810 | break; | |
3635502d RH |
2811 | CASE_OP_32_64(negsetcond): |
2812 | done = fold_negsetcond(&ctx, op); | |
2813 | break; | |
bc47b1aa RH |
2814 | case INDEX_op_setcond2_i32: |
2815 | done = fold_setcond2(&ctx, op); | |
2816 | break; | |
b6617c88 RH |
2817 | CASE_OP_32_64(sextract): |
2818 | done = fold_sextract(&ctx, op); | |
2819 | break; | |
c578ff18 | 2820 | CASE_OP_32_64(sub): |
2f9f08ba RH |
2821 | done = fold_sub(&ctx, op); |
2822 | break; | |
c578ff18 RH |
2823 | case INDEX_op_sub_vec: |
2824 | done = fold_sub_vec(&ctx, op); | |
2825 | break; | |
9531c078 RH |
2826 | CASE_OP_32_64(sub2): |
2827 | done = fold_sub2(&ctx, op); | |
e3f7dc21 | 2828 | break; |
2f9f08ba RH |
2829 | CASE_OP_32_64_VEC(xor): |
2830 | done = fold_xor(&ctx, op); | |
b10f3833 | 2831 | break; |
2cfac7fa RH |
2832 | default: |
2833 | break; | |
b10f3833 RH |
2834 | } |
2835 | ||
404a148d RH |
2836 | if (!done) { |
2837 | finish_folding(&ctx, op); | |
2838 | } | |
8f2e8c07 | 2839 | } |
8f2e8c07 | 2840 | } |