Target Independent Opportunities:

//===---------------------------------------------------------------------===//

We should recognize various "overflow detection" idioms and translate them into
llvm.uadd.with.overflow and similar intrinsics.  Here is a multiply idiom:

unsigned int mul(unsigned int a, unsigned int b) {
  if ((unsigned long long)a*b > 0xffffffff)
    exit(0);
  return a*b;
}

The legalization code for mul-with-overflow needs to be made more robust before
this can be implemented, though.

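For reference, a minimal sketch of the form the optimizer would ideally reach,
written with the GCC/Clang __builtin_mul_overflow builtin (which lowers
directly to the overflow intrinsics); the function name is just illustrative:

#include <stdlib.h>

unsigned int mul_checked(unsigned int a, unsigned int b) {
  unsigned int r;
  if (__builtin_mul_overflow(a, b, &r))  /* overflow flag from the intrinsic */
    exit(0);
  return r;
}
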
//===---------------------------------------------------------------------===//

Get the C front-end to expand hypot(x,y) -> llvm.sqrt(x*x+y*y) when errno and
precision don't matter (-ffast-math).  Misc/mandel will like this. :)  This
isn't safe in general, even on darwin.  See the libm implementation of hypot
for examples (which special case when x/y are exactly zero to get signed zeros
etc right).

//===---------------------------------------------------------------------===//

On targets with expensive 64-bit multiply, we could LSR this:

for (i = ...; ++i) {
   x = 1ULL << i;

into:
 long long tmp = 1;
 for (i = ...; ++i, tmp+=tmp)
   x = tmp;

This would be a win on ppc32, but not x86 or ppc64.

//===---------------------------------------------------------------------===//

Shrink: (setlt (loadi32 P), 0) -> (setlt (loadi8 Phi), 0)

//===---------------------------------------------------------------------===//

Reassociate should turn things like:

int factorial(int X) {
  return X*X*X*X*X*X*X*X;
}

into llvm.powi calls, allowing the code generator to produce balanced
multiplication trees.

First, the intrinsic needs to be extended to support integers, and second the
code generator needs to be enhanced to lower these to multiplication trees.

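For reference, a sketch of the balanced tree the code generator should produce
for the function above (three multiplies instead of seven); the names are
illustrative:

int factorial_balanced(int X) {
  int X2 = X*X;      /* X^2 */
  int X4 = X2*X2;    /* X^4 */
  return X4*X4;      /* X^8 */
}
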
//===---------------------------------------------------------------------===//

Interesting? testcase for add/shift/mul reassoc:

int bar(int x, int y) {
  return x*x*x+y+x*x*x*x*x*y*y*y*y;
}
int foo(int z, int n) {
  return bar(z, n) + bar(2*z, 2*n);
}

This is blocked on not handling X*X*X -> powi(X, 3) (see note above).  The
issue is that we end up getting t = 2*X; s = t*t and don't turn this into
4*X*X, which is the same number of multiplies and is canonical, because the
2*X has multiple uses.  Here's a simple example:

define i32 @test15(i32 %X1) {
  %B = mul i32 %X1, 47   ; X1*47
  %C = mul i32 %B, %B
  ret i32 %C
}

//===---------------------------------------------------------------------===//

Reassociate should handle the example in GCC PR16157:

extern int a0, a1, a2, a3, a4; extern int b0, b1, b2, b3, b4;
void f () {  /* this can be optimized to four additions... */
  b4 = a4 + a3 + a2 + a1 + a0;
  b3 = a3 + a2 + a1 + a0;
  b2 = a2 + a1 + a0;
  b1 = a1 + a0;
}

This requires reassociating to forms of expressions that are already available,
something that reassoc doesn't think about yet.

//===---------------------------------------------------------------------===//

These two functions should generate the same code on big-endian systems:

int g(int *j, int *l) { return memcmp(j, l, 4); }
int h(int *j, int *l) { return *j - *l; }

This could be done in SelectionDAGISel.cpp, along with other special cases,
for 1,2,4,8 bytes.

//===---------------------------------------------------------------------===//

It would be nice to revert this patch:
http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20060213/031986.html

And teach the dag combiner enough to simplify the code expanded before
legalize.  It seems plausible that this knowledge would let it simplify other
stuff too.

//===---------------------------------------------------------------------===//

For vector types, DataLayout.cpp::getTypeInfo() returns alignment that is equal
to the type size.  It works but can be overly conservative as the alignment of
specific vector types is target dependent.

//===---------------------------------------------------------------------===//

We should produce an unaligned load from code like this:

typedef float v4sf __attribute__((vector_size(16)));  /* the usual GCC vector
                                                         typedef, assumed */
v4sf example(float *P) {
  return (v4sf){P[0], P[1], P[2], P[3]};
}

//===---------------------------------------------------------------------===//

Add support for conditional increments, and other related patterns.  Instead
of:

	movl 136(%esp), %eax
	cmpl $0, %eax
	je LBB16_2	#cond_next
LBB16_1:	#cond_true
	incl _foo
LBB16_2:	#cond_next

emit:
	movl	_foo, %eax
	cmpl	$1, %edi
	sbbl	$-1, %eax
	movl	%eax, _foo

//===---------------------------------------------------------------------===//

Combine: a = sin(x), b = cos(x) into a,b = sincos(x).

Expand these to calls of sin/cos and stores:
      double sincos(double x, double *sin, double *cos);
      float sincosf(float x, float *sin, float *cos);
      long double sincosl(long double x, long double *sin, long double *cos);

Doing so could allow SROA of the destination pointers.  See also:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=17687

This is now easily doable with MRVs.  We could even make an intrinsic for this
if anyone cared enough about sincos.

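A minimal sketch of the expansion direction described above, assuming the GNU
libm sincos declaration just listed; the function name is illustrative:

#define _GNU_SOURCE   /* sincos is a GNU libm extension */
#include <math.h>

double sum_sin_cos(double x) {
  double s, c;
  sincos(x, &s, &c);
  /* the note proposes expanding this to:
       s = sin(x); c = cos(x);
     so the stores through &s and &c disappear and SROA can run. */
  return s + c;
}
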
//===---------------------------------------------------------------------===//

quantum_sigma_x in 462.libquantum contains the following loop:

      for(i=0; i<reg->size; i++)
	{
	  /* Flip the target bit of each basis state */
	  reg->node[i].state ^= ((MAX_UNSIGNED) 1 << target);
	}

Where MAX_UNSIGNED/state is a 64-bit int.  On a 32-bit platform it would be
just so cool to turn it into something like:

   long long Res = ((MAX_UNSIGNED) 1 << target);
   if (target < 32) {
     for(i=0; i<reg->size; i++)
       reg->node[i].state ^= Res & 0xFFFFFFFFULL;
   } else {
     for(i=0; i<reg->size; i++)
       reg->node[i].state ^= Res & 0xFFFFFFFF00000000ULL;
   }

... which would only do one 32-bit XOR per loop iteration instead of two.

It would also be nice to recognize that reg->size doesn't alias
reg->node[i], but this requires TBAA.

//===---------------------------------------------------------------------===//

This isn't recognized as bswap by instcombine (yes, it really is bswap):

unsigned long reverse(unsigned v) {
    unsigned t;
    t = v ^ ((v << 16) | (v >> 16));
    t &= ~0xff0000;
    v = (v << 24) | (v >> 8);
    return v ^ (t >> 8);
}

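For reference, the whole function above is equivalent to a single byte swap;
a hedged restatement using the GCC/Clang builtin, not a proposed
implementation:

unsigned long reverse_ref(unsigned v) {
  return __builtin_bswap32(v);  /* what instcombine should reduce reverse() to */
}
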
//===---------------------------------------------------------------------===//

[LOOP DELETION]

We don't delete this output-free loop, because trip count analysis doesn't
realize that it is finite (if it were infinite, it would be undefined).  Not
having this blocks Loop Idiom from matching strlen and friends.

void foo(char *C) {
  int x = 0;
  while (*C)
    ++x, ++C;
}

//===---------------------------------------------------------------------===//

[LOOP RECOGNITION]

These idioms should be recognized as popcount (see PR1488):

unsigned countbits_slow(unsigned v) {
  unsigned c;
  for (c = 0; v; v >>= 1)
    c += v & 1;
  return c;
}

unsigned int popcount(unsigned int input) {
  unsigned int count = 0;
  for (unsigned int i = 0; i < 4 * 8; i++)
    count += (input >> i) & 1;
  return count;
}

This should be recognized as CLZ: rdar://8459039

unsigned clz_a(unsigned a) {
  int i;
  for (i=0; i<32; i++)
    if (a & (1<<(31-i)))
      return i;
  return 32;
}

This sort of thing should be added to the loop idiom pass.

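For reference, hedged one-line equivalents of the two idioms above, using the
GCC/Clang builtins the loop idiom pass would ideally reduce them to:

unsigned popcount_ref(unsigned input) {
  return __builtin_popcount(input);   /* llvm.ctpop */
}
unsigned clz_a_ref(unsigned a) {
  return a ? __builtin_clz(a) : 32;   /* llvm.ctlz; the builtin is undefined
                                         at 0, hence the guard */
}
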
//===---------------------------------------------------------------------===//

These should turn into single 16-bit (unaligned?) loads on little/big endian
processors.

unsigned short read_16_le(const unsigned char *adr) {
  return adr[0] | (adr[1] << 8);
}
unsigned short read_16_be(const unsigned char *adr) {
  return (adr[0] << 8) | adr[1];
}

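One portable way to express the desired form in C, as a sketch: a memcpy of a
known small size is the usual idiom compilers turn into a single (possibly
unaligned) load, with __builtin_bswap16 covering the cross-endian case; the
function names are illustrative:

#include <string.h>

unsigned short read_16_native(const unsigned char *adr) {
  unsigned short v;
  memcpy(&v, adr, sizeof v);     /* becomes one 16-bit load */
  return v;
}
unsigned short read_16_swapped(const unsigned char *adr) {
  unsigned short v;
  memcpy(&v, adr, sizeof v);
  return __builtin_bswap16(v);   /* one load plus a byte swap */
}
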
//===---------------------------------------------------------------------===//

-instcombine should handle this transform:
   icmp pred (sdiv X / C1 ), C2
when X, C1, and C2 are unsigned.  Similarly for udiv and signed operands.

Currently InstCombine avoids this transform but will do it when the signs of
the operands and the sign of the divide match.  See the FIXME in
InstructionCombining.cpp in the visitSetCondInst method after the switch case
for Instruction::UDiv (around line 4447) for more details.

The SingleSource/Benchmarks/Shootout-C++/hash and hash2 tests have examples of
this construct.

//===---------------------------------------------------------------------===//

[LOOP OPTIMIZATION]

SingleSource/Benchmarks/Misc/dt.c shows several interesting optimization
opportunities in its double_array_divs_variable function: it needs loop
interchange, memory promotion (which LICM already does), vectorization and
variable trip count loop unrolling (since it has a constant trip count).  ICC
apparently produces this very nice code with -ffast-math:

..B1.70:                        # Preds ..B1.70 ..B1.69
       mulpd     %xmm0, %xmm1                                  #108.2
       mulpd     %xmm0, %xmm1                                  #108.2
       mulpd     %xmm0, %xmm1                                  #108.2
       mulpd     %xmm0, %xmm1                                  #108.2
       addl      $8, %edx                                      #
       cmpl      $131072, %edx                                 #108.2
       jb        ..B1.70       # Prob 99%                      #108.2

It would be better to count down to zero, but this is a lot better than what we
do.

//===---------------------------------------------------------------------===//

Consider:

typedef unsigned U32;
typedef unsigned long long U64;
int test (U32 *inst, U64 *regs) {
    U64 effective_addr2;
    U32 temp = *inst;
    int r1 = (temp >> 20) & 0xf;
    int b2 = (temp >> 16) & 0xf;
    effective_addr2 = temp & 0xfff;
    if (b2) effective_addr2 += regs[b2];
    b2 = (temp >> 12) & 0xf;
    if (b2) effective_addr2 += regs[b2];
    effective_addr2 &= regs[4];
    if ((effective_addr2 & 3) == 0)
        return 1;
    return 0;
}

Note that only the low 2 bits of effective_addr2 are used.  On 32-bit systems,
we don't eliminate the computation of the top half of effective_addr2 because
we don't have whole-function selection dags.  On x86, this means we use one
extra register for the function when effective_addr2 is declared as U64 than
when it is declared U32.

PHI Slicing could be extended to do this.

//===---------------------------------------------------------------------===//

Tail call elim should be more aggressive, checking to see if the call is
followed by an uncond branch to an exit block.

; This testcase is due to tail-duplication not wanting to copy the return
; instruction into the terminating blocks because there was other code
; optimized out of the function after the taildup happened.
; RUN: llvm-as < %s | opt -tailcallelim | llvm-dis | not grep call

define i32 @t4(i32 %a) {
entry:
	%tmp.1 = and i32 %a, 1		; <i32> [#uses=1]
	%tmp.2 = icmp ne i32 %tmp.1, 0		; <i1> [#uses=1]
	br i1 %tmp.2, label %then.0, label %else.0

then.0:		; preds = %entry
	%tmp.5 = add i32 %a, -1		; <i32> [#uses=1]
	%tmp.3 = call i32 @t4( i32 %tmp.5 )		; <i32> [#uses=1]
	br label %return

else.0:		; preds = %entry
	%tmp.7 = icmp ne i32 %a, 0		; <i1> [#uses=1]
	br i1 %tmp.7, label %then.1, label %return

then.1:		; preds = %else.0
	%tmp.11 = add i32 %a, -2		; <i32> [#uses=1]
	%tmp.9 = call i32 @t4( i32 %tmp.11 )		; <i32> [#uses=1]
	br label %return

return:		; preds = %then.1, %else.0, %then.0
	%result.0 = phi i32 [ 0, %else.0 ], [ %tmp.3, %then.0 ],
                            [ %tmp.9, %then.1 ]
	ret i32 %result.0
}

//===---------------------------------------------------------------------===//

Tail recursion elimination should handle:

int pow2m1(int n) {
 if (n == 0)
   return 0;
 return 2 * pow2m1 (n - 1) + 1;
}

Also, multiplies can be turned into SHL's, so they should be handled as if
they were associative.  "return foo() << 1" can be tail recursion eliminated.

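A sketch of the loop that tail recursion elimination should produce for
pow2m1, treating the 2*... + 1 chain as an accumulator (illustrative, not the
pass's literal output):

int pow2m1_loop(int n) {
  int acc = 0;              /* value of the base case */
  for (; n != 0; n--)
    acc = 2 * acc + 1;      /* apply one recursion step */
  return acc;               /* computes 2^n - 1 */
}
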
//===---------------------------------------------------------------------===//

Argument promotion should promote arguments for recursive functions, like
this:

; RUN: llvm-as < %s | opt -argpromotion | llvm-dis | grep x.val

define internal i32 @foo(i32* %x) {
entry:
	%tmp = load i32* %x		; <i32> [#uses=0]
	%tmp.foo = call i32 @foo( i32* %x )		; <i32> [#uses=1]
	ret i32 %tmp.foo
}

define i32 @bar(i32* %x) {
entry:
	%tmp3 = call i32 @foo( i32* %x )		; <i32> [#uses=1]
	ret i32 %tmp3
}

//===---------------------------------------------------------------------===//

We should investigate an instruction sinking pass.  Consider this silly
example in pic mode:

#include <assert.h>
void foo(int x) {
  assert(x);
  //...
}

we compile this to:
_foo:
	subl	$28, %esp
	call	"L1$pb"
"L1$pb":
	popl	%eax
	cmpl	$0, 32(%esp)
	je	LBB1_2	# cond_true
LBB1_1:	# return
	# ...
	addl	$28, %esp
	ret
LBB1_2:	# cond_true
...

The PIC base computation (call+popl) is only used on one path through the
code, but is currently always computed in the entry block.  It would be
better to sink the picbase computation down into the block for the
assertion, as it is the only one that uses it.  This happens for a lot of
code with early outs.

Another example is loads of arguments, which are usually emitted into the
entry block on targets like x86.  If not used in all paths through a
function, they should be sunk into the ones that do.

In this case, whole-function-isel would also handle this.

//===---------------------------------------------------------------------===//

Investigate lowering of sparse switch statements into perfect hash tables:
http://burtleburtle.net/bob/hash/perfect.html

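A toy sketch of the idea, with a hand-picked (hypothetical) perfect hash for
the sparse keys {10, 100, 1000}: x % 7 maps them to the distinct slots 3, 2,
and 6, so the switch becomes one table probe:

int lookup(unsigned x) {
  /* slot:                            0  1    2   3  4  5     6 */
  static const unsigned keys[7] = {   0, 0, 100, 10, 0, 0, 1000 };
  static const int      vals[7] = {   0, 0,   2,  1, 0, 0,    3 };
  unsigned h = x % 7;
  return keys[h] == x ? vals[h] : 0;  /* 0 plays the default case */
}
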
//===---------------------------------------------------------------------===//

We should turn things like "load+fabs+store" and "load+fneg+store" into the
corresponding integer operations.  On a yonah, this loop:

double a[256];
void foo() {
  int i, b;
  for (b = 0; b < 10000000; b++)
    for (i = 0; i < 256; i++)
      a[i] = -a[i];
}

is twice as slow as this loop:

long long a[256];
void foo() {
  int i, b;
  for (b = 0; b < 10000000; b++)
    for (i = 0; i < 256; i++)
      a[i] ^= (1ULL << 63);
}

and I suspect other processors are similar.  On X86 in particular this is a
big win because doing this with integers allows the use of read/modify/write
instructions.

//===---------------------------------------------------------------------===//

DAG Combiner should try to combine small loads into larger loads when
profitable.  For example, we compile this C++ example:

struct THotKey { short Key; bool Control; bool Shift; bool Alt; };
extern THotKey m_HotKey;
THotKey GetHotKey () { return m_HotKey; }

into (-m64 -O3 -fno-exceptions -static -fomit-frame-pointer):

__Z9GetHotKeyv:                         ## @_Z9GetHotKeyv
	movq	_m_HotKey@GOTPCREL(%rip), %rax
	movzwl	(%rax), %ecx
	movzbl	2(%rax), %edx
	shlq	$16, %rdx
	orq	%rcx, %rdx
	movzbl	3(%rax), %ecx
	shlq	$24, %rcx
	orq	%rdx, %rcx
	movzbl	4(%rax), %eax
	shlq	$32, %rax
	orq	%rcx, %rax
	ret

//===---------------------------------------------------------------------===//

We should add an FRINT node to the DAG to model targets that have legal
implementations of ceil/floor/rint.

//===---------------------------------------------------------------------===//

Consider:

int test() {
  long long input[8] = {1,0,1,0,1,0,1,0};
  foo(input);
}

Clang compiles this into:

  call void @llvm.memset.p0i8.i64(i8* %tmp, i8 0, i64 64, i32 16, i1 false)
  %0 = getelementptr [8 x i64]* %input, i64 0, i64 0
  store i64 1, i64* %0, align 16
  %1 = getelementptr [8 x i64]* %input, i64 0, i64 2
  store i64 1, i64* %1, align 16
  %2 = getelementptr [8 x i64]* %input, i64 0, i64 4
  store i64 1, i64* %2, align 16
  %3 = getelementptr [8 x i64]* %input, i64 0, i64 6
  store i64 1, i64* %3, align 16

Which gets codegen'd into:

	pxor	%xmm0, %xmm0
	movaps	%xmm0, -16(%rbp)
	movaps	%xmm0, -32(%rbp)
	movaps	%xmm0, -48(%rbp)
	movaps	%xmm0, -64(%rbp)
	movq	$1, -64(%rbp)
	movq	$1, -48(%rbp)
	movq	$1, -32(%rbp)
	movq	$1, -16(%rbp)

It would be better to have 4 movq's of 0 instead of the movaps's.

//===---------------------------------------------------------------------===//

http://llvm.org/PR717:

The following code should compile into "ret int undef".  Instead, LLVM
produces "ret int 0":

int f() {
  int x = 4;
  int y;
  if (x == 3) y = 0;
  return y;
}

//===---------------------------------------------------------------------===//

The loop unroller should partially unroll loops (instead of peeling them)
when code growth isn't too bad and when an unroll count allows simplification
of some code within the loop.  One trivial example is:

#include <stdio.h>
int main() {
    int nRet = 17;
    int nLoop;
    for ( nLoop = 0; nLoop < 1000; nLoop++ ) {
        if ( nLoop & 1 )
            nRet += 2;
        else
            nRet -= 1;
    }
    return nRet;
}

Unrolling by 2 would eliminate the '&1' in both copies, leading to a net
reduction in code size.  The resultant code would then also be suitable for
exit value computation.

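A sketch of the unrolled-by-2 loop for the example above; the parity test
folds away in each copy (function name is illustrative):

int main_unrolled(void) {
  int nRet = 17;
  int nLoop;
  for (nLoop = 0; nLoop < 1000; nLoop += 2) {
    nRet -= 1;   /* even iteration: nLoop & 1 is known to be 0 */
    nRet += 2;   /* odd iteration: nLoop & 1 is known to be 1 */
  }
  return nRet;
}
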
//===---------------------------------------------------------------------===//

We miss a bunch of rotate opportunities on various targets, including ppc, x86,
etc.  On X86, we miss a bunch of 'rotate by variable' cases because the rotate
matching code in dag combine doesn't look through truncates aggressively
enough.  Here are some testcases reduced from GCC PR17886:

unsigned long long f5(unsigned long long x, unsigned long long y) {
  return (x << 8) | ((y >> 48) & 0xffull);
}
unsigned long long f6(unsigned long long x, unsigned long long y, int z) {
  switch(z) {
  case 1:
    return (x << 8) | ((y >> 48) & 0xffull);
  case 2:
    return (x << 16) | ((y >> 40) & 0xffffull);
  case 3:
    return (x << 24) | ((y >> 32) & 0xffffffull);
  case 4:
    return (x << 32) | ((y >> 24) & 0xffffffffull);
  default:
    return (x << 40) | ((y >> 16) & 0xffffffffffull);
  }
}

//===---------------------------------------------------------------------===//

This (and similar related idioms):

unsigned int foo(unsigned char i) {
  return i | (i<<8) | (i<<16) | (i<<24);
}

compiles into:

define i32 @foo(i8 zeroext %i) nounwind readnone ssp noredzone {
entry:
  %conv = zext i8 %i to i32
  %shl = shl i32 %conv, 8
  %shl5 = shl i32 %conv, 16
  %shl9 = shl i32 %conv, 24
  %or = or i32 %shl9, %conv
  %or6 = or i32 %or, %shl5
  %or10 = or i32 %or6, %shl
  ret i32 %or10
}

it would be better as:

unsigned int bar(unsigned char i) {
  unsigned int j=i | (i << 8);
  return j | (j<<16);
}

aka:

define i32 @bar(i8 zeroext %i) nounwind readnone ssp noredzone {
entry:
  %conv = zext i8 %i to i32
  %shl = shl i32 %conv, 8
  %or = or i32 %shl, %conv
  %shl5 = shl i32 %or, 16
  %or6 = or i32 %shl5, %or
  ret i32 %or6
}

or even i*0x01010101, depending on the speed of the multiplier.  The best way
to handle this is to canonicalize it to a multiply in IR and have codegen
handle lowering multiplies to shifts on cpus where shifts are faster.

//===---------------------------------------------------------------------===//

We do a number of simplifications in simplify libcalls to strength reduce
standard library functions, but we don't currently merge them together.  For
example, it is useful to merge memcpy(a,b,strlen(b)) -> strcpy.  This can only
be done safely if "b" isn't modified between the strlen and memcpy of course.

//===---------------------------------------------------------------------===//

We compile this program: (from GCC PR11680)
http://gcc.gnu.org/bugzilla/attachment.cgi?id=4487

Into code that runs the same speed in fast/slow modes, but both modes run 2x
slower than when compiled with GCC (either 4.0 or 4.2):

$ llvm-g++ perf.cpp -O3 -fno-exceptions
$ time ./a.out fast
1.821u 0.003s 0:01.82 100.0%	0+0k 0+0io 0pf+0w

$ g++ perf.cpp -O3 -fno-exceptions
$ time ./a.out fast
0.821u 0.001s 0:00.82 100.0%	0+0k 0+0io 0pf+0w

It looks like we are making the same inlining decisions, so this may be raw
codegen badness or something else (haven't investigated).

//===---------------------------------------------------------------------===//

Divisibility by constant can be simplified (according to GCC PR12849) from
being a mulhi to being a mul lo (cheaper).  Testcase:

void bar(unsigned n) {
  if (n % 3 == 0)
    true();
}

This is equivalent to the following, where 2863311531 is the multiplicative
inverse of 3, and 1431655766 is ((2^32)-1)/3+1:
void bar(unsigned n) {
  if (n * 2863311531U < 1431655766U)
    true();
}

The same transformation can work with an even modulo with the addition of a
rotate: rotate the result of the multiply to the right by the number of bits
which need to be zero for the condition to be true, and shrink the compare RHS
by the same amount.  Unless the target supports rotates, though, that
transformation probably isn't worthwhile.

The transformation can also easily be made to work with non-zero equality
comparisons: just transform, for example, "n % 3 == 1" to "(n-1) % 3 == 0".

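A worked instance of the even-modulo case, as a hedged sketch following the
usual Hacker's Delight recipe: n % 6 == 0 becomes a multiply by the inverse of
3 (0xAAAAAAAB, the same 2863311531 as above), a rotate right by one (for the
power-of-two factor of 6), and an unsigned compare against 0xFFFFFFFF/6:

static unsigned ror1(unsigned x) { return (x >> 1) | (x << 31); }

int divisible_by_6(unsigned n) {
  return ror1(n * 0xAAAAAAABu) <= 0x2AAAAAAAu;  /* 0x2AAAAAAA == 0xFFFFFFFF/6 */
}
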
//===---------------------------------------------------------------------===//

Better mod/ref analysis for scanf would allow us to eliminate the vtable and a
bunch of other stuff from this example (see PR1604):

#include <cstdio>
struct test {
  int val;
  virtual ~test() {}
};

int main() {
  test t;
  std::scanf("%d", &t.val);
  std::printf("%d\n", t.val);
}

//===---------------------------------------------------------------------===//

These functions perform the same computation, but produce different assembly.

define i8 @select(i8 %x) readnone nounwind {
  %A = icmp ult i8 %x, 250
  %B = select i1 %A, i8 0, i8 1
  ret i8 %B
}

define i8 @addshr(i8 %x) readnone nounwind {
  %A = zext i8 %x to i9
  %B = add i9 %A, 6	;; 256 - 250 == 6
  %C = lshr i9 %B, 8
  %D = trunc i9 %C to i8
  ret i8 %D
}

//===---------------------------------------------------------------------===//

From gcc bug 24696:
int
f (unsigned long a, unsigned long b, unsigned long c)
{
  return ((a & (c - 1)) != 0) || ((b & (c - 1)) != 0);
}
int
f (unsigned long a, unsigned long b, unsigned long c)
{
  return ((a & (c - 1)) != 0) | ((b & (c - 1)) != 0);
}
Both should combine to ((a|b) & (c-1)) != 0.  Currently not optimized with
"clang -emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

From GCC Bug 20192:
#define PMD_MASK    (~((1UL << 23) - 1))
void clear_pmd_range(unsigned long start, unsigned long end)
{
   if (!(start & ~PMD_MASK) && !(end & ~PMD_MASK))
       f();
}
The expression should optimize to something like
"!((start|end) & ~PMD_MASK)".  Currently not optimized with "clang
-emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

unsigned int f(unsigned int i, unsigned int n) {++i; if (i == n) ++i; return i;}
unsigned int f2(unsigned int i, unsigned int n) {++i; i += i == n; return i;}
These should combine to the same thing.  Currently, the first function
produces better code on X86.

//===---------------------------------------------------------------------===//

From GCC Bug 15784:
#define abs(x) x>0?x:-x
int f(int x, int y)
{
 return (abs(x)) >= 0;
}
This should optimize to x == INT_MIN.  (With -fwrapv.)  Currently not
optimized with "clang -emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

From GCC Bug 14753:
void
rotate_cst (unsigned int a)
{
  a = (a << 10) | (a >> 22);
  if (a == 123)
    bar ();
}
void
minus_cst (unsigned int a)
{
  unsigned int tem;

  tem = 20 - a;
  if (tem == 5)
    bar ();
}
void
mask_gt (unsigned int a)
{
  /* This is equivalent to a > 15.  */
  if ((a & ~7) > 8)
    bar ();
}
void
rshift_gt (unsigned int a)
{
  /* This is equivalent to a > 23.  */
  if ((a >> 2) > 5)
    bar ();
}

All should simplify to a single comparison.  All of these are
currently not optimized with "clang -emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

From GCC Bug 32605:
int c(int* x) {return (char*)x+2 == (char*)x;}
Should combine to 0.  Currently not optimized with "clang
-emit-llvm-bc | opt -O3" (although llc can optimize it).

//===---------------------------------------------------------------------===//

int a(unsigned b) {return ((b << 31) | (b << 30)) >> 31;}
Should be combined to "((b >> 1) | b) & 1".  Currently not optimized
with "clang -emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

unsigned a(unsigned x, unsigned y) { return x | (y & 1) | (y & 2);}
Should combine to "x | (y & 3)".  Currently not optimized with "clang
-emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

int a(int a, int b, int c) {return (~a & c) | ((c|a) & b);}
Should fold to "(~a & c) | (a & b)".  Currently not optimized with
"clang -emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

int a(int a,int b) {return (~(a|b))|a;}
Should fold to "a|~b".  Currently not optimized with "clang
-emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

int a(int a, int b) {return (a&&b) || (a&&!b);}
Should fold to "a".  Currently not optimized with "clang -emit-llvm-bc
| opt -O3".

//===---------------------------------------------------------------------===//

int a(int a, int b, int c) {return (a&&b) || (!a&&c);}
Should fold to "a ? b : c", or at least something sane.  Currently not
optimized with "clang -emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

int a(int a, int b, int c) {return (a&&b) || (a&&c) || (a&&b&&c);}
Should fold to a && (b || c).  Currently not optimized with "clang
-emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

int a(int x) {return x | ((x & 8) ^ 8);}
Should combine to x | 8.  Currently not optimized with "clang
-emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

int a(int x) {return x ^ ((x & 8) ^ 8);}
Should also combine to x | 8.  Currently not optimized with "clang
-emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

int a(int x) {return ((x | -9) ^ 8) & x;}
Should combine to x & -9.  Currently not optimized with "clang
-emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

unsigned a(unsigned a) {return a * 0x11111111 >> 28 & 1;}
Should combine to "a * 0x88888888 >> 31".  Currently not optimized
with "clang -emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

unsigned a(char* x) {if ((*x & 32) == 0) return b();}
There's an unnecessary zext in the generated code with "clang
-emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

unsigned a(unsigned long long x) {return 40 * (x >> 1);}
Should combine to "20 * (((unsigned)x) & -2)".  Currently not
optimized with "clang -emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

int g(int x) { return (x - 10) < 0; }
Should combine to "x <= 9" (the sub has nsw).  Currently not
optimized with "clang -emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

int g(int x) { return (x + 10) < 0; }
Should combine to "x < -10" (the add has nsw).  Currently not
optimized with "clang -emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

int f(int i, int j) { return i < j + 1; }
int g(int i, int j) { return j > i - 1; }
Should combine to "i <= j" (the add/sub has nsw).  Currently not
optimized with "clang -emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

unsigned f(unsigned x) { return ((x & 7) + 1) & 15; }
The & 15 part should be optimized away, it doesn't change the result.
Currently not optimized with "clang -emit-llvm-bc | opt -O3".

//===---------------------------------------------------------------------===//

This was noticed in the entryblock for grokdeclarator in 403.gcc:

        %tmp = icmp eq i32 %decl_context, 4
        %decl_context_addr.0 = select i1 %tmp, i32 3, i32 %decl_context
        %tmp1 = icmp eq i32 %decl_context_addr.0, 1
        %decl_context_addr.1 = select i1 %tmp1, i32 0, i32 %decl_context_addr.0

tmp1 should be simplified to something like:

        (!tmp && decl_context == 1)

This allows recursive simplifications; tmp1 is used all over the place in
the function, e.g. by:

        %tmp23 = icmp eq i32 %decl_context_addr.1, 0	; <i1> [#uses=1]
        %tmp24 = xor i1 %tmp1, true		; <i1> [#uses=1]
        %or.cond8 = and i1 %tmp23, %tmp24	; <i1> [#uses=1]

later.

//===---------------------------------------------------------------------===//

[STORE SINKING]

Store sinking: This code:

void f (int n, int *cond, int *res) {
 int i;
 *res = 0;
 for (i = 0; i < n; i++)
   if (*cond)
     *res ^= 234; /* (*) */
}

On this function GVN hoists the fully redundant value of *res, but nothing
moves the store out.  This gives us this code:

bb:		; preds = %bb2, %entry
	%.rle = phi i32 [ 0, %entry ], [ %.rle6, %bb2 ]
	%i.05 = phi i32 [ 0, %entry ], [ %indvar.next, %bb2 ]
	%1 = load i32* %cond, align 4
	%2 = icmp eq i32 %1, 0
	br i1 %2, label %bb2, label %bb1

bb1:		; preds = %bb
	%3 = xor i32 %.rle, 234
	store i32 %3, i32* %res, align 4
	br label %bb2

bb2:		; preds = %bb, %bb1
	%.rle6 = phi i32 [ %3, %bb1 ], [ %.rle, %bb ]
	%indvar.next = add i32 %i.05, 1
	%exitcond = icmp eq i32 %indvar.next, %n
	br i1 %exitcond, label %return, label %bb

DSE should sink partially dead stores to get the store out of the loop.

Here's another partial dead case:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12395

//===---------------------------------------------------------------------===//

Scalar PRE hoists the mul in the common block up to the else:

int test (int a, int b, int c, int g) {
  int d, e;
  if (a)
    d = b * c;
  else
    d = b - c;
  e = b * c + g;
  return d + e;
}

It would be better to do the mul once, above the if, to reduce codesize.
This is GCC PR38204.

//===---------------------------------------------------------------------===//

This simple function from 179.art:

int winner, numf2s;
struct { double y; int reset; } *Y;

void find_match() {
   int i;
   winner = 0;
   for (i=0;i<numf2s;i++)
       if (Y[i].y > Y[winner].y)
           winner = i;
}

Compiles into (with clang TBAA):

for.body:                                         ; preds = %for.inc, %bb.nph
  %indvar = phi i64 [ 0, %bb.nph ], [ %indvar.next, %for.inc ]
  %i.01718 = phi i32 [ 0, %bb.nph ], [ %i.01719, %for.inc ]
  %tmp4 = getelementptr inbounds %struct.anon* %tmp3, i64 %indvar, i32 0
  %tmp5 = load double* %tmp4, align 8, !tbaa !4
  %idxprom7 = sext i32 %i.01718 to i64
  %tmp10 = getelementptr inbounds %struct.anon* %tmp3, i64 %idxprom7, i32 0
  %tmp11 = load double* %tmp10, align 8, !tbaa !4
  %cmp12 = fcmp ogt double %tmp5, %tmp11
  br i1 %cmp12, label %if.then, label %for.inc

if.then:                                          ; preds = %for.body
  %i.017 = trunc i64 %indvar to i32
  br label %for.inc

for.inc:                                          ; preds = %for.body, %if.then
  %i.01719 = phi i32 [ %i.01718, %for.body ], [ %i.017, %if.then ]
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, %tmp22
  br i1 %exitcond, label %for.cond.for.end_crit_edge, label %for.body

It is good that we hoisted the reloads of numf2s and Y out of the loop and
sunk the store to winner out.

However, this is awful on several levels: the conditional truncate in the loop
(-indvars at fault? why can't we completely promote the IV to i64?).

Beyond that, we have a partially redundant load in the loop: if "winner" (aka
%i.01718) isn't updated, we reload Y[winner].y the next time through the loop.
Similarly, the addressing that feeds it (including the sext) is redundant. In
the end we get this generated assembly:

LBB0_2:                                 ## %for.body
                                        ## =>This Inner Loop Header: Depth=1
	movsd	(%rdi), %xmm0
	movslq	%edx, %r8
	shlq	$4, %r8
	ucomisd	(%rcx,%r8), %xmm0
	jbe	LBB0_4
	movl	%esi, %edx
LBB0_4:                                 ## %for.inc
	addq	$16, %rdi
	incq	%rsi
	cmpq	%rsi, %rax
	jne	LBB0_2

All things considered this isn't too bad, but we shouldn't need the movslq or
the shlq instruction, or the load folded into ucomisd every time through the
loop.

On an x86-specific topic, if the loop can't be restructured, the movl should
be a cmov.

//===---------------------------------------------------------------------===//

[STORE SINKING]

GCC PR37810 is an interesting case where we should sink load/store reload
into the if block and outside the loop, so we don't reload/store it on the
non-call path.

for () {
  *P += 1;
  if ()
    call();
  else
    ...
->
tmp = *P
for () {
  tmp += 1;
  if () {
    *P = tmp;
    call();
    tmp = *P;
  } else ...
}
*P = tmp;

We now hoist the reload after the call (Transforms/GVN/lpre-call-wrap.ll), but
we don't sink the store.  We need partially dead store sinking.

//===---------------------------------------------------------------------===//

[LOAD PRE CRIT EDGE SPLITTING]

GCC PR37166: Sinking of loads prevents SROA'ing the "g" struct on the stack
leading to excess stack traffic.  This could be handled by GVN with some crazy
symbolic phi translation.  The code we get looks like (g is on the stack):

bb2:		; preds = %bb1
..
	%9 = getelementptr %struct.f* %g, i32 0, i32 0
	store i32 %8, i32* %9, align 4
	br label %bb3

bb3:		; preds = %bb1, %bb2, %bb
	%c_addr.0 = phi %struct.f* [ %g, %bb2 ], [ %c, %bb ], [ %c, %bb1 ]
	%b_addr.0 = phi %struct.f* [ %b, %bb2 ], [ %g, %bb ], [ %b, %bb1 ]
	%10 = getelementptr %struct.f* %c_addr.0, i32 0, i32 0
	%11 = load i32* %10, align 4

%11 is partially redundant, and in BB2 it should have the value %8.

GCC PR33344 and PR35287 are similar cases.

//===---------------------------------------------------------------------===//

[LOAD PRE]

There are many load PRE testcases in testsuite/gcc.dg/tree-ssa/loadpre* in the
GCC testsuite, ones we don't get yet are (checked through loadpre25):

[CRIT EDGE BREAKING]
predcom-4.c

[PRE OF READONLY CALL]
loadpre5.c

[TURN SELECT INTO BRANCH]
loadpre14.c loadpre15.c

actually a conditional increment: loadpre18.c loadpre19.c

//===---------------------------------------------------------------------===//

[LOAD PRE / STORE SINKING / SPEC HACK]

This is a chunk of code from 456.hmmer:

int f(int M, int *mc, int *mpp, int *tpmm, int *ip, int *tpim, int *dpp,
      int *tpdm, int xmb, int *bp, int *ms) {
  int k, sc;
  for (k = 1; k <= M; k++) {
    mc[k] = mpp[k-1] + tpmm[k-1];
    if ((sc = ip[k-1] + tpim[k-1]) > mc[k]) mc[k] = sc;
    if ((sc = dpp[k-1] + tpdm[k-1]) > mc[k]) mc[k] = sc;
    if ((sc = xmb + bp[k]) > mc[k]) mc[k] = sc;
    mc[k] += ms[k];
  }
}

It is very profitable for this benchmark to turn the conditional stores to
mc[k] into a conditional move (select instr in IR) and allow the final store
to do the store.  See GCC PR27313 for more details.  Note that this is valid
to xform even with the new C++ memory model, since mc[k] is previously loaded
and later stored.

//===---------------------------------------------------------------------===//

[SCALAR PRE]
There are many PRE testcases in testsuite/gcc.dg/tree-ssa/ssa-pre-*.c in the
GCC testsuite.

//===---------------------------------------------------------------------===//

There are some interesting cases in testsuite/gcc.dg/tree-ssa/pred-comm* in the
GCC testsuite.  For example, we get the first example in predcom-1.c, but
miss the second one:

unsigned fib[1000];
unsigned avg[1000];

__attribute__ ((noinline))
void count_averages(int n) {
  int i;
  for (i = 1; i < n; i++)
    avg[i] = (((unsigned long) fib[i - 1] + fib[i] + fib[i + 1]) / 3) & 0xffff;
}

which compiles into two loads instead of one in the loop.

predcom-2.c is the same as predcom-1.c

predcom-3.c is very similar but needs loads feeding each other instead of
store->load.

//===---------------------------------------------------------------------===//

[ALIAS ANALYSIS]

Type based alias analysis:
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=14705

We should do better analysis of posix_memalign.  At the least it should be
known not to capture its pointer argument; at best, we should know that the
out-value result doesn't point to anything (like malloc).  One example of
this is in SingleSource/Benchmarks/Misc/dt.c

//===---------------------------------------------------------------------===//

Interesting missed case because of control flow flattening (should be 2 loads):
http://gcc.gnu.org/bugzilla/show_bug.cgi?id=26629
With: llvm-gcc t2.c -S -o - -O0 -emit-llvm | llvm-as |
             opt -mem2reg -gvn -instcombine | llvm-dis
we miss it because we need 1) CRIT EDGE 2) MULTIPLE DIFFERENT
VALS PRODUCED BY ONE BLOCK OVER DIFFERENT PATHS

//===---------------------------------------------------------------------===//

http://gcc.gnu.org/bugzilla/show_bug.cgi?id=19633
We could eliminate the branch condition here, loading from null is undefined:

struct S { int w, x, y, z; };
struct T { int r; struct S s; };
void bar (struct S, int);
void foo (int a, struct T b)
{
  struct S *c = 0;
  if (a)
    c = &b.s;
  bar (*c, a);
}

//===---------------------------------------------------------------------===//

simplifylibcalls should do several optimizations for strspn/strcspn:

strcspn(x, "a") -> inlined loop for up to 3 letters (similarly for strspn):

size_t __strcspn_c3 (__const char *__s, int __reject1, int __reject2,
                     int __reject3) {
  register size_t __result = 0;
  while (__s[__result] != '\0' && __s[__result] != __reject1 &&
         __s[__result] != __reject2 && __s[__result] != __reject3)
    ++__result;
  return __result;
}

This should turn into a switch on the character.  See PR3253 for some notes on
codegen.

456.hmmer apparently uses strcspn and strspn a lot.  471.omnetpp uses strspn.

//===---------------------------------------------------------------------===//

simplifylibcalls should turn these snprintf idioms into memcpy (GCC PR47917)

char buf1[6], buf2[6], buf3[4], buf4[4];
int i;

int foo (void) {
  int ret = snprintf (buf1, sizeof buf1, "abcde");
  ret += snprintf (buf2, sizeof buf2, "abcdef") * 16;
  ret += snprintf (buf3, sizeof buf3, "%s", i++ < 6 ? "abc" : "def") * 256;
  ret += snprintf (buf4, sizeof buf4, "%s", i++ > 10 ? "abcde" : "defgh")*4096;
  return ret;
}

//===---------------------------------------------------------------------===//

"gas" uses this idiom:
  else if (strchr ("+-/*%|&^:[]()~", *intel_parser.op_string))
..
  else if (strchr ("<>", *intel_parser.op_string)

Those should be turned into a switch, as sketched after this note.

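A sketch of the switch form for the first strchr call above, wrapped in a
hypothetical helper so it stands alone; the case labels cover the same
character set as the quoted string:

/* hypothetical helper: nonzero iff c is one of "+-/*%|&^:[]()~",
   matching the strchr test in the gas source quoted above */
int is_intel_op_char(int c) {
  switch (c) {
  case '+': case '-': case '/': case '*': case '%': case '|':
  case '&': case '^': case ':': case '[': case ']': case '(':
  case ')': case '~':
    return 1;
  default:
    return 0;
  }
}
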
//===---------------------------------------------------------------------===//

252.eon contains this interesting code:

        %3072 = getelementptr [100 x i8]* %tempString, i32 0, i32 0
        %3073 = call i8* @strcpy(i8* %3072, i8* %3071) nounwind
        %strlen = call i32 @strlen(i8* %3072)    ; uses = 1
        %endptr = getelementptr [100 x i8]* %tempString, i32 0, i32 %strlen
        call void @llvm.memcpy.i32(i8* %endptr,
          i8* getelementptr ([5 x i8]* @"\01LC42", i32 0, i32 0), i32 5, i32 1)
        %3074 = call i32 @strlen(i8* %endptr) nounwind readonly

This is interesting for a couple reasons.  First, the memcpy+strlen strlen can
be replaced with:

        %3074 = call i32 @strlen([5 x i8]* @"\01LC42") nounwind readonly

Because the destination was just copied into the specified memory buffer.
This, in turn, can be constant folded to "4".

In other code, it contains:

        %endptr6978 = bitcast i8* %endptr69 to i32*
        store i32 7107374, i32* %endptr6978, align 1
        %3167 = call i32 @strlen(i8* %endptr69) nounwind readonly

Which could also be constant folded.  Whatever is producing this should
probably be fixed to leave this as a memcpy from a string.

Further, eon also has an interesting partially redundant strlen call:

bb8:		; preds = %_ZN18eonImageCalculatorC1Ev.exit
	%682 = getelementptr i8** %argv, i32 6		; <i8**> [#uses=2]
	%683 = load i8** %682, align 4		; <i8*> [#uses=4]
	%684 = load i8* %683, align 1		; <i8> [#uses=1]
	%685 = icmp eq i8 %684, 0		; <i1> [#uses=1]
	br i1 %685, label %bb10, label %bb9

bb9:		; preds = %bb8
	%686 = call i32 @strlen(i8* %683) nounwind readonly
	%687 = icmp ugt i32 %686, 254		; <i1> [#uses=1]
	br i1 %687, label %bb10, label %bb11

bb10:		; preds = %bb9, %bb8
	%688 = call i32 @strlen(i8* %683) nounwind readonly

This could be eliminated by doing the strlen once in bb8, saving code size and
improving perf on the bb8->9->10 path.

//===---------------------------------------------------------------------===//

I see an interesting fully redundant call to strlen left in 186.crafty:InputMove
which looks like:
	%movetext11 = getelementptr [128 x i8]* %movetext, i32 0, i32 0

bb62:		; preds = %bb55, %bb53
	%promote.0 = phi i32 [ %169, %bb55 ], [ 0, %bb53 ]
	%171 = call i32 @strlen(i8* %movetext11) nounwind readonly align 1
	%172 = add i32 %171, -1		; <i32> [#uses=1]
	%173 = getelementptr [128 x i8]* %movetext, i32 0, i32 %172

...  no stores ...
	br i1 %or.cond, label %bb65, label %bb72

bb65:		; preds = %bb62
	store i8 0, i8* %173, align 1
	br label %bb72

bb72:		; preds = %bb65, %bb62
	%trank.1 = phi i32 [ %176, %bb65 ], [ -1, %bb62 ]
	%177 = call i32 @strlen(i8* %movetext11) nounwind readonly align 1

Note that on the bb62->bb72 path, that the %177 strlen call is partially
redundant with the %171 call.  At worst, we could shove the %177 strlen call
up into the bb65 block moving it out of the bb62->bb72 path.  However, note
that bb65 stores to the string, zeroing out the last byte.  This means that on
that path the value of %177 is actually just %171-1.  A sub is cheaper than a
strlen!

This pattern repeats several times, basically doing:

  A = strlen(P);
  P[A-1] = 0;
  B = strlen(P);
  where it is "obvious" that B = A-1.

//===---------------------------------------------------------------------===//

186.crafty has this interesting pattern with the "out.4543" variable:

call void @llvm.memcpy.i32(
        i8* getelementptr ([10 x i8]* @out.4543, i32 0, i32 0),
       i8* getelementptr ([7 x i8]* @"\01LC28700", i32 0, i32 0), i32 7, i32 1)
%101 = call @printf(i8* ...   @out.4543, i32 0, i32 0)) nounwind

It is basically doing:

  memcpy(globalarray, "string");
  printf(...,  globalarray);

Anyway, by knowing that printf just reads the memory and forward substituting
the string directly into the printf, this eliminates reads from globalarray.
Since this pattern occurs frequently in crafty (due to the "DisplayTime" and
other similar functions) there are many stores to "out".  Once all the printfs
stop using "out", all that is left is the memcpy's into it.  This should allow
globalopt to remove the "stored only" global.

//===---------------------------------------------------------------------===//

This code:

define inreg i32 @foo(i8* inreg %p) nounwind {
  %tmp0 = load i8* %p
  %tmp1 = ashr i8 %tmp0, 5
  %tmp2 = sext i8 %tmp1 to i32
  ret i32 %tmp2
}

could be dagcombine'd to a sign-extending load with a shift.
For example, on x86 this currently gets this:

	movb	(%eax), %al
	sarb	$5, %al
	movsbl	%al, %eax

while it could get this:

	movsbl	(%eax), %eax
	sarl	$5, %eax

//===---------------------------------------------------------------------===//

1407 | GCC PR31029: | |
1408 | ||
1409 | int test(int x) { return 1-x == x; } // --> return false | |
1410 | int test2(int x) { return 2-x == x; } // --> return x == 1 ? | |
1411 | ||
1412 | Always foldable for odd constants, what is the rule for even? | |
1413 | ||
1414 | //===---------------------------------------------------------------------===// | |
1415 | ||
1416 | PR 3381: GEP to field of size 0 inside a struct could be turned into GEP | |
1417 | for next field in struct (which is at same address). | |
1418 | ||
1419 | For example: store of float into { {{}}, float } could be turned into a store to | |
1420 | the float directly. | |

//===---------------------------------------------------------------------===//

The arg promotion pass should make use of nocapture to make its alias analysis
much more precise.

//===---------------------------------------------------------------------===//

The following functions should be optimized to use a select instead of a
branch (from gcc PR40072):

char char_int(int m) {if(m>7) return 0; return m;}
int int_char(char m) {if(m>7) return 0; return m;}

//===---------------------------------------------------------------------===//

int func(int a, int b) { if (a & 0x80) b |= 0x80; else b &= ~0x80; return b; }

Generates this:

define i32 @func(i32 %a, i32 %b) nounwind readnone ssp {
entry:
  %0 = and i32 %a, 128                      ; <i32> [#uses=1]
  %1 = icmp eq i32 %0, 0                    ; <i1> [#uses=1]
  %2 = or i32 %b, 128                       ; <i32> [#uses=1]
  %3 = and i32 %b, -129                     ; <i32> [#uses=1]
  %b_addr.0 = select i1 %1, i32 %3, i32 %2  ; <i32> [#uses=1]
  ret i32 %b_addr.0
}

However, it's functionally equivalent to:

b = (b & ~0x80) | (a & 0x80);

Which generates this:

define i32 @func(i32 %a, i32 %b) nounwind readnone ssp {
entry:
  %0 = and i32 %b, -129    ; <i32> [#uses=1]
  %1 = and i32 %a, 128     ; <i32> [#uses=1]
  %2 = or i32 %0, %1       ; <i32> [#uses=1]
  ret i32 %2
}

This can be generalized for other forms:

b = (b & ~0x80) | (a & 0x40) << 1;

//===---------------------------------------------------------------------===//

These two functions produce different code.  They shouldn't:

#include <stdint.h>

uint8_t p1(uint8_t b, uint8_t a) {
  b = (b & ~0xc0) | (a & 0xc0);
  return (b);
}

uint8_t p2(uint8_t b, uint8_t a) {
  b = (b & ~0x40) | (a & 0x40);
  b = (b & ~0x80) | (a & 0x80);
  return (b);
}

define zeroext i8 @p1(i8 zeroext %b, i8 zeroext %a) nounwind readnone ssp {
entry:
  %0 = and i8 %b, 63       ; <i8> [#uses=1]
  %1 = and i8 %a, -64      ; <i8> [#uses=1]
  %2 = or i8 %1, %0        ; <i8> [#uses=1]
  ret i8 %2
}

define zeroext i8 @p2(i8 zeroext %b, i8 zeroext %a) nounwind readnone ssp {
entry:
  %0 = and i8 %b, 63       ; <i8> [#uses=1]
  %.masked = and i8 %a, 64 ; <i8> [#uses=1]
  %1 = and i8 %a, -128     ; <i8> [#uses=1]
  %2 = or i8 %1, %0        ; <i8> [#uses=1]
  %3 = or i8 %2, %.masked  ; <i8> [#uses=1]
  ret i8 %3
}

//===---------------------------------------------------------------------===//

IPSCCP does not currently propagate argument-dependent constants through
functions where it does not know all of the callers.  This includes functions
with normal external linkage as well as templates, C99 inline functions etc.
Specifically, it does nothing to:

define i32 @test(i32 %x, i32 %y, i32 %z) nounwind {
entry:
  %0 = add nsw i32 %y, %z
  %1 = mul i32 %0, %x
  %2 = mul i32 %y, %z
  %3 = add nsw i32 %1, %2
  ret i32 %3
}

define i32 @test2() nounwind {
entry:
  %0 = call i32 @test(i32 1, i32 2, i32 4) nounwind
  ret i32 %0
}

It would be interesting to extend IPSCCP to be able to handle simple cases like
this, where all of the arguments to a call are constant (here @test2 could be
folded to simply return 14).  Because IPSCCP runs before inlining, trivial
templates and inline functions are not yet inlined.  The results for a function
+ set of constant arguments should be memoized in a map.

//===---------------------------------------------------------------------===//

The libcall constant folding stuff should be moved out of SimplifyLibcalls into
libanalysis' constantfolding logic.  This would allow IPSCCP to be able to
handle simple things like this:

static int foo(const char *X) { return strlen(X); }
int bar() { return foo("abcd"); }

//===---------------------------------------------------------------------===//

functionattrs doesn't know much about memcpy/memset.  This function should be
marked readnone rather than readonly, since it only twiddles local memory, but
functionattrs doesn't handle memset/memcpy/memmove aggressively:

struct X { int *p; int *q; };
int foo() {
  int i = 0, j = 1;
  struct X x, y;
  int **p;
  y.p = &i;
  x.q = &j;
  p = __builtin_memcpy (&x, &y, sizeof (int *));
  return **p;
}

This can be seen at:
$ clang t.c -S -o - -mkernel -O0 -emit-llvm | opt -functionattrs -S

//===---------------------------------------------------------------------===//

Missed instcombine transformation:

define i1 @a(i32 %x) nounwind readnone {
entry:
  %cmp = icmp eq i32 %x, 30
  %sub = add i32 %x, -30
  %cmp2 = icmp ugt i32 %sub, 9
  %or = or i1 %cmp, %cmp2
  ret i1 %or
}

This should be optimized to a single compare.  Testcase derived from gcc.
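
The %or is true exactly when x is outside [31, 39], so a sketch of the single
compare (in C, using the usual unsigned wrap-around trick):

int a_opt(int x) { return ((unsigned)x - 31u) > 8u; }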

//===---------------------------------------------------------------------===//

Missed instcombine or reassociate transformation:

int a(int a, int b) { return (a==12)&(b>47)&(b<58); }

The sgt and slt should be combined into a single comparison.  Testcase derived
from gcc.
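
The combined form is the standard unsigned range check; a sketch:

int a_opt(int a, int b) { return (a == 12) & (((unsigned)b - 48u) < 10u); }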

//===---------------------------------------------------------------------===//

Missed instcombine transformation:

  %382 = srem i32 %tmp14.i, 64     ; [#uses=1]
  %383 = zext i32 %382 to i64      ; [#uses=1]
  %384 = shl i64 %381, %383        ; [#uses=1]
  %385 = icmp slt i32 %tmp14.i, 64 ; [#uses=1]

The srem can be transformed to an and because if %tmp14.i is negative, the
shift is undefined.  Testcase derived from 403.gcc.
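
The same reasoning in C terms, as a sketch: because a negative shift count is
already undefined, the remainder may be computed with a mask:

unsigned long long shl_mod(unsigned long long v, int n) {
  return v << (n % 64);   /* safe to lower as: v << (n & 63) */
}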

//===---------------------------------------------------------------------===//

This is a range comparison on a divided result (from 403.gcc):

  %1337 = sdiv i32 %1336, 8              ; [#uses=1]
  %.off.i208 = add i32 %1336, 7          ; [#uses=1]
  %1338 = icmp ult i32 %.off.i208, 15    ; [#uses=1]

We already catch this (removing the sdiv) if there isn't an add; we should
handle the 'add' as well.  This is a common idiom in 403.gcc's builtin_alloca
code.  C testcase:

int a(int x) { return (unsigned)(x/16+7) < 15; }

Another similar case involves truncations on 64-bit targets:

  %361 = sdiv i64 %.046, 8               ; [#uses=1]
  %362 = trunc i64 %361 to i32           ; [#uses=2]
...
  %367 = icmp eq i32 %362, 0             ; [#uses=1]

//===---------------------------------------------------------------------===//

Missed instcombine/dagcombine transformation:

define void @lshift_lt(i8 zeroext %a) nounwind {
entry:
  %conv = zext i8 %a to i32
  %shl = shl i32 %conv, 3
  %cmp = icmp ult i32 %shl, 33
  br i1 %cmp, label %if.then, label %if.end

if.then:
  tail call void @bar() nounwind
  ret void

if.end:
  ret void
}
declare void @bar() nounwind

The shift should be eliminated.  Testcase derived from gcc.
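
Since %shl is %a*8 with no possibility of overflow, the compare reduces to one
on %a directly; a C sketch of the expected form:

extern void bar(void);
void lshift_lt_opt(unsigned char a) {
  if (a <= 4)    /* (a << 3) < 33  <=>  a <= 4 */
    bar();
}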

//===---------------------------------------------------------------------===//

These compile into different code; one gets recognized as a switch and the
other doesn't, due to phase ordering issues (PR6212):

int test1(int mainType, int subType) {
  if (mainType == 7)
    subType = 4;
  else if (mainType == 9)
    subType = 6;
  else if (mainType == 11)
    subType = 9;
  return subType;
}

int test2(int mainType, int subType) {
  if (mainType == 7)
    subType = 4;
  if (mainType == 9)
    subType = 6;
  if (mainType == 11)
    subType = 9;
  return subType;
}

//===---------------------------------------------------------------------===//

The following test case (from PR6576):

define i32 @mul(i32 %a, i32 %b) nounwind readnone {
entry:
  %cond1 = icmp eq i32 %b, 0    ; <i1> [#uses=1]
  br i1 %cond1, label %exit, label %bb.nph
bb.nph:    ; preds = %entry
  %tmp = mul i32 %b, %a    ; <i32> [#uses=1]
  ret i32 %tmp
exit:      ; preds = %entry
  ret i32 0
}

could be reduced to:

define i32 @mul(i32 %a, i32 %b) nounwind readnone {
entry:
  %tmp = mul i32 %b, %a
  ret i32 %tmp
}

//===---------------------------------------------------------------------===//

We should use DSE + llvm.lifetime.end to delete dead vtable pointer updates.
See GCC PR34949.

Another interesting case is that something related could be used for variables
that go const after their ctor has finished.  In these cases, globalopt (which
can statically run the constructor) could mark the global const (so it gets put
in the readonly section).  A testcase would be:

#include <complex>
using namespace std;
const complex<char> should_be_in_rodata (42,-42);
complex<char> should_be_in_data (42,-42);
complex<char> should_be_in_bss;

We currently evaluate the ctors, but the globals don't become const because
the optimizer doesn't know they "become const" after the ctor is done.  See
GCC PR4131 for more examples.

//===---------------------------------------------------------------------===//

In this code:

long foo(long x) {
  return x > 1 ? x : 1;
}

LLVM emits a comparison with 1 instead of 0.  A comparison with 0 would be
equivalent and cheaper on most targets.
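
The two forms agree for every value of x (both return 1 for x <= 1 and x
otherwise), so this is safe; a sketch:

long foo_zero(long x) {
  return x > 0 ? x : 1;   /* same max(x, 1), but compares against zero */
}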

LLVM prefers comparisons with zero over non-zero in general, but in this
case it chooses instead to keep the max operation obvious.

//===---------------------------------------------------------------------===//

define void @a(i32 %x) nounwind {
entry:
  switch i32 %x, label %if.end [
    i32 0, label %if.then
    i32 1, label %if.then
    i32 2, label %if.then
    i32 3, label %if.then
    i32 5, label %if.then
  ]
if.then:
  tail call void @foo() nounwind
  ret void
if.end:
  ret void
}
declare void @foo()

Generated code on x86-64 (other platforms give similar results):
a:
        cmpl    $5, %edi
        ja      .LBB0_2
        cmpl    $4, %edi
        jne     .LBB0_3
.LBB0_2:
        ret
.LBB0_3:
        jmp     foo  # TAILCALL

If we wanted to be really clever, we could simplify the whole thing to
something like the following (the xor maps the case values {0,1,2,3,5} onto
{0,1,2,3,4}, so one unsigned compare selects them), which eliminates a branch:
        xorl    $1, %edi
        cmpl    $4, %edi
        jbe     .LBB0_2
        ret
.LBB0_2:
        jmp     foo  # TAILCALL

//===---------------------------------------------------------------------===//

We compile this:

int foo(int a) { return (a & (~15)) / 16; }

Into:

define i32 @foo(i32 %a) nounwind readnone ssp {
entry:
  %and = and i32 %a, -16
  %div = sdiv i32 %and, 16
  ret i32 %div
}

but (X & -A)/A is X >> log2(A) when A is a power of 2, so this case
should be instcombined into just "a >> 4".
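
That is (the 'and' clears the low four bits, so the sdiv is exact and is just
an arithmetic shift):

int foo_opt(int a) { return a >> 4; }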

We do get this at the codegen level, so something knows about it, but
instcombine should catch it earlier:

_foo:                                   ## @foo
## BB#0:                                ## %entry
        movl    %edi, %eax
        sarl    $4, %eax
        ret

//===---------------------------------------------------------------------===//

This code (from GCC PR28685):

int test(int a, int b) {
  int lt = a < b;
  int eq = a == b;
  if (lt)
    return 1;
  return eq;
}

Is compiled to:

define i32 @test(i32 %a, i32 %b) nounwind readnone ssp {
entry:
  %cmp = icmp slt i32 %a, %b
  br i1 %cmp, label %return, label %if.end

if.end:    ; preds = %entry
  %cmp5 = icmp eq i32 %a, %b
  %conv6 = zext i1 %cmp5 to i32
  ret i32 %conv6

return:    ; preds = %entry
  ret i32 1
}

it could be:

define i32 @test__(i32 %a, i32 %b) nounwind readnone ssp {
entry:
  %0 = icmp sle i32 %a, %b
  %retval = zext i1 %0 to i32
  ret i32 %retval
}

//===---------------------------------------------------------------------===//

This code can be seen in viterbi:

  %64 = call noalias i8* @malloc(i64 %62) nounwind
...
  %67 = call i64 @llvm.objectsize.i64(i8* %64, i1 false) nounwind
  %68 = call i8* @__memset_chk(i8* %64, i32 0, i64 %62, i64 %67) nounwind

llvm.objectsize.i64 should be taught about malloc/calloc, allowing it to
fold to %62.  This is a security win (overflows of malloc will get caught)
and also a performance win by exposing more memsets to the optimizer.

This occurs several times in viterbi.

Note that this would change the semantics of @llvm.objectsize, which by its
current definition always folds to a constant.  We should also make sure that
we remove checking in code like

  char *p = malloc(strlen(s)+1);
  __strcpy_chk(p, s, __builtin_objectsize(p, 0));

//===---------------------------------------------------------------------===//

This code (from Benchmarks/Dhrystone/dry.c):

define i32 @Func1(i32, i32) nounwind readnone optsize ssp {
entry:
  %sext = shl i32 %0, 24
  %conv = ashr i32 %sext, 24
  %sext6 = shl i32 %1, 24
  %conv4 = ashr i32 %sext6, 24
  %cmp = icmp eq i32 %conv, %conv4
  %. = select i1 %cmp, i32 10000, i32 0
  ret i32 %.
}

Should be simplified into something like:

define i32 @Func1(i32, i32) nounwind readnone optsize ssp {
entry:
  %sext = shl i32 %0, 24
  %conv = and i32 %sext, -16777216      ; 0xFF000000
  %sext6 = shl i32 %1, 24
  %conv4 = and i32 %sext6, -16777216    ; 0xFF000000
  %cmp = icmp eq i32 %conv, %conv4
  %. = select i1 %cmp, i32 10000, i32 0
  ret i32 %.
}

and then to:

define i32 @Func1(i32, i32) nounwind readnone optsize ssp {
entry:
  %conv = and i32 %0, 255     ; 0xFF
  %conv4 = and i32 %1, 255    ; 0xFF
  %cmp = icmp eq i32 %conv, %conv4
  %. = select i1 %cmp, i32 10000, i32 0
  ret i32 %.
}

//===---------------------------------------------------------------------===//

clang -O3 currently compiles this code

int g(unsigned int a) {
  unsigned int c[100];
  c[10] = a;
  c[11] = a;
  unsigned int b = c[10] + c[11];
  if(b > a*2) a = 4;
  else a = 8;
  return a + 7;
}

into

define i32 @g(i32 %a) nounwind readnone {
  %add = shl i32 %a, 1
  %mul = shl i32 %a, 1
  %cmp = icmp ugt i32 %add, %mul
  %a.addr.0 = select i1 %cmp, i32 11, i32 15
  ret i32 %a.addr.0
}

The icmp should fold to false.  This CSE opportunity is only available
after GVN and InstCombine have run.

//===---------------------------------------------------------------------===//

memcpyopt should turn this:

define i8* @test10(i32 %x) {
  %alloc = call noalias i8* @malloc(i32 %x) nounwind
  call void @llvm.memset.p0i8.i32(i8* %alloc, i8 0, i32 %x, i32 1, i1 false)
  ret i8* %alloc
}

into a call to calloc.  We should make sure that we analyze calloc as
aggressively as malloc though.
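
In source terms the desired rewrite is simply (a sketch):

void *test10_opt(unsigned x) {
  return calloc(1, x);   /* one call instead of malloc + memset(0) */
}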

//===---------------------------------------------------------------------===//

clang -O3 doesn't optimize this:

void f1(int* begin, int* end) {
  std::fill(begin, end, 0);
}

into a memset.  This is PR8942.

//===---------------------------------------------------------------------===//

clang -O3 -fno-exceptions currently compiles this code:

void f(int N) {
  std::vector<int> v(N);

  extern void sink(void*); sink(&v);
}

into

define void @_Z1fi(i32 %N) nounwind {
entry:
  %v2 = alloca [3 x i32*], align 8
  %v2.sub = getelementptr inbounds [3 x i32*]* %v2, i64 0, i64 0
  %tmpcast = bitcast [3 x i32*]* %v2 to %"class.std::vector"*
  %conv = sext i32 %N to i64
  store i32* null, i32** %v2.sub, align 8, !tbaa !0
  %tmp3.i.i.i.i.i = getelementptr inbounds [3 x i32*]* %v2, i64 0, i64 1
  store i32* null, i32** %tmp3.i.i.i.i.i, align 8, !tbaa !0
  %tmp4.i.i.i.i.i = getelementptr inbounds [3 x i32*]* %v2, i64 0, i64 2
  store i32* null, i32** %tmp4.i.i.i.i.i, align 8, !tbaa !0
  %cmp.i.i.i.i = icmp eq i32 %N, 0
  br i1 %cmp.i.i.i.i, label %_ZNSt12_Vector_baseIiSaIiEEC2EmRKS0_.exit.thread.i.i, label %cond.true.i.i.i.i

_ZNSt12_Vector_baseIiSaIiEEC2EmRKS0_.exit.thread.i.i:    ; preds = %entry
  store i32* null, i32** %v2.sub, align 8, !tbaa !0
  store i32* null, i32** %tmp3.i.i.i.i.i, align 8, !tbaa !0
  %add.ptr.i5.i.i = getelementptr inbounds i32* null, i64 %conv
  store i32* %add.ptr.i5.i.i, i32** %tmp4.i.i.i.i.i, align 8, !tbaa !0
  br label %_ZNSt6vectorIiSaIiEEC1EmRKiRKS0_.exit

cond.true.i.i.i.i:    ; preds = %entry
  %cmp.i.i.i.i.i = icmp slt i32 %N, 0
  br i1 %cmp.i.i.i.i.i, label %if.then.i.i.i.i.i, label %_ZNSt12_Vector_baseIiSaIiEEC2EmRKS0_.exit.i.i

if.then.i.i.i.i.i:    ; preds = %cond.true.i.i.i.i
  call void @_ZSt17__throw_bad_allocv() noreturn nounwind
  unreachable

_ZNSt12_Vector_baseIiSaIiEEC2EmRKS0_.exit.i.i:    ; preds = %cond.true.i.i.i.i
  %mul.i.i.i.i.i = shl i64 %conv, 2
  %call3.i.i.i.i.i = call noalias i8* @_Znwm(i64 %mul.i.i.i.i.i) nounwind
  %0 = bitcast i8* %call3.i.i.i.i.i to i32*
  store i32* %0, i32** %v2.sub, align 8, !tbaa !0
  store i32* %0, i32** %tmp3.i.i.i.i.i, align 8, !tbaa !0
  %add.ptr.i.i.i = getelementptr inbounds i32* %0, i64 %conv
  store i32* %add.ptr.i.i.i, i32** %tmp4.i.i.i.i.i, align 8, !tbaa !0
  call void @llvm.memset.p0i8.i64(i8* %call3.i.i.i.i.i, i8 0, i64 %mul.i.i.i.i.i, i32 4, i1 false)
  br label %_ZNSt6vectorIiSaIiEEC1EmRKiRKS0_.exit

This is just the handling of the construction of the vector.  Most surprising
here is the fact that all three null stores in %entry are dead (because we do
no cross-block DSE).

Also surprising is that %conv isn't simplified to 0 in %....exit.thread.i.i.
This is because the client of LazyValueInfo doesn't simplify all instruction
operands, just selected ones.

//===---------------------------------------------------------------------===//

clang -O3 -fno-exceptions currently compiles this code:

void f(char* a, int n) {
  __builtin_memset(a, 0, n);
  for (int i = 0; i < n; ++i)
    a[i] = 0;
}

into:

define void @_Z1fPci(i8* nocapture %a, i32 %n) nounwind {
entry:
  %conv = sext i32 %n to i64
  tail call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 %conv, i32 1, i1 false)
  %cmp8 = icmp sgt i32 %n, 0
  br i1 %cmp8, label %for.body.lr.ph, label %for.end

for.body.lr.ph:    ; preds = %entry
  %tmp10 = add i32 %n, -1
  %tmp11 = zext i32 %tmp10 to i64
  %tmp12 = add i64 %tmp11, 1
  call void @llvm.memset.p0i8.i64(i8* %a, i8 0, i64 %tmp12, i32 1, i1 false)
  ret void

for.end:    ; preds = %entry
  ret void
}

This shouldn't need the ((zext (%n - 1)) + 1) game, and it should ideally fold
the two memsets together.

The issue with the addition only occurs in 64-bit mode, and appears to be at
least partially caused by Scalar Evolution not keeping its cache updated: it
returns the "wrong" result immediately after indvars runs, but figures out the
expected result if it is run from scratch on IR resulting from running indvars.

//===---------------------------------------------------------------------===//

clang -O3 -fno-exceptions currently compiles this code:

struct S {
  unsigned short m1, m2;
  unsigned char m3, m4;
};

void f(int N) {
  std::vector<S> v(N);
  extern void sink(void*); sink(&v);
}

into poor code for zero-initializing 'v' when N is >0.  The problem is that
S is only 6 bytes, but each element is 8-byte aligned.  We generate a loop with
4 stores on each iteration.  If the struct were 8 bytes, this would get turned
into a memset.

In order to handle this we have to:

A) Teach clang to generate metadata for memsets of structs that have holes in
   them.
B) Teach clang to use such a memset for zero init of this struct (since it has
   a hole), instead of doing elementwise zeroing.

//===---------------------------------------------------------------------===//

clang -O3 currently compiles this code:

extern const int magic;
double f() { return 0.0 * magic; }

into

@magic = external constant i32

define double @_Z1fv() nounwind readnone {
entry:
  %tmp = load i32* @magic, align 4, !tbaa !0
  %conv = sitofp i32 %tmp to double
  %mul = fmul double %conv, 0.000000e+00
  ret double %mul
}

We should be able to fold away this fmul to 0.0.  More generally, fmul(x,0.0)
can be folded to 0.0 if we can prove that the LHS is not -0.0, not a NaN, and
not an INF.  The CannotBeNegativeZero predicate in value tracking should be
extended to support general "fpclassify" operations that can return
yes/no/unknown for each of these predicates.

In this predicate, we know that uitofp is trivially never NaN or -0.0, and
we know that it isn't +/-Inf if the floating point type has enough exponent
bits to represent the largest integer value as < inf.

//===---------------------------------------------------------------------===//

When optimizing a transformation that can change the sign of 0.0 (such as the
0.0*val -> 0.0 transformation above), it might be provable that the sign of the
expression doesn't matter.  For example, by the above rules, we can't transform
fmul(sitofp(x), 0.0) into 0.0, because x might be -1 and the result of the
expression is defined to be -0.0.

If we look at the uses of the fmul, for example, we might be able to prove that
all uses don't care about the sign of zero.  For example, if we have:

  fadd(fmul(sitofp(x), 0.0), 2.0)

Since we know that x+2.0 doesn't care about the sign of any zeros in X, we can
transform the fmul to 0.0, and then the fadd to 2.0.

//===---------------------------------------------------------------------===//

We should enhance memcpy/memmove/memset to allow a metadata node on them
indicating that some bytes of the transfer are undefined.  This is useful for
frontends like clang when lowering struct copies, when some elements of the
struct are undefined.  Consider something like this:

struct x {
  char a;
  int b[4];
};
void foo(struct x*P);
struct x testfunc() {
  struct x V1, V2;
  foo(&V1);
  V2 = V1;

  return V2;
}

We currently compile this to:
$ clang t.c -S -o - -O0 -emit-llvm | opt -scalarrepl -S

%struct.x = type { i8, [4 x i32] }

define void @testfunc(%struct.x* sret %agg.result) nounwind ssp {
entry:
  %V1 = alloca %struct.x, align 4
  call void @foo(%struct.x* %V1)
  %tmp1 = bitcast %struct.x* %V1 to i8*
  %0 = bitcast %struct.x* %V1 to i160*
  %srcval1 = load i160* %0, align 4
  %tmp2 = bitcast %struct.x* %agg.result to i8*
  %1 = bitcast %struct.x* %agg.result to i160*
  store i160 %srcval1, i160* %1, align 4
  ret void
}

This happens because SRoA sees that the temp alloca is being memcpy'd into and
out of, that it has holes, and that it therefore has to be conservative.  If we
knew about the holes, then this could be much, much better.

Having information about these holes would also improve memcpy (etc) lowering
at llc time when it gets inlined, because we can use smaller transfers.  This
also avoids partial register stalls in some important cases.

//===---------------------------------------------------------------------===//

We don't fold (icmp (add) (add)) unless the two adds only have a single use.
There are a lot of cases that we're refusing to fold in (e.g.) 256.bzip2, for
example:

  %indvar.next90 = add i64 %indvar89, 1    ;; Has 2 uses
  %tmp96 = add i64 %tmp95, 1               ;; Has 1 use
  %exitcond97 = icmp eq i64 %indvar.next90, %tmp96

We don't fold this because we don't want to introduce an overlapped live range
of the ivar.  However, we can make this more aggressive without causing
performance issues in two ways:

1. If *either* the LHS or RHS has a single use, we can definitely do the
   transformation.  In the overlapping liverange case we're trading one register
   use for one fewer operation, which is a reasonable trade.  Before doing this
   we should verify that the llc output actually shrinks for some benchmarks.
2. If both ops have multiple uses, we can still fold it if the operations are
   both sinkable to *after* the icmp (e.g. in a subsequent block) which doesn't
   increase register pressure.

There are a ton of icmp's we aren't simplifying because of the reg pressure
concern.  Care is warranted here though because many of these are induction
variables and other cases that matter a lot to performance, like the above.
Here's a blob of code that you can drop into the bottom of visitICmp to see some
missed cases:

  { Value *A, *B, *C, *D;
    if (match(Op0, m_Add(m_Value(A), m_Value(B))) &&
        match(Op1, m_Add(m_Value(C), m_Value(D))) &&
        (A == C || A == D || B == C || B == D)) {
      errs() << "OP0 = " << *Op0 << "  U=" << Op0->getNumUses() << "\n";
      errs() << "OP1 = " << *Op1 << "  U=" << Op1->getNumUses() << "\n";
      errs() << "CMP = " << I << "\n\n";
    }
  }

//===---------------------------------------------------------------------===//

define i1 @test1(i32 %x) nounwind {
  %and = and i32 %x, 3
  %cmp = icmp ult i32 %and, 2
  ret i1 %cmp
}

Can be folded to (x & 2) == 0.

define i1 @test2(i32 %x) nounwind {
  %and = and i32 %x, 3
  %cmp = icmp ugt i32 %and, 1
  ret i1 %cmp
}

Can be folded to (x & 2) != 0.

SimplifyDemandedBits shrinks the "and" constant to 2 but instcombine misses the
icmp transform.

//===---------------------------------------------------------------------===//

This code:

typedef struct {
  int f1:1;
  int f2:1;
  int f3:1;
  int f4:29;
} t1;

typedef struct {
  int f1:1;
  int f2:1;
  int f3:30;
} t2;

t1 s1;
t2 s2;

void func1(void)
{
  s1.f1 = s2.f1;
  s1.f2 = s2.f2;
}

Compiles into this IR (on x86-64 at least):

%struct.t1 = type { i8, [3 x i8] }
@s2 = global %struct.t1 zeroinitializer, align 4
@s1 = global %struct.t1 zeroinitializer, align 4

define void @func1() nounwind ssp noredzone {
entry:
  %0 = load i32* bitcast (%struct.t1* @s2 to i32*), align 4
  %bf.val.sext5 = and i32 %0, 1
  %1 = load i32* bitcast (%struct.t1* @s1 to i32*), align 4
  %2 = and i32 %1, -4
  %3 = or i32 %2, %bf.val.sext5
  %bf.val.sext26 = and i32 %0, 2
  %4 = or i32 %3, %bf.val.sext26
  store i32 %4, i32* bitcast (%struct.t1* @s1 to i32*), align 4
  ret void
}

The two or/and's should be merged into one each.
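
In C terms, the merged form is a single mask-and-insert on the underlying
32-bit words (a sketch):

unsigned merged(unsigned s1_word, unsigned s2_word) {
  return (s1_word & ~3u) | (s2_word & 3u);   /* copy f1 and f2 together */
}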

//===---------------------------------------------------------------------===//

Machine level code hoisting can be useful in some cases.  For example, PR9408
is about:

typedef union {
 void (*f1)(int);
 void (*f2)(long);
} funcs;

void foo(funcs f, int which) {
 int a = 5;
 if (which) {
   f.f1(a);
 } else {
   f.f2(a);
 }
}

which we compile to:

foo:                                    # @foo
# BB#0:                                 # %entry
        pushq   %rbp
        movq    %rsp, %rbp
        testl   %esi, %esi
        movq    %rdi, %rax
        je      .LBB0_2
# BB#1:                                 # %if.then
        movl    $5, %edi
        callq   *%rax
        popq    %rbp
        ret
.LBB0_2:                                # %if.else
        movl    $5, %edi
        callq   *%rax
        popq    %rbp
        ret

Note that bb1 and bb2 are the same.  This doesn't happen at the IR level
because one call is passing an i32 and the other is passing an i64.

//===---------------------------------------------------------------------===//

I see this sort of pattern in 176.gcc in a few places (e.g. the start of
store_bit_field).  The rem should be replaced with a multiply and subtract:

  %3 = sdiv i32 %A, %B
  %4 = srem i32 %A, %B

Similarly for udiv/urem.  Note that this shouldn't be done on X86 or ARM,
which can do this in a single operation (instruction or libcall).  It is
probably best to do this in the code generator.
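
The rewrite, in C terms (one division; the remainder comes from a multiply and
subtract):

void divrem(int A, int B, int *q, int *r) {
  *q = A / B;
  *r = A - *q * B;   /* replaces A % B */
}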

//===---------------------------------------------------------------------===//

unsigned foo(unsigned x, unsigned y) { return (x & y) == 0 || x == 0; }
should fold to (x & y) == 0.

//===---------------------------------------------------------------------===//

unsigned foo(unsigned x, unsigned y) { return x > y && x != 0; }
should fold to x > y.

//===---------------------------------------------------------------------===//