//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

STATISTIC(NumSimplified, "Number of library calls simplified");

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
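/// (Illustrative: i8 and i16 promote to i32, mirroring C's default argument
/// promotions; i32 and wider, and non-integer types, are returned unchanged.)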
static Type *getPromotedType(Type *Ty) {
  if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// reduceToSingleValueType - Given an aggregate type which ultimately holds a
/// single scalar element, like {{{type}}} or [1 x type], return type.
static Type *reduceToSingleValueType(Type *T) {
  while (!T->isSingleValueType()) {
    if (StructType *STy = dyn_cast<StructType>(T)) {
      if (STy->getNumElements() == 1)
        T = STy->getElementType(0);
      else
        break;
    } else if (ArrayType *ATy = dyn_cast<ArrayType>(T)) {
      if (ATy->getNumElements() == 1)
        T = ATy->getElementType();
      else
        break;
    } else
      break;
  }

  return T;
}

Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
  unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, AC, MI, DT);
  unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, AC, MI, DT);
  unsigned MinAlign = std::min(DstAlign, SrcAlign);
  unsigned CopyAlign = MI->getAlignment();

  if (CopyAlign < MinAlign) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      MinAlign, false));
    return MI;
  }

  // If the MemCpyInst length is 1/2/4/8 bytes then replace the memcpy with a
  // load/store.
  ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getArgOperand(2));
  if (!MemOpLength) return nullptr;

  // Source and destination pointer types are always "i8*" for the intrinsic.
  // See if the size is something we can handle with a single primitive
  // load/store. A single load+store correctly handles overlapping memory in
  // the memmove case.
  uint64_t Size = MemOpLength->getLimitedValue();
  assert(Size && "0-sized memory transferring should be removed already.");

  if (Size > 8 || (Size&(Size-1)))
    return nullptr;  // If not 1/2/4/8 bytes, exit.

  // Use an integer load+store unless we can find something better.
  unsigned SrcAddrSp =
    cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
  unsigned DstAddrSp =
    cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();

  IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
  Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
  Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);

  // Memcpy forces the use of i8* for the source and destination. That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*. We'd much rather use a double load+store than an i64
  // load+store here, because this improves the odds that the source or dest
  // address will be promotable. See if we can find a better type than the
  // integer datatype.
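  // (Illustrative IR, with hypothetical value names: an 8-byte copy of a
  // double,
  //   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i32 8, i1 false)
  // is better replaced by
  //   %v = load double* %sp, align 8
  //   store double %v, double* %dp, align 8
  // than by an i64 load+store, since a double slot is more likely promotable.)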
  Value *StrippedDest = MI->getArgOperand(0)->stripPointerCasts();
  MDNode *CopyMD = nullptr;
  if (StrippedDest != MI->getArgOperand(0)) {
    Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                     ->getElementType();
    if (DL && SrcETy->isSized() && DL->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double]. Rip
      // down through these levels if so.
      SrcETy = reduceToSingleValueType(SrcETy);

      if (SrcETy->isSingleValueType()) {
        NewSrcPtrTy = PointerType::get(SrcETy, SrcAddrSp);
        NewDstPtrTy = PointerType::get(SrcETy, DstAddrSp);

        // If the memcpy has metadata describing the members, see if we can
        // get the TBAA tag describing our copy.
        if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
          if (M->getNumOperands() == 3 && M->getOperand(0) &&
              mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
              mdconst::extract<ConstantInt>(M->getOperand(0))->isNullValue() &&
              M->getOperand(1) &&
              mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
              mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
                  Size &&
              M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
            CopyMD = cast<MDNode>(M->getOperand(2));
        }
      }
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
  LoadInst *L = Builder->CreateLoad(Src, MI->isVolatile());
  L->setAlignment(SrcAlign);
  if (CopyMD)
    L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
  StoreInst *S = Builder->CreateStore(L, Dest, MI->isVolatile());
  S->setAlignment(DstAlign);
  if (CopyMD)
    S->setMetadata(LLVMContext::MD_tbaa, CopyMD);

  // Set the size of the copy to 0; it will be deleted on the next iteration.
  MI->setArgOperand(2, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}

Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
  unsigned Alignment = getKnownAlignment(MI->getDest(), DL, AC, MI, DT);
  if (MI->getAlignment() < Alignment) {
    MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
                                      Alignment, false));
    return MI;
  }

  // Extract the length, alignment, and fill value if they are constant.
  ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
  ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
  if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
    return nullptr;
  uint64_t Len = LenC->getLimitedValue();
  Alignment = MI->getAlignment();
  assert(Len && "0-sized memory setting should be removed already.");

  // memset(s,c,n) -> store s, c (for n=1,2,4,8)
  if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
    Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.

    Value *Dest = MI->getDest();
    unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
    Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
    Dest = Builder->CreateBitCast(Dest, NewDstPtrTy);

    // Alignment 0 means the same as alignment 1 for memset, but not for store.
    if (Alignment == 0) Alignment = 1;

    // Extract the fill value and store.
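    // (Illustrative: multiplying the i8 fill value by 0x0101010101010101
    // splats it across all byte lanes, so memset(p, 0xAB, 4) becomes a store
    // of the i32 constant 0xABABABAB; the constant is truncated to ITy.)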
    uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
    StoreInst *S = Builder->CreateStore(ConstantInt::get(ITy, Fill), Dest,
                                        MI->isVolatile());
    S->setAlignment(Alignment);

    // Set the size of the copy to 0; it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return nullptr;
}

/// visitCallInst - CallInst simplification. This mostly only handles folding
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
/// the heavy lifting.
///
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
  if (isFreeCall(&CI, TLI))
    return visitFree(CI);

  // If the caller function is nounwind, mark the call as nounwind, even if the
  // callee isn't.
  if (CI.getParent()->getParent()->doesNotThrow() &&
      !CI.doesNotThrow()) {
    CI.setDoesNotThrow();
    return &CI;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue())
        return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations. We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // No other transformations apply to volatile transfers.
    if (MI->isVolatile())
      return nullptr;

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          Type *Tys[3] = { CI.getArgOperand(0)->getType(),
                           CI.getArgOperand(1)->getType(),
                           CI.getArgOperand(2)->getType() };
          CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    uint64_t Size;
    if (getObjectSize(II->getArgOperand(0), Size, DL, TLI))
      return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
    return nullptr;
  }
  case Intrinsic::bswap: {
    Value *IIOperand = II->getArgOperand(0);
    Value *X = nullptr;

    // bswap(bswap(x)) -> x
    if (match(IIOperand, m_BSwap(m_Value(X))))
      return ReplaceInstUsesWith(CI, X);

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
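    // (Illustrative: for an i32 x truncated to i16, the inner bswap has moved
    // the bytes of interest to the top, so bswap16(trunc(bswap32(x))) equals
    // trunc(x >> 16); c is the difference of the two bit widths.)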
    if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
      unsigned C = X->getType()->getPrimitiveSizeInBits() -
        IIOperand->getType()->getPrimitiveSizeInBits();
      Value *CV = ConstantInt::get(X->getType(), C);
      Value *V = Builder->CreateLShr(X, CV);
      return new TruncInst(V, IIOperand->getType());
    }
    break;
  }

  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getArgOperand(0));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getArgOperand(0));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
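    // (Illustrative: if known-bits analysis proves bit 4 is one and bits 0-3
    // are zero, cttz is exactly 4 regardless of the unknown high bits.)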
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));

  }
  break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    IntegerType *IT = dyn_cast<IntegerType>(II->getArgOperand(0)->getType());
    // FIXME: Try to simplify vectors of integers.
    if (!IT) break;
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    computeKnownBits(II->getArgOperand(0), KnownZero, KnownOne, 0, II);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));

  }
  break;
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    OverflowResult OR = computeOverflowForUnsignedAdd(LHS, RHS, II);
    if (OR == OverflowResult::NeverOverflows)
      return CreateOverflowTuple(II, Builder->CreateNUWAdd(LHS, RHS), false);
    if (OR == OverflowResult::AlwaysOverflows)
      return CreateOverflowTuple(II, Builder->CreateAdd(LHS, RHS), true);
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        return CreateOverflowTuple(II, II->getArgOperand(0), false,
                                   /*ReUseName*/false);
      }
    }

    // We can strength-reduce this signed add into a regular add if we
    // can prove that it will never overflow.
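    // (Illustrative: if both operands are sign-extended from i16 to i32, the
    // sum fits in 17 bits, so the add cannot overflow and the overflow bit
    // folds to false.)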
    if (II->getIntrinsicID() == Intrinsic::sadd_with_overflow) {
      Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
      if (WillNotOverflowSignedAdd(LHS, RHS, II)) {
        return CreateOverflowTuple(II, Builder->CreateNSWAdd(LHS, RHS), false);
      }
    }

    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(LHS) || isa<UndefValue>(RHS))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) {
      // X - 0 -> {X, false}
      if (ConstRHS->isZero()) {
        return CreateOverflowTuple(II, LHS, false, /*ReUseName*/false);
      }
    }
    if (II->getIntrinsicID() == Intrinsic::ssub_with_overflow) {
      if (WillNotOverflowSignedSub(LHS, RHS, II)) {
        return CreateOverflowTuple(II, Builder->CreateNSWSub(LHS, RHS), false);
      }
    } else {
      if (WillNotOverflowUnsignedSub(LHS, RHS, II)) {
        return CreateOverflowTuple(II, Builder->CreateNUWSub(LHS, RHS), false);
      }
    }
    break;
  }
  case Intrinsic::umul_with_overflow: {
    Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
    OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, II);
    if (OR == OverflowResult::NeverOverflows)
      return CreateOverflowTuple(II, Builder->CreateNUWMul(LHS, RHS), false);
    if (OR == OverflowResult::AlwaysOverflows)
      return CreateOverflowTuple(II, Builder->CreateMul(LHS, RHS), true);
  } // FALL THROUGH
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getArgOperand(0)) &&
        !isa<Constant>(II->getArgOperand(1))) {
      Value *LHS = II->getArgOperand(0);
      II->setArgOperand(0, II->getArgOperand(1));
      II->setArgOperand(1, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getArgOperand(1)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        return CreateOverflowTuple(II, II->getArgOperand(0), false,
                                   /*ReUseName*/false);
      }
    }
    if (II->getIntrinsicID() == Intrinsic::smul_with_overflow) {
      Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
      if (WillNotOverflowSignedMul(LHS, RHS, II)) {
        return CreateOverflowTuple(II, Builder->CreateNSWMul(LHS, RHS), false);
      }
    }
    break;
  case Intrinsic::minnum:
  case Intrinsic::maxnum: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // fmin(x, x) -> x
    if (Arg0 == Arg1)
      return ReplaceInstUsesWith(CI, Arg0);

    const ConstantFP *C0 = dyn_cast<ConstantFP>(Arg0);
    const ConstantFP *C1 = dyn_cast<ConstantFP>(Arg1);

    // Canonicalize constants into the RHS.
    if (C0 && !C1) {
      II->setArgOperand(0, Arg1);
      II->setArgOperand(1, Arg0);
      return II;
    }

    // fmin(x, nan) -> x
    if (C1 && C1->isNaN())
      return ReplaceInstUsesWith(CI, Arg0);

    // If one operand is undef, we can return the other: had the undef been
    // NaN we would have returned the other value anyway, and a NaN result
    // requires both operands to be NaN.
    //
    // fmin(undef, x) -> x
    if (isa<UndefValue>(Arg0))
      return ReplaceInstUsesWith(CI, Arg1);

    // fmin(x, undef) -> x
    if (isa<UndefValue>(Arg1))
      return ReplaceInstUsesWith(CI, Arg0);

    Value *X = nullptr;
    Value *Y = nullptr;
    if (II->getIntrinsicID() == Intrinsic::minnum) {
      // fmin(x, fmin(x, y)) -> fmin(x, y)
      // fmin(y, fmin(x, y)) -> fmin(x, y)
      if (match(Arg1, m_FMin(m_Value(X), m_Value(Y)))) {
        if (Arg0 == X || Arg0 == Y)
          return ReplaceInstUsesWith(CI, Arg1);
      }

      // fmin(fmin(x, y), x) -> fmin(x, y)
      // fmin(fmin(x, y), y) -> fmin(x, y)
      if (match(Arg0, m_FMin(m_Value(X), m_Value(Y)))) {
        if (Arg1 == X || Arg1 == Y)
          return ReplaceInstUsesWith(CI, Arg0);
      }

      // TODO: fmin(nnan x, inf) -> x
      // TODO: fmin(nnan ninf x, flt_max) -> x
      if (C1 && C1->isInfinity()) {
        // fmin(x, -inf) -> -inf
        if (C1->isNegative())
          return ReplaceInstUsesWith(CI, Arg1);
      }
    } else {
      assert(II->getIntrinsicID() == Intrinsic::maxnum);
      // fmax(x, fmax(x, y)) -> fmax(x, y)
      // fmax(y, fmax(x, y)) -> fmax(x, y)
      if (match(Arg1, m_FMax(m_Value(X), m_Value(Y)))) {
        if (Arg0 == X || Arg0 == Y)
          return ReplaceInstUsesWith(CI, Arg1);
      }

      // fmax(fmax(x, y), x) -> fmax(x, y)
      // fmax(fmax(x, y), y) -> fmax(x, y)
      if (match(Arg0, m_FMax(m_Value(X), m_Value(Y)))) {
        if (Arg1 == X || Arg1 == Y)
          return ReplaceInstUsesWith(CI, Arg0);
      }

      // TODO: fmax(nnan x, -inf) -> x
      // TODO: fmax(nnan ninf x, -flt_max) -> x
      if (C1 && C1->isInfinity()) {
        // fmax(x, inf) -> inf
        if (!C1->isNegative())
          return ReplaceInstUsesWith(CI, Arg1);
      }
    }
    break;
  }
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
    // Turn PPC lvx -> load if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, AC, II, DT) >=
        16) {
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_lxvw4x:
  case Intrinsic::ppc_vsx_lxvd2x: {
    // Turn PPC VSX loads into normal loads.
    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0),
                                        PointerType::getUnqual(II->getType()));
    return new LoadInst(Ptr, Twine(""), false, 1);
  }
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(1), 16, DL, AC, II, DT) >=
        16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(0)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
      return new StoreInst(II->getArgOperand(0), Ptr);
    }
    break;
  case Intrinsic::ppc_vsx_stxvw4x:
  case Intrinsic::ppc_vsx_stxvd2x: {
    // Turn PPC VSX stores into normal stores.
    Type *OpPtrTy = PointerType::getUnqual(II->getArgOperand(0)->getType());
    Value *Ptr = Builder->CreateBitCast(II->getArgOperand(1), OpPtrTy);
    return new StoreInst(II->getArgOperand(0), Ptr, false, 1);
  }
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (getOrEnforceKnownAlignment(II->getArgOperand(0), 16, DL, AC, II, DT) >=
        16) {
      Type *OpPtrTy =
        PointerType::getUnqual(II->getArgOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getArgOperand(0), OpPtrTy);
      return new StoreInst(II->getArgOperand(1), Ptr);
    }
    break;

  case Intrinsic::x86_sse_cvtss2si:
  case Intrinsic::x86_sse_cvtss2si64:
  case Intrinsic::x86_sse_cvttss2si:
  case Intrinsic::x86_sse_cvttss2si64:
  case Intrinsic::x86_sse2_cvtsd2si:
  case Intrinsic::x86_sse2_cvtsd2si64:
  case Intrinsic::x86_sse2_cvttsd2si:
  case Intrinsic::x86_sse2_cvttsd2si64: {
    // These intrinsics only demand the 0th element of their input vectors. If
    // we can simplify the input based on that, do so now.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                              DemandedElts, UndefElts)) {
      II->setArgOperand(0, V);
      return II;
    }
    break;
  }

  // Constant fold <A x Bi> << Ci.
  // FIXME: We don't handle _dq because it's a shift of an i128, but is
  // represented in the IR as <2 x i64>. A per-element shift is wrong.
  case Intrinsic::x86_sse2_psll_d:
  case Intrinsic::x86_sse2_psll_q:
  case Intrinsic::x86_sse2_psll_w:
  case Intrinsic::x86_sse2_pslli_d:
  case Intrinsic::x86_sse2_pslli_q:
  case Intrinsic::x86_sse2_pslli_w:
  case Intrinsic::x86_avx2_psll_d:
  case Intrinsic::x86_avx2_psll_q:
  case Intrinsic::x86_avx2_psll_w:
  case Intrinsic::x86_avx2_pslli_d:
  case Intrinsic::x86_avx2_pslli_q:
  case Intrinsic::x86_avx2_pslli_w:
  case Intrinsic::x86_sse2_psrl_d:
  case Intrinsic::x86_sse2_psrl_q:
  case Intrinsic::x86_sse2_psrl_w:
  case Intrinsic::x86_sse2_psrli_d:
  case Intrinsic::x86_sse2_psrli_q:
  case Intrinsic::x86_sse2_psrli_w:
  case Intrinsic::x86_avx2_psrl_d:
  case Intrinsic::x86_avx2_psrl_q:
  case Intrinsic::x86_avx2_psrl_w:
  case Intrinsic::x86_avx2_psrli_d:
  case Intrinsic::x86_avx2_psrli_q:
  case Intrinsic::x86_avx2_psrli_w: {
    // Simplify if the shift count is constant: to 0 if the count is >= the
    // element bit width, otherwise to a plain shl/lshr.
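    // (Illustrative: psrli.d of <4 x i32> %v by 3 becomes "lshr <4 x i32> %v,
    // <i32 3, i32 3, i32 3, i32 3>"; any count >= 32 folds the result to
    // zeroinitializer.)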
    auto CDV = dyn_cast<ConstantDataVector>(II->getArgOperand(1));
    auto CInt = dyn_cast<ConstantInt>(II->getArgOperand(1));
    if (!CDV && !CInt)
      break;
    ConstantInt *Count;
    if (CDV)
      Count = cast<ConstantInt>(CDV->getElementAsConstant(0));
    else
      Count = CInt;

    auto Vec = II->getArgOperand(0);
    auto VT = cast<VectorType>(Vec->getType());
    if (Count->getZExtValue() >
        VT->getElementType()->getPrimitiveSizeInBits() - 1)
      return ReplaceInstUsesWith(
          CI, ConstantAggregateZero::get(Vec->getType()));

    bool isPackedShiftLeft = true;
    switch (II->getIntrinsicID()) {
    default : break;
    case Intrinsic::x86_sse2_psrl_d:
    case Intrinsic::x86_sse2_psrl_q:
    case Intrinsic::x86_sse2_psrl_w:
    case Intrinsic::x86_sse2_psrli_d:
    case Intrinsic::x86_sse2_psrli_q:
    case Intrinsic::x86_sse2_psrli_w:
    case Intrinsic::x86_avx2_psrl_d:
    case Intrinsic::x86_avx2_psrl_q:
    case Intrinsic::x86_avx2_psrl_w:
    case Intrinsic::x86_avx2_psrli_d:
    case Intrinsic::x86_avx2_psrli_q:
    case Intrinsic::x86_avx2_psrli_w: isPackedShiftLeft = false; break;
    }

    unsigned VWidth = VT->getNumElements();
    // Get a constant vector of the same type as the first operand.
    auto VTCI = ConstantInt::get(VT->getElementType(), Count->getZExtValue());
    if (isPackedShiftLeft)
      return BinaryOperator::CreateShl(Vec,
          Builder->CreateVectorSplat(VWidth, VTCI));

    return BinaryOperator::CreateLShr(Vec,
        Builder->CreateVectorSplat(VWidth, VTCI));
  }

  case Intrinsic::x86_sse41_pmovsxbw:
  case Intrinsic::x86_sse41_pmovsxwd:
  case Intrinsic::x86_sse41_pmovsxdq:
  case Intrinsic::x86_sse41_pmovzxbw:
  case Intrinsic::x86_sse41_pmovzxwd:
  case Intrinsic::x86_sse41_pmovzxdq: {
    // pmov{s|z}x ignores the upper half of its input vector.
    unsigned VWidth =
      cast<VectorType>(II->getArgOperand(0)->getType())->getNumElements();
    unsigned LowHalfElts = VWidth / 2;
    APInt InputDemandedElts(APInt::getBitsSet(VWidth, 0, LowHalfElts));
    APInt UndefElts(VWidth, 0);
    if (Value *TmpV = SimplifyDemandedVectorElts(II->getArgOperand(0),
                                                 InputDemandedElts,
                                                 UndefElts)) {
      II->setArgOperand(0, TmpV);
      return II;
    }
    break;
  }

  case Intrinsic::x86_sse4a_insertqi: {
    // insertqi x, y, 64, 0 can just copy y's lower bits and leave the top
    // ones undef.
    // TODO: eventually we should lower this intrinsic to IR
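    // (Illustrative: for the 64/0 case below, the shuffle built is
    // "shufflevector %y, undef, <i32 0, i32 2>" on <2 x i64>, i.e. keep y's
    // low 64 bits and leave the high 64 bits undef.)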
    if (auto CIWidth = dyn_cast<ConstantInt>(II->getArgOperand(2))) {
      if (auto CIStart = dyn_cast<ConstantInt>(II->getArgOperand(3))) {
        unsigned Index = CIStart->getZExtValue();
        // From AMD documentation: "a value of zero in the field length is
        // defined as length of 64".
        unsigned Length = CIWidth->equalsInt(0) ? 64 : CIWidth->getZExtValue();

        // From AMD documentation: "If the sum of the bit index + length field
        // is greater than 64, the results are undefined".

        // Note that both field index and field length are 8-bit quantities.
        // Since variables 'Index' and 'Length' are unsigned values
        // obtained from zero-extending field index and field length
        // respectively, their sum should never wrap around.
        if ((Index + Length) > 64)
          return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

        if (Length == 64 && Index == 0) {
          Value *Vec = II->getArgOperand(1);
          Value *Undef = UndefValue::get(Vec->getType());
          const uint32_t Mask[] = { 0, 2 };
          return ReplaceInstUsesWith(
              CI,
              Builder->CreateShuffleVector(
                  Vec, Undef, ConstantDataVector::get(
                                  II->getContext(), makeArrayRef(Mask))));

        } else if (auto Source =
                       dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
          if (Source->hasOneUse() &&
              Source->getArgOperand(1) == II->getArgOperand(1)) {
            // If the source of the insert has only one use and it's another
            // insert (and they're both inserting from the same vector), try to
            // bundle both together.
            auto CISourceWidth =
                dyn_cast<ConstantInt>(Source->getArgOperand(2));
            auto CISourceStart =
                dyn_cast<ConstantInt>(Source->getArgOperand(3));
            if (CISourceStart && CISourceWidth) {
              unsigned Start = CIStart->getZExtValue();
              unsigned Width = CIWidth->getZExtValue();
              unsigned End = Start + Width;
              unsigned SourceStart = CISourceStart->getZExtValue();
              unsigned SourceWidth = CISourceWidth->getZExtValue();
              unsigned SourceEnd = SourceStart + SourceWidth;
              unsigned NewStart, NewWidth;
              bool ShouldReplace = false;
              if (Start <= SourceStart && SourceStart <= End) {
                NewStart = Start;
                NewWidth = std::max(End, SourceEnd) - NewStart;
                ShouldReplace = true;
              } else if (SourceStart <= Start && Start <= SourceEnd) {
                NewStart = SourceStart;
                NewWidth = std::max(SourceEnd, End) - NewStart;
                ShouldReplace = true;
              }

              if (ShouldReplace) {
                Constant *ConstantWidth = ConstantInt::get(
                    II->getArgOperand(2)->getType(), NewWidth, false);
                Constant *ConstantStart = ConstantInt::get(
                    II->getArgOperand(3)->getType(), NewStart, false);
                Value *Args[4] = { Source->getArgOperand(0),
                                   II->getArgOperand(1), ConstantWidth,
                                   ConstantStart };
                Module *M = CI.getParent()->getParent()->getParent();
                Value *F =
                    Intrinsic::getDeclaration(M, Intrinsic::x86_sse4a_insertqi);
                return ReplaceInstUsesWith(CI, Builder->CreateCall(F, Args));
              }
            }
          }
        }
      }
    }
    break;
  }

  case Intrinsic::x86_sse41_pblendvb:
  case Intrinsic::x86_sse41_blendvps:
  case Intrinsic::x86_sse41_blendvpd:
  case Intrinsic::x86_avx_blendv_ps_256:
  case Intrinsic::x86_avx_blendv_pd_256:
  case Intrinsic::x86_avx2_pblendvb: {
    // Convert blendv* to vector selects if the mask is constant.
    // This optimization is convoluted because the intrinsic is defined as
    // getting a vector of floats or doubles for the ps and pd versions.
    // FIXME: That should be changed.
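    // (Illustrative: pblendvb with a constant mask becomes "select <16 x i1>
    // %m, <16 x i8> %op1, <16 x i8> %op0"; only the sign bit of each mask
    // lane is consulted.)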
    Value *Mask = II->getArgOperand(2);
    if (auto C = dyn_cast<ConstantDataVector>(Mask)) {
      auto Tyi1 = Builder->getInt1Ty();
      auto SelectorType = cast<VectorType>(Mask->getType());
      auto EltTy = SelectorType->getElementType();
      unsigned Size = SelectorType->getNumElements();
      unsigned BitWidth =
          EltTy->isFloatTy()
              ? 32
              : (EltTy->isDoubleTy() ? 64 : EltTy->getIntegerBitWidth());
      assert((BitWidth == 64 || BitWidth == 32 || BitWidth == 8) &&
             "Wrong arguments for variable blend intrinsic");
      SmallVector<Constant *, 32> Selectors;
      for (unsigned I = 0; I < Size; ++I) {
        // The intrinsics only read the top bit.
        uint64_t Selector;
        if (BitWidth == 8)
          Selector = C->getElementAsInteger(I);
        else
          Selector = C->getElementAsAPFloat(I).bitcastToAPInt().getZExtValue();
        Selectors.push_back(ConstantInt::get(Tyi1, Selector >> (BitWidth - 1)));
      }
      auto NewSelector = ConstantVector::get(Selectors);
      return SelectInst::Create(NewSelector, II->getArgOperand(1),
                                II->getArgOperand(0), "blendv");
    } else {
      break;
    }
  }

  case Intrinsic::x86_avx_vpermilvar_ps:
  case Intrinsic::x86_avx_vpermilvar_ps_256:
  case Intrinsic::x86_avx_vpermilvar_pd:
  case Intrinsic::x86_avx_vpermilvar_pd_256: {
    // Convert vpermil* to shufflevector if the mask is constant.
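    // (Illustrative: vpermilvar.ps of <4 x float> %v with mask <3, 2, 1, 0>
    // becomes "shufflevector %v, undef, <i32 3, i32 2, i32 1, i32 0>".)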
    Value *V = II->getArgOperand(1);
    unsigned Size = cast<VectorType>(V->getType())->getNumElements();
    assert(Size == 8 || Size == 4 || Size == 2);
    uint32_t Indexes[8];
    if (auto C = dyn_cast<ConstantDataVector>(V)) {
      // The intrinsics only read one or two bits; clear the rest.
      for (unsigned I = 0; I < Size; ++I) {
        uint32_t Index = C->getElementAsInteger(I) & 0x3;
        if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd ||
            II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256)
          Index >>= 1;
        Indexes[I] = Index;
      }
    } else if (isa<ConstantAggregateZero>(V)) {
      for (unsigned I = 0; I < Size; ++I)
        Indexes[I] = 0;
    } else {
      break;
    }
    // The _256 variants are a bit trickier since the mask bits always index
    // into the corresponding 128-bit half. In order to convert to a generic
    // shuffle, we have to make that explicit.
    if (II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_ps_256 ||
        II->getIntrinsicID() == Intrinsic::x86_avx_vpermilvar_pd_256) {
      for (unsigned I = Size / 2; I < Size; ++I)
        Indexes[I] += Size / 2;
    }
    auto NewC =
        ConstantDataVector::get(V->getContext(), makeArrayRef(Indexes, Size));
    auto V1 = II->getArgOperand(0);
    auto V2 = UndefValue::get(V1->getType());
    auto Shuffle = Builder->CreateShuffleVector(V1, V2, NewC);
    return ReplaceInstUsesWith(CI, Shuffle);
  }

  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    // Note that ppc_altivec_vperm has a big-endian bias, so when creating
    // a vectorshuffle for little endian, we must undo the transformation
    // performed on vec_perm in altivec.h. That is, we must complement
    // the permutation mask with respect to 31 and reverse the order of
    // V1 and V2.
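    // (Illustrative: on little endian, each constant mask element m is
    // rewritten as 31 - m below, and the element is then extracted from
    // V2/V1 rather than V1/V2.)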
    if (Constant *Mask = dyn_cast<Constant>(II->getArgOperand(2))) {
      assert(Mask->getType()->getVectorNumElements() == 16 &&
             "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        Constant *Elt = Mask->getAggregateElement(i);
        if (!Elt || !(isa<ConstantInt>(Elt) || isa<UndefValue>(Elt))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getArgOperand(0),
                                            Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getArgOperand(1),
                                            Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getAggregateElement(i)))
            continue;
          unsigned Idx =
            cast<ConstantInt>(Mask->getAggregateElement(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.
          if (DL && DL->isLittleEndian())
            Idx = 31 - Idx;

          if (!ExtractedElts[Idx]) {
            Value *Op0ToUse = (DL && DL->isLittleEndian()) ? Op1 : Op0;
            Value *Op1ToUse = (DL && DL->isLittleEndian()) ? Op0 : Op1;
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0ToUse : Op1ToUse,
                                            Builder->getInt32(Idx&15));
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                                                Builder->getInt32(i));
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;

  case Intrinsic::arm_neon_vld1:
  case Intrinsic::arm_neon_vld2:
  case Intrinsic::arm_neon_vld3:
  case Intrinsic::arm_neon_vld4:
  case Intrinsic::arm_neon_vld2lane:
  case Intrinsic::arm_neon_vld3lane:
  case Intrinsic::arm_neon_vld4lane:
  case Intrinsic::arm_neon_vst1:
  case Intrinsic::arm_neon_vst2:
  case Intrinsic::arm_neon_vst3:
  case Intrinsic::arm_neon_vst4:
  case Intrinsic::arm_neon_vst2lane:
  case Intrinsic::arm_neon_vst3lane:
  case Intrinsic::arm_neon_vst4lane: {
    unsigned MemAlign = getKnownAlignment(II->getArgOperand(0), DL, AC, II, DT);
    unsigned AlignArg = II->getNumArgOperands() - 1;
    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign) {
      II->setArgOperand(AlignArg,
                        ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                         MemAlign, false));
      return II;
    }
    break;
  }

  case Intrinsic::arm_neon_vmulls:
  case Intrinsic::arm_neon_vmullu:
  case Intrinsic::aarch64_neon_smull:
  case Intrinsic::aarch64_neon_umull: {
    Value *Arg0 = II->getArgOperand(0);
    Value *Arg1 = II->getArgOperand(1);

    // Handle mul by zero first:
    if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
      return ReplaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
    }

    // Check for constant LHS & RHS - in this case we just simplify.
    bool Zext = (II->getIntrinsicID() == Intrinsic::arm_neon_vmullu ||
                 II->getIntrinsicID() == Intrinsic::aarch64_neon_umull);
    VectorType *NewVT = cast<VectorType>(II->getType());
    if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
      if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
        CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
        CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);

        return ReplaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
      }

      // Couldn't simplify - canonicalize constant to the RHS.
      std::swap(Arg0, Arg1);
    }

    // Handle mul by one:
    if (Constant *CV1 = dyn_cast<Constant>(Arg1))
      if (ConstantInt *Splat =
              dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
        if (Splat->isOne())
          return CastInst::CreateIntegerCast(Arg0, II->getType(),
                                             /*isSigned=*/!Zext);

    break;
  }

  case Intrinsic::AMDGPU_rcp: {
    if (const ConstantFP *C = dyn_cast<ConstantFP>(II->getArgOperand(0))) {
      const APFloat &ArgVal = C->getValueAPF();
      APFloat Val(ArgVal.getSemantics(), 1.0);
      APFloat::opStatus Status = Val.divide(ArgVal,
                                            APFloat::rmNearestTiesToEven);
      // Only do this if it was exact and therefore not dependent on the
      // rounding mode.
      if (Status == APFloat::opOK)
        return ReplaceInstUsesWith(CI, ConstantFP::get(II->getContext(), Val));
    }

    break;
  }
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore. This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return, resume, or unwind block and if there
    // are no allocas or calls between the restore and the return, nuke the
    // restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  case Intrinsic::assume: {
    // Canonicalize assume(a && b) -> assume(a); assume(b);
    // Note: New assumption intrinsics created here are registered by
    // the InstCombineIRInserter object.
    Value *IIOperand = II->getArgOperand(0), *A, *B,
          *AssumeIntrinsic = II->getCalledValue();
    if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {
      Builder->CreateCall(AssumeIntrinsic, A, II->getName());
      Builder->CreateCall(AssumeIntrinsic, B, II->getName());
      return EraseInstFromFunction(*II);
    }
    // assume(!(a || b)) -> assume(!a); assume(!b);
    if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(A),
                          II->getName());
      Builder->CreateCall(AssumeIntrinsic, Builder->CreateNot(B),
                          II->getName());
      return EraseInstFromFunction(*II);
    }

    // assume( (load addr) != null ) -> add 'nonnull' metadata to the load
    // (if the assume is valid at the load).
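    // (Illustrative: given "%p = load i8** %pp" and a later
    // "call void @llvm.assume(i1 %ne)" with "%ne = icmp ne i8* %p, null",
    // the load gets !nonnull metadata and the assume is erased.)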
    if (ICmpInst* ICmp = dyn_cast<ICmpInst>(IIOperand)) {
      Value *LHS = ICmp->getOperand(0);
      Value *RHS = ICmp->getOperand(1);
      if (ICmpInst::ICMP_NE == ICmp->getPredicate() &&
          isa<LoadInst>(LHS) &&
          isa<Constant>(RHS) &&
          RHS->getType()->isPointerTy() &&
          cast<Constant>(RHS)->isNullValue()) {
        LoadInst* LI = cast<LoadInst>(LHS);
        if (isValidAssumeForContext(II, LI, DL, DT)) {
          MDNode *MD = MDNode::get(II->getContext(), None);
          LI->setMetadata(LLVMContext::MD_nonnull, MD);
          return EraseInstFromFunction(*II);
        }
      }
      // TODO: apply nonnull return attributes to calls and invokes
      // TODO: apply range metadata for range check patterns?
    }
    // If there is a dominating assume with the same condition as this one,
    // then this one is redundant, and should be removed.
    APInt KnownZero(1, 0), KnownOne(1, 0);
    computeKnownBits(IIOperand, KnownZero, KnownOne, 0, II);
    if (KnownOne.isAllOnesValue())
      return EraseInstFromFunction(*II);

    break;
  }
  case Intrinsic::experimental_gc_relocate: {
    // Translate facts known about a pointer before relocating into
    // facts about the relocate value, while being careful to
    // preserve relocation semantics.
    GCRelocateOperands Operands(II);
    Value *DerivedPtr = Operands.derivedPtr();

    // Remove the relocation if unused; note that this check is required
    // to prevent the cases below from looping forever.
    if (II->use_empty())
      return EraseInstFromFunction(*II);

    // Undef is undef, even after relocation.
    // TODO: provide a hook for this in GCStrategy. This is clearly legal for
    // most practical collectors, but there was discussion in the review thread
    // about whether it was legal for all possible collectors.
    if (isa<UndefValue>(DerivedPtr))
      return ReplaceInstUsesWith(*II, DerivedPtr);

    // The relocation of null will be null for most any collector.
    // TODO: provide a hook for this in GCStrategy. There might be some weird
    // collector this property does not hold for.
    if (isa<ConstantPointerNull>(DerivedPtr))
      return ReplaceInstUsesWith(*II, DerivedPtr);

    // isKnownNonNull -> nonnull attribute
    if (isKnownNonNull(DerivedPtr))
      II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);

    // TODO: dereferenceable -> deref attribute

    // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
    // Canonicalize on the type from the uses to the defs

    // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const DataLayout * const DL,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // If this is a GC intrinsic, avoid munging types. We need types for
  // statepoint reconstruction in SelectionDAG.
  // TODO: This is probably something which should be expanded to all
  // intrinsics since the entire point of intrinsics is that
  // they are understandable by the optimizer.
  if (isStatepoint(CS) || isGCRelocate(CS) || isGCResult(CS))
    return false;

  // The size of ByVal or InAlloca arguments is derived from the type, so we
  // can't change to a type with a different size. If the size were
  // passed explicitly we could avoid this check.
  if (!CS.isByValOrInAllocaArgument(ix))
    return true;

  Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!DL || DL->getTypeAllocSize(SrcTy) != DL->getTypeAllocSize(DstTy))
    return false;
  return true;
}

// Try to fold some different types of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const DataLayout *DL) {
  if (!CI->getCalledFunction()) return nullptr;

  if (Value *With = Simplifier->optimizeCall(CI)) {
    ++NumSimplified;
    return CI->use_empty() ? CI : ReplaceInstUsesWith(*CI, With);
  }

  return nullptr;
}

static IntrinsicInst *FindInitTrampolineFromAlloca(Value *TrampMem) {
  // Strip off at most one level of pointer casts, looking for an alloca. This
  // is good enough in practice and simpler than handling any number of casts.
  Value *Underlying = TrampMem->stripPointerCasts();
  if (Underlying != TrampMem &&
      (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
    return nullptr;
  if (!isa<AllocaInst>(Underlying))
    return nullptr;

  IntrinsicInst *InitTrampoline = nullptr;
  for (User *U : TrampMem->users()) {
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    if (!II)
      return nullptr;
    if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
      if (InitTrampoline)
        // More than one init_trampoline writes to this value. Give up.
        return nullptr;
      InitTrampoline = II;
      continue;
    }
    if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
      // Allow any number of calls to adjust.trampoline.
      continue;
    return nullptr;
  }

  // No call to init.trampoline found.
  if (!InitTrampoline)
    return nullptr;

  // Check that the alloca is being used in the expected way.
  if (InitTrampoline->getOperand(0) != TrampMem)
    return nullptr;

  return InitTrampoline;
}

static IntrinsicInst *FindInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
                                               Value *TrampMem) {
  // Visit all the previous instructions in the basic block, and try to find
  // an init.trampoline which has a direct path to the adjust.trampoline.
  for (BasicBlock::iterator I = AdjustTramp,
       E = AdjustTramp->getParent()->begin(); I != E; ) {
    Instruction *Inst = --I;
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
      if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
          II->getOperand(0) == TrampMem)
        return II;
    if (Inst->mayWriteToMemory())
      return nullptr;
  }
  return nullptr;
}

// Given a call to llvm.adjust.trampoline, find and return the corresponding
// call to llvm.init.trampoline if the call to the trampoline can be optimized
// to a direct call to a function. Otherwise return NULL.
//
static IntrinsicInst *FindInitTrampoline(Value *Callee) {
  Callee = Callee->stripPointerCasts();
  IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
  if (!AdjustTramp ||
      AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
    return nullptr;

  Value *TrampMem = AdjustTramp->getOperand(0);

  if (IntrinsicInst *IT = FindInitTrampolineFromAlloca(TrampMem))
    return IT;
  if (IntrinsicInst *IT = FindInitTrampolineFromBB(AdjustTramp, TrampMem))
    return IT;
  return nullptr;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  if (isAllocLikeFn(CS.getInstruction(), TLI))
    return visitAllocSite(*CS.getInstruction());

  bool Changed = false;

  // If the callee is a pointer to a function, attempt to move any casts to the
  // arguments of the call/invoke.
  Value *Callee = CS.getCalledValue();
  if (!isa<Function>(Callee) && transformConstExprCastCall(CS))
    return nullptr;

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body. A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        ReplaceInstUsesWith(*OldCall, UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG; just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setCalledFunction(
                                    Constant::getNullValue(CalleeF->getType()));
      return nullptr;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      ReplaceInstUsesWith(*CS.getInstruction(),
                          UndefValue::get(CS.getInstruction()->getType()));

    if (isa<InvokeInst>(CS.getInstruction())) {
      // Can't remove an invoke because we cannot change the CFG.
      return nullptr;
    }

    // This instruction is not reachable; just remove it. We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (IntrinsicInst *II = FindInitTrampoline(Callee))
    return transformCallThroughTrampoline(CS, II);

  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams();
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin() + FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, DL, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible; we require DataLayout for most of
  // this. None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, DL);
    // If we changed something, return the result; otherwise let the
    // fallthrough checks below run.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : nullptr;
}

1371// transformConstExprCastCall - If the callee is a constexpr cast of a function,
1372// attempt to move the cast to the arguments of the call/invoke.
1373//
1374bool InstCombiner::transformConstExprCastCall(CallSite CS) {
1375 Function *Callee =
1376 dyn_cast<Function>(CS.getCalledValue()->stripPointerCasts());
1a4d82fc 1377 if (!Callee)
223e47cc 1378 return false;
85aaf69f
SL
1379 // The prototype of thunks are a lie, don't try to directly call such
1380 // functions.
1381 if (Callee->hasFnAttribute("thunk"))
1382 return false;
223e47cc 1383 Instruction *Caller = CS.getInstruction();
970d7e83 1384 const AttributeSet &CallerPAL = CS.getAttributes();
223e47cc
LB
1385
1386 // Okay, this is a cast from a function to a different type. Unless doing so
1387 // would cause a type conversion of one of our arguments, change this call to
1388 // be a direct call with arguments casted to the appropriate types.
1389 //
1390 FunctionType *FT = Callee->getFunctionType();
1391 Type *OldRetTy = Caller->getType();
1392 Type *NewRetTy = FT->getReturnType();
1393
223e47cc
LB
1394 // Check to see if we are changing the return type...
1395 if (OldRetTy != NewRetTy) {
223e47cc 1396
1a4d82fc
JJ
1397 if (NewRetTy->isStructTy())
1398 return false; // TODO: Handle multiple return values.
1399
85aaf69f 1400 if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
1a4d82fc
JJ
1401 if (Callee->isDeclaration())
1402 return false; // Cannot transform this return value.
1403
1404 if (!Caller->use_empty() &&
1405 // void -> non-void is handled specially
1406 !NewRetTy->isVoidTy())
85aaf69f 1407 return false; // Cannot transform this return value.
1a4d82fc 1408 }
223e47cc
LB
1409
1410 if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
970d7e83
LB
1411 AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);
1412 if (RAttrs.
1413 hasAttributes(AttributeFuncs::
1414 typeIncompatible(NewRetTy, AttributeSet::ReturnIndex),
1415 AttributeSet::ReturnIndex))
223e47cc
LB
1416 return false; // Attribute not compatible with transformed value.
1417 }
1418
1419 // If the callsite is an invoke instruction, and the return value is used by
1420 // a PHI node in a successor, we cannot change the return type of the call
1421 // because there is no place to put the cast instruction (without breaking
1422 // the critical edge). Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (User *U : II->users())
          if (PHINode *PN = dyn_cast<PHINode>(U))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = CS.arg_size();
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  // Prevent us turning:
  // declare void @takes_i32_inalloca(i32* inalloca)
  //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
  //
  // into:
  //  call void @takes_i32_inalloca(i32* null)
  if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca))
    return false;

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);
    Type *ActTy = (*AI)->getType();

    if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
      return false;   // Cannot transform this parameter value.

    if (AttrBuilder(CallerPAL.getParamAttributes(i + 1), i + 1).
          hasAttributes(AttributeFuncs::
                        typeIncompatible(ParamTy, i + 1), i + 1))
      return false;   // Attribute not compatible with transformed value.

    if (CS.isInAllocaArgument(i))
      return false;   // Cannot transform to and from inalloca.

    // If the parameter is passed as a byval argument, then we have to have a
    // sized type and the sized type has to have the same size as the old type.
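    // (Illustrative, assuming a typical 64-bit data layout: a { i32, i32 }*
    // byval argument could become an i64* byval parameter, since both pointee
    // types occupy 8 bytes, but not an i32* one.)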
    if (ParamTy != ActTy &&
        CallerPAL.getParamAttributes(i + 1).hasAttribute(i + 1,
                                                         Attribute::ByVal)) {
      PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
      if (!ParamPTy || !ParamPTy->getElementType()->isSized() || !DL)
        return false;

      Type *CurElTy = ActTy->getPointerElementType();
      if (DL->getTypeAllocSize(CurElTy) !=
          DL->getTypeAllocSize(ParamPTy->getElementType()))
        return false;
    }
  }

  if (Callee->isDeclaration()) {
    // Do not delete arguments unless we have a function body.
    if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
      return false;

    // If the callee is just a declaration, don't change the varargsness of the
    // call.  We don't want to introduce a varargs call where one doesn't
    // already exist.
    PointerType *APTy = cast<PointerType>(CS.getCalledValue()->getType());
    if (FT->isVarArg()!=cast<FunctionType>(APTy->getElementType())->isVarArg())
      return false;

    // If both the callee and the cast type are varargs, we still have to make
    // sure the number of fixed parameters is the same, or we have the same
    // ABI issues as if we introduced a varargs call.
    if (FT->isVarArg() &&
        cast<FunctionType>(APTy->getElementType())->isVarArg() &&
        FT->getNumParams() !=
        cast<FunctionType>(APTy->getElementType())->getNumParams())
      return false;
  }

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
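    // (For example, 'sret' must apply to the first formal parameter, so it
    // can never describe an argument passed through the varargs area.)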
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      unsigned Index = CallerPAL.getSlotIndex(i - 1);
      if (Index <= FT->getNumParams())
        break;

      // Check if it has an attribute that's incompatible with varargs.
      AttributeSet PAttrs = CallerPAL.getSlotAttributes(i - 1);
      if (PAttrs.hasAttribute(Index, Attribute::StructRet))
        return false;
    }


  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary.
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeSet, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  AttrBuilder RAttrs(CallerPAL, AttributeSet::ReturnIndex);

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs.
    removeAttributes(AttributeFuncs::
                     typeIncompatible(NewRetTy, AttributeSet::ReturnIndex),
                     AttributeSet::ReturnIndex);

  // Add the new return attributes.
  if (RAttrs.hasAttributes())
    attrVec.push_back(AttributeSet::get(Caller->getContext(),
                                        AttributeSet::ReturnIndex, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    Type *ParamTy = FT->getParamType(i);

    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Args.push_back(Builder->CreateBitOrPointerCast(*AI, ParamTy));
    }

    // Add any parameter attributes.
    AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
    if (PAttrs.hasAttributes())
      attrVec.push_back(AttributeSet::get(Caller->getContext(), i + 1,
                                          PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments from the call, they can only be preserved
  // when the new callee is varargs.
  if (FT->getNumParams() < NumActualArgs) {
    // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
    if (FT->isVarArg()) {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
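          // (For instance, an i8 or i16 argument is widened to i32 here,
          // mirroring C's default argument promotions.)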
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        AttrBuilder PAttrs(CallerPAL.getParamAttributes(i + 1), i + 1);
        if (PAttrs.hasAttributes())
          attrVec.push_back(AttributeSet::get(FT->getContext(), i + 1,
                                              PAttrs));
      }
    }
  }

  AttributeSet FnAttrs = CallerPAL.getFnAttributes();
  if (CallerPAL.hasAttributes(AttributeSet::FunctionIndex))
    attrVec.push_back(AttributeSet::get(Callee->getContext(), FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttributeSet &NewCallerPAL = AttributeSet::get(Callee->getContext(),
                                                       attrVec);

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = Builder->CreateInvoke(Callee, II->getNormalDest(),
                               II->getUnwindDest(), Args);
    NC->takeName(II);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    CallInst *CI = cast<CallInst>(Caller);
    NC = Builder->CreateCall(Callee, Args);
    NC->takeName(CI);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
      NC->setDebugLoc(Caller->getDebugLoc());

      // If this is an invoke instruction, we should insert it after the first
      // non-PHI instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise it's a call; just insert the cast right after the call.
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    ReplaceInstUsesWith(*Caller, NV);
  else if (Caller->hasValueHandle()) {
    if (OldRetTy == NV->getType())
      ValueHandleBase::ValueIsRAUWd(Caller, NV);
    else
      // We cannot call ValueIsRAUWd with a different type, and the
      // actual tracked value will disappear.
      ValueHandleBase::ValueIsDeleted(Caller);
  }

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline / adjust_trampoline intrinsic pair into a direct call to
// the underlying function.
//
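// For example (illustrative IR; trampoline setup elided):
//   call void @llvm.init.trampoline(i8* %tramp,
//                                   i8* bitcast (i32 (i8*, i32)* @f to i8*),
//                                   i8* %nval)
//   %p = call i8* @llvm.adjust.trampoline(i8* %tramp)
//   %fp = bitcast i8* %p to i32 (i32)*
//   %r = call i32 %fp(i32 7)
// becomes a direct call that passes the chain value explicitly, given that
// @f marks its first parameter 'nest':
//   %r = call i32 @f(i8* nest %nval, i32 7)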
Instruction *
InstCombiner::transformCallThroughTrampoline(CallSite CS,
                                             IntrinsicInst *Tramp) {
  Value *Callee = CS.getCalledValue();
  PointerType *PTy = cast<PointerType>(Callee->getType());
  FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttributeSet &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return nullptr;

  assert(Tramp &&
         "transformCallThroughTrampoline called with incorrect CallSite.");

  Function *NestF =cast<Function>(Tramp->getArgOperand(1)->stripPointerCasts());
  PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttributeSet &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    Type *NestTy = nullptr;
    AttributeSet NestAttr;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.hasAttribute(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(CS.arg_size() + 1);

      SmallVector<AttributeSet, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attrs.hasAttributes(AttributeSet::ReturnIndex))
        NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                             Attrs.getRetAttributes()));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getArgOperand(2);
            if (NestVal->getType() != NestTy)
              NestVal = Builder->CreateBitCast(NestVal, NestTy, "nest");
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          AttributeSet Attr = Attrs.getParamAttributes(Idx);
          if (Attr.hasAttributes(Idx)) {
            AttrBuilder B(Attr, Idx);
            NewAttrs.push_back(AttributeSet::get(Caller->getContext(),
                                                 Idx + (Idx >= NestIdx), B));
          }

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attrs.hasAttributes(AttributeSet::FunctionIndex))
        NewAttrs.push_back(AttributeSet::get(FTy->getContext(),
                                             Attrs.getFnAttributes()));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttributeSet &NewPAL =
          AttributeSet::get(FTy->getContext(), NewAttrs);

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }

      return NewCaller;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}