//===----------------------- AlignmentFromAssumptions.cpp -----------------===//
//                  Set Load/Store Alignments From Assumptions
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a ScalarEvolution-based transformation to set
// the alignments of loads, stores, and memory intrinsics based on the truth
// expressions of assume intrinsics. The primary motivation is to handle
// complex alignment assumptions that apply to vector loads and stores that
// appear after vectorization and unrolling.
//
//===----------------------------------------------------------------------===//
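//
// For example (illustrative): a source-level hint such as
//   double *x = (double *) __builtin_assume_aligned(a, 32);
// is typically lowered to a call to @llvm.assume whose condition tests that
// the low bits of the pointer are zero, which is the pattern that
// extractAlignmentInfo() below matches.
//
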
#define AA_NAME "alignment-from-assumptions"
#define DEBUG_TYPE AA_NAME
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(NumLoadAlignChanged,
  "Number of loads changed by alignment assumptions");
STATISTIC(NumStoreAlignChanged,
  "Number of stores changed by alignment assumptions");
STATISTIC(NumMemIntAlignChanged,
  "Number of memory intrinsics changed by alignment assumptions");

namespace {
struct AlignmentFromAssumptions : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  AlignmentFromAssumptions() : FunctionPass(ID) {
    initializeAlignmentFromAssumptionsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F);

  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolution>();
    AU.addRequired<DominatorTreeWrapperPass>();

    AU.setPreservesCFG();
    AU.addPreserved<LoopInfo>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolution>();
  }

  // For memory transfers, we need a common alignment for both the source and
  // destination. If we have a new alignment for only one operand of a transfer
  // instruction, save it in these maps. If we reach the other operand through
  // another assumption later, then we may change the alignment at that point.
  DenseMap<MemTransferInst *, unsigned> NewDestAlignments, NewSrcAlignments;

  ScalarEvolution *SE;
  DominatorTree *DT;
  const DataLayout *DL;

  bool extractAlignmentInfo(CallInst *I, Value *&AAPtr, const SCEV *&AlignSCEV,
                            const SCEV *&OffSCEV);
  bool processAssumption(CallInst *I);
};
}

char AlignmentFromAssumptions::ID = 0;
static const char aip_name[] = "Alignment from assumptions";
INITIALIZE_PASS_BEGIN(AlignmentFromAssumptions, AA_NAME,
                      aip_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_END(AlignmentFromAssumptions, AA_NAME,
                    aip_name, false, false)

FunctionPass *llvm::createAlignmentFromAssumptionsPass() {
  return new AlignmentFromAssumptions();
}

// Given an expression for the (constant) alignment, AlignSCEV, and an
// expression for the displacement between a pointer and the aligned address,
// DiffSCEV, compute the alignment of the displaced pointer if it can be reduced
// to a constant. Using SCEV to compute alignment handles the case where
// DiffSCEV is a recurrence with constant start such that the aligned offset
// is constant. e.g. {16,+,32} % 32 -> 16.
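// Worked example (illustrative): with DiffSCEV = 48 and AlignSCEV = 32,
// DiffAlignDiv = 48 /u 32 = 1, DiffAlign = 1 * 32 = 32, and DiffUnitsSCEV =
// 32 - 48 = -16; |-16| = 16 is a power of two, so 16 is returned.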
static unsigned getNewAlignmentDiff(const SCEV *DiffSCEV,
                                    const SCEV *AlignSCEV,
                                    ScalarEvolution *SE) {
  // DiffUnits = Diff % int64_t(Alignment)
  const SCEV *DiffAlignDiv = SE->getUDivExpr(DiffSCEV, AlignSCEV);
  const SCEV *DiffAlign = SE->getMulExpr(DiffAlignDiv, AlignSCEV);
  const SCEV *DiffUnitsSCEV = SE->getMinusSCEV(DiffAlign, DiffSCEV);

  DEBUG(dbgs() << "\talignment relative to " << *AlignSCEV << " is " <<
                  *DiffUnitsSCEV << " (diff: " << *DiffSCEV << ")\n");

  if (const SCEVConstant *ConstDUSCEV =
      dyn_cast<SCEVConstant>(DiffUnitsSCEV)) {
    int64_t DiffUnits = ConstDUSCEV->getValue()->getSExtValue();

    // If the displacement is an exact multiple of the alignment, then the
    // displaced pointer has the same alignment as the aligned pointer, so
    // return the alignment value.
    if (!DiffUnits)
      return (unsigned)
        cast<SCEVConstant>(AlignSCEV)->getValue()->getSExtValue();

    // If the displacement is not an exact multiple, but the remainder is a
    // constant, then return this remainder (but only if it is a power of 2).
    uint64_t DiffUnitsAbs = abs64(DiffUnits);
    if (isPowerOf2_64(DiffUnitsAbs))
      return (unsigned) DiffUnitsAbs;
  }

  return 0;
}

// There is an address given by an offset OffSCEV from AASCEV which has an
// alignment AlignSCEV. Use that information, if possible, to compute a new
// alignment for Ptr.
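// Illustrative values: if AlignSCEV is 32, OffSCEV is 8, and Ptr lies 40
// bytes past the assumed-aligned address, then DiffSCEV = 40 - 8 = 32, an
// exact multiple of the alignment, so Ptr is 32-byte aligned as well.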
static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
                                const SCEV *OffSCEV, Value *Ptr,
                                ScalarEvolution *SE) {
  const SCEV *PtrSCEV = SE->getSCEV(Ptr);
  const SCEV *DiffSCEV = SE->getMinusSCEV(PtrSCEV, AASCEV);

  // On 32-bit platforms, DiffSCEV might now have type i32 -- we've always
  // sign-extended OffSCEV to i64, so make sure they agree again.
  DiffSCEV = SE->getNoopOrSignExtend(DiffSCEV, OffSCEV->getType());

  // What we really want to know is the overall offset to the aligned
  // address. This address is displaced by the provided offset.
  DiffSCEV = SE->getMinusSCEV(DiffSCEV, OffSCEV);

  DEBUG(dbgs() << "AFI: alignment of " << *Ptr << " relative to " <<
                  *AlignSCEV << " and offset " << *OffSCEV <<
                  " using diff " << *DiffSCEV << "\n");

  unsigned NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE);
  DEBUG(dbgs() << "\tnew alignment: " << NewAlignment << "\n");

  if (NewAlignment) {
    return NewAlignment;
  } else if (const SCEVAddRecExpr *DiffARSCEV =
             dyn_cast<SCEVAddRecExpr>(DiffSCEV)) {
    // The relative offset to the alignment assumption did not yield a constant,
    // but we should try harder: if we assume that a is 32-byte aligned, then in
    // for (i = 0; i < 1024; i += 4) r += a[i]; not all of the loads from a are
    // 32-byte aligned, but instead alternate between 32 and 16-byte alignment.
    // As a result, the new alignment will not be a constant, but can still
    // be improved over the default (of 4) to 16.
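    // Illustrative case: with a 32-byte assumption and DiffSCEV = {0,+,16},
    // the start alignment is 32 and the per-iteration alignment is 16; 16
    // divides 32, so 16 holds on every iteration and is returned below.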

    const SCEV *DiffStartSCEV = DiffARSCEV->getStart();
    const SCEV *DiffIncSCEV = DiffARSCEV->getStepRecurrence(*SE);

    DEBUG(dbgs() << "\ttrying start/inc alignment using start " <<
                    *DiffStartSCEV << " and inc " << *DiffIncSCEV << "\n");

    // Now compute the new alignment using the displacement to the value in the
    // first iteration, and also the alignment using the per-iteration delta.
    // If these are the same, then use that answer. Otherwise, use the smaller
    // one, but only if it divides the larger one.
    NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE);
    unsigned NewIncAlignment = getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE);

    DEBUG(dbgs() << "\tnew start alignment: " << NewAlignment << "\n");
    DEBUG(dbgs() << "\tnew inc alignment: " << NewIncAlignment << "\n");

    if (!NewAlignment || !NewIncAlignment) {
      return 0;
    } else if (NewAlignment > NewIncAlignment) {
      if (NewAlignment % NewIncAlignment == 0) {
        DEBUG(dbgs() << "\tnew start/inc alignment: " <<
                        NewIncAlignment << "\n");
        return NewIncAlignment;
      }
    } else if (NewIncAlignment > NewAlignment) {
      if (NewIncAlignment % NewAlignment == 0) {
        DEBUG(dbgs() << "\tnew start/inc alignment: " <<
                        NewAlignment << "\n");
        return NewAlignment;
      }
    } else if (NewIncAlignment == NewAlignment) {
      DEBUG(dbgs() << "\tnew start/inc alignment: " <<
                      NewAlignment << "\n");
      return NewAlignment;
    }
  }

  return 0;
}

bool AlignmentFromAssumptions::extractAlignmentInfo(CallInst *I,
                                 Value *&AAPtr, const SCEV *&AlignSCEV,
                                 const SCEV *&OffSCEV) {
  // An alignment assume must be a statement about the least-significant
  // bits of the pointer being zero, possibly with some offset.
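  // For a 32-byte assumption, the expected shape is (illustrative IR):
  //   %ptrint = ptrtoint double* %a to i64
  //   %maskedptr = and i64 %ptrint, 31
  //   %maskcond = icmp eq i64 %maskedptr, 0
  //   call void @llvm.assume(i1 %maskcond)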
  ICmpInst *ICI = dyn_cast<ICmpInst>(I->getArgOperand(0));
  if (!ICI)
    return false;

  // This must be an expression of the form: x & m == 0.
  if (ICI->getPredicate() != ICmpInst::ICMP_EQ)
    return false;

  // Swap things around so that the RHS is 0.
  Value *CmpLHS = ICI->getOperand(0);
  Value *CmpRHS = ICI->getOperand(1);
  const SCEV *CmpLHSSCEV = SE->getSCEV(CmpLHS);
  const SCEV *CmpRHSSCEV = SE->getSCEV(CmpRHS);
  if (CmpLHSSCEV->isZero())
    std::swap(CmpLHS, CmpRHS);
  else if (!CmpRHSSCEV->isZero())
    return false;

  BinaryOperator *CmpBO = dyn_cast<BinaryOperator>(CmpLHS);
  if (!CmpBO || CmpBO->getOpcode() != Instruction::And)
    return false;

  // Swap things around so that the right operand of the and is a constant
  // (the mask); we cannot deal with variable masks.
  Value *AndLHS = CmpBO->getOperand(0);
  Value *AndRHS = CmpBO->getOperand(1);
  const SCEV *AndLHSSCEV = SE->getSCEV(AndLHS);
  const SCEV *AndRHSSCEV = SE->getSCEV(AndRHS);
  if (isa<SCEVConstant>(AndLHSSCEV)) {
    std::swap(AndLHS, AndRHS);
    std::swap(AndLHSSCEV, AndRHSSCEV);
  }

  const SCEVConstant *MaskSCEV = dyn_cast<SCEVConstant>(AndRHSSCEV);
  if (!MaskSCEV)
    return false;

  // The mask must have some trailing ones (otherwise the condition is
  // trivial and tells us nothing about the alignment of the left operand).
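  // For example, a mask of 31 (0b11111) has five trailing ones, asserting
  // that the low five bits of the address are zero, i.e. 32-byte alignment.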
  unsigned TrailingOnes =
    MaskSCEV->getValue()->getValue().countTrailingOnes();
  if (!TrailingOnes)
    return false;

  // Cap the alignment at the maximum with which LLVM can deal (and make sure
  // we don't overflow the shift).
  uint64_t Alignment;
  TrailingOnes = std::min(TrailingOnes,
    unsigned(sizeof(unsigned) * CHAR_BIT - 1));
  Alignment = std::min(1u << TrailingOnes, +Value::MaximumAlignment);

  Type *Int64Ty = Type::getInt64Ty(I->getParent()->getParent()->getContext());
  AlignSCEV = SE->getConstant(Int64Ty, Alignment);

  // The LHS might be a ptrtoint instruction, or it might be the pointer
  // with an offset added to it (but then, because the offset might not be a
  // constant expression -- although it often will be -- we need to search
  // through the SCEV).
  if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(AndLHS)) {
    AAPtr = PToI->getPointerOperand();
    OffSCEV = SE->getConstant(Int64Ty, 0);
  } else if (const SCEVAddExpr* AndLHSAddSCEV =
             dyn_cast<SCEVAddExpr>(AndLHSSCEV)) {
    // Try to find the ptrtoint; subtract it and the rest is the offset.
    for (SCEVAddExpr::op_iterator J = AndLHSAddSCEV->op_begin(),
         JE = AndLHSAddSCEV->op_end(); J != JE; ++J)
      if (const SCEVUnknown *OpUnk = dyn_cast<SCEVUnknown>(*J))
        if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(OpUnk->getValue())) {
          AAPtr = PToI->getPointerOperand();
          OffSCEV = SE->getMinusSCEV(AndLHSAddSCEV, *J);
          break;
        }
  }

  if (!AAPtr)
    return false;

  // Sign extend the offset to 64 bits (so that it is like all of the other
  // expressions).
  unsigned OffSCEVBits = OffSCEV->getType()->getPrimitiveSizeInBits();
  if (OffSCEVBits < 64)
    OffSCEV = SE->getSignExtendExpr(OffSCEV, Int64Ty);
  else if (OffSCEVBits > 64)
    return false;

  AAPtr = AAPtr->stripPointerCasts();
  return true;
}

bool AlignmentFromAssumptions::processAssumption(CallInst *ACall) {
  Value *AAPtr = nullptr;
  const SCEV *AlignSCEV, *OffSCEV;
  if (!extractAlignmentInfo(ACall, AAPtr, AlignSCEV, OffSCEV))
    return false;

  const SCEV *AASCEV = SE->getSCEV(AAPtr);

  // Apply the assumption to all other users of the specified pointer.
  SmallPtrSet<Instruction *, 32> Visited;
  SmallVector<Instruction *, 16> WorkList;
  for (User *J : AAPtr->users()) {
    if (J == ACall)
      continue;

    if (Instruction *K = dyn_cast<Instruction>(J))
      if (isValidAssumeForContext(ACall, K, DL, DT))
        WorkList.push_back(K);
  }

  while (!WorkList.empty()) {
    Instruction *J = WorkList.pop_back_val();

    if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
      unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
        LI->getPointerOperand(), SE);

      if (NewAlignment > LI->getAlignment()) {
        LI->setAlignment(NewAlignment);
        ++NumLoadAlignChanged;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
      unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
        SI->getPointerOperand(), SE);

      if (NewAlignment > SI->getAlignment()) {
        SI->setAlignment(NewAlignment);
        ++NumStoreAlignChanged;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
      unsigned NewDestAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
        MI->getDest(), SE);

      // For memory transfers, we need a common alignment for both the
      // source and destination. If we have a new alignment for this
      // instruction, but only for one operand, save it. If we reach the
      // other operand through another assumption later, then we may
      // change the alignment at that point.
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
        unsigned NewSrcAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
          MTI->getSource(), SE);

        DenseMap<MemTransferInst *, unsigned>::iterator DI =
          NewDestAlignments.find(MTI);
        unsigned AltDestAlignment = (DI == NewDestAlignments.end()) ?
                                    0 : DI->second;

        DenseMap<MemTransferInst *, unsigned>::iterator SI =
          NewSrcAlignments.find(MTI);
        unsigned AltSrcAlignment = (SI == NewSrcAlignments.end()) ?
                                   0 : SI->second;

        DEBUG(dbgs() << "\tmem trans: " << NewDestAlignment << " " <<
                        AltDestAlignment << " " << NewSrcAlignment <<
                        " " << AltSrcAlignment << "\n");

        // Of these four alignments, pick the largest possible...
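        // ... subject to the constraint that a candidate is usable only if
        // the other operand can match it: e.g. (illustrative) a destination
        // alignment of 32 with source alignments of at most 16 yields a
        // common alignment of 16, not 32.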
        unsigned NewAlignment = 0;
        if (NewDestAlignment <= std::max(NewSrcAlignment, AltSrcAlignment))
          NewAlignment = std::max(NewAlignment, NewDestAlignment);
        if (AltDestAlignment <= std::max(NewSrcAlignment, AltSrcAlignment))
          NewAlignment = std::max(NewAlignment, AltDestAlignment);
        if (NewSrcAlignment <= std::max(NewDestAlignment, AltDestAlignment))
          NewAlignment = std::max(NewAlignment, NewSrcAlignment);
        if (AltSrcAlignment <= std::max(NewDestAlignment, AltDestAlignment))
          NewAlignment = std::max(NewAlignment, AltSrcAlignment);

        if (NewAlignment > MI->getAlignment()) {
          MI->setAlignment(ConstantInt::get(Type::getInt32Ty(
            MI->getParent()->getContext()), NewAlignment));
          ++NumMemIntAlignChanged;
        }

        NewDestAlignments.insert(std::make_pair(MTI, NewDestAlignment));
        NewSrcAlignments.insert(std::make_pair(MTI, NewSrcAlignment));
      } else if (NewDestAlignment > MI->getAlignment()) {
        assert((!isa<MemIntrinsic>(MI) || isa<MemSetInst>(MI)) &&
               "Unknown memory intrinsic");

        MI->setAlignment(ConstantInt::get(Type::getInt32Ty(
          MI->getParent()->getContext()), NewDestAlignment));
        ++NumMemIntAlignChanged;
      }
    }

    // Now that we've updated that use of the pointer, look for other uses of
    // the pointer to update.
    Visited.insert(J);
    for (User *UJ : J->users()) {
      Instruction *K = cast<Instruction>(UJ);
      if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DL, DT))
        WorkList.push_back(K);
    }
  }

  return true;
}

bool AlignmentFromAssumptions::runOnFunction(Function &F) {
  bool Changed = false;
  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  SE = &getAnalysis<ScalarEvolution>();
  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  DL = DLP ? &DLP->getDataLayout() : nullptr;

  NewDestAlignments.clear();
  NewSrcAlignments.clear();

  for (auto &AssumeVH : AC.assumptions())
    if (AssumeVH)
      Changed |= processAssumption(cast<CallInst>(AssumeVH));

  return Changed;
}