//===-- SelectionDAG.cpp - Implement the SelectionDAG data structures -----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAG class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "SDNodeDbgValue.h"
#include "SDNodeOrdering.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Mutex.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSelectionDAGInfo.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

/// makeVTList - Return an instance of the SDVTList struct initialized with the
/// specified members.
static SDVTList makeVTList(const EVT *VTs, unsigned NumVTs) {
  SDVTList Res = {VTs, NumVTs};
  return Res;
}

// Default null implementations of the callbacks.
void SelectionDAG::DAGUpdateListener::NodeDeleted(SDNode*, SDNode*) {}
void SelectionDAG::DAGUpdateListener::NodeUpdated(SDNode*) {}

//===----------------------------------------------------------------------===//
// ConstantFPSDNode Class
//===----------------------------------------------------------------------===//

/// isExactlyValue - We don't rely on operator== working on double values, as
/// it returns true for things that are clearly not equal, like -0.0 and 0.0.
/// As such, this method can be used to do an exact bit-for-bit comparison of
/// two floating point values.
bool ConstantFPSDNode::isExactlyValue(const APFloat& V) const {
  return getValueAPF().bitwiseIsEqual(V);
}
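
// For illustration: operator== on doubles treats -0.0 and +0.0 as equal and
// NaN as unequal to itself, whereas the bitwise comparison used here
// distinguishes the two zeroes:
//   APFloat(0.0).bitwiseIsEqual(APFloat(-0.0))  is false,
//   while 0.0 == -0.0                           is true.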

bool ConstantFPSDNode::isValueValidForType(EVT VT,
                                           const APFloat& Val) {
  assert(VT.isFloatingPoint() && "Can only convert between FP types");

  // convert modifies in place, so make a copy.
  APFloat Val2 = APFloat(Val);
  bool losesInfo;
  (void) Val2.convert(SelectionDAG::EVTToAPFloatSemantics(VT),
                      APFloat::rmNearestTiesToEven,
                      &losesInfo);
  return !losesInfo;
}
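
// For example, 0.5 is exactly representable in every supported format, so it
// is valid for any FP type, while the f64 approximation of 0.1 loses bits
// when narrowed to f32 and is therefore not "valid" for MVT::f32 in this
// exact-representation sense.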

//===----------------------------------------------------------------------===//
// ISD Namespace
//===----------------------------------------------------------------------===//

/// isBuildVectorAllOnes - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are ~0 or undef.
bool ISD::isBuildVectorAllOnes(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-~0
  // elements. We have to be a bit careful here, as the type of the constant
  // may not be the same as the type of the vector elements due to type
  // legalization (the elements are promoted to a legal type for the target
  // and a vector of a type may be legal when the base element type is not).
  // We only want to check enough bits to cover the vector elements, because
  // we care if the resultant vector is all ones, not whether the individual
  // constants are.
  SDValue NotZero = N->getOperand(i);
  unsigned EltSize = N->getValueType(0).getVectorElementType().getSizeInBits();
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(NotZero)) {
    if (CN->getAPIntValue().countTrailingOnes() < EltSize)
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(NotZero)) {
    if (CFPN->getValueAPF().bitcastToAPInt().countTrailingOnes() < EltSize)
      return false;
  } else
    return false;

  // Okay, we have at least one ~0 value, check to see if the rest match or are
  // undefs. Even with the above element type twiddling, this should be OK, as
  // the same type legalization should have applied to all the elements.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != NotZero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}
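
// Worked example: after type legalization a v4i8 build_vector may carry i32
// constant operands. An operand with value 0x000000FF has 8 trailing ones,
// which covers the 8-bit element width, so it counts as ~0 here even though
// the i32 constant as a whole is not all-ones.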


/// isBuildVectorAllZeros - Return true if the specified node is a
/// BUILD_VECTOR where all of the elements are 0 or undef.
bool ISD::isBuildVectorAllZeros(const SDNode *N) {
  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  if (N->getOpcode() != ISD::BUILD_VECTOR) return false;

  unsigned i = 0, e = N->getNumOperands();

  // Skip over all of the undef values.
  while (i != e && N->getOperand(i).getOpcode() == ISD::UNDEF)
    ++i;

  // Do not accept an all-undef vector.
  if (i == e) return false;

  // Do not accept build_vectors that aren't all constants or which have non-0
  // elements.
  SDValue Zero = N->getOperand(i);
  if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Zero)) {
    if (!CN->isNullValue())
      return false;
  } else if (ConstantFPSDNode *CFPN = dyn_cast<ConstantFPSDNode>(Zero)) {
    if (!CFPN->getValueAPF().isPosZero())
      return false;
  } else
    return false;

  // Okay, we have at least one 0 value, check to see if the rest match or are
  // undefs.
  for (++i; i != e; ++i)
    if (N->getOperand(i) != Zero &&
        N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;
  return true;
}

/// isScalarToVector - Return true if the specified node is an
/// ISD::SCALAR_TO_VECTOR node or a BUILD_VECTOR node where only the low
/// element is not an undef.
bool ISD::isScalarToVector(const SDNode *N) {
  if (N->getOpcode() == ISD::SCALAR_TO_VECTOR)
    return true;

  if (N->getOpcode() != ISD::BUILD_VECTOR)
    return false;
  if (N->getOperand(0).getOpcode() == ISD::UNDEF)
    return false;
  unsigned NumElems = N->getNumOperands();
  if (NumElems == 1)
    return false;
  for (unsigned i = 1; i < NumElems; ++i) {
    SDValue V = N->getOperand(i);
    if (V.getOpcode() != ISD::UNDEF)
      return false;
  }
  return true;
}

/// allOperandsUndef - Return true if the node has at least one operand
/// and all operands of the specified node are ISD::UNDEF.
bool ISD::allOperandsUndef(const SDNode *N) {
  // Return false if the node has no operands.
  // This is "logically inconsistent" with the definition of "all" but
  // is probably the desired behavior.
  if (N->getNumOperands() == 0)
    return false;

  for (unsigned i = 0, e = N->getNumOperands(); i != e ; ++i)
    if (N->getOperand(i).getOpcode() != ISD::UNDEF)
      return false;

  return true;
}

/// getSetCCSwappedOperands - Return the operation corresponding to (Y op X)
/// when given the operation for (X op Y).
ISD::CondCode ISD::getSetCCSwappedOperands(ISD::CondCode Operation) {
  // To perform this operation, we just need to swap the L and G bits of the
  // operation.
  unsigned OldL = (Operation >> 2) & 1;
  unsigned OldG = (Operation >> 1) & 1;
  return ISD::CondCode((Operation & ~6) |  // Keep the N, U, E bits
                       (OldL << 1) |       // New G bit
                       (OldG << 2));       // New L bit.
}
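
// For illustration: SETULT has the U and L bits set; exchanging L and G
// leaves the U and G bits, i.e. SETUGT, so (y < x) becomes (x > y). The E bit
// is untouched, so SETULE maps to SETUGE the same way.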

/// getSetCCInverse - Return the operation corresponding to !(X op Y), where
/// 'op' is a valid SetCC operation.
ISD::CondCode ISD::getSetCCInverse(ISD::CondCode Op, bool isInteger) {
  unsigned Operation = Op;
  if (isInteger)
    Operation ^= 7;   // Flip L, G, E bits, but not U.
  else
    Operation ^= 15;  // Flip all of the condition bits.

  if (Operation > ISD::SETTRUE2)
    Operation &= ~8;  // Don't let N and U bits get set.

  return ISD::CondCode(Operation);
}
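
// For illustration: for integers, !(x < y) is (x >= y), so SETLT (L set)
// becomes SETGE (G and E set) after flipping L, G and E. For floating point,
// the inverse must also hold on unordered inputs, so flipping U as well turns
// SETOLT into SETUGE.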


/// isSignedOp - For an integer comparison, return 1 if the comparison is a
/// signed operation and 2 if it is an unsigned comparison. Return zero if the
/// operation does not depend on the sign of the input (setne and seteq).
static int isSignedOp(ISD::CondCode Opcode) {
  switch (Opcode) {
  default: llvm_unreachable("Illegal integer setcc operation!");
  case ISD::SETEQ:
  case ISD::SETNE: return 0;
  case ISD::SETLT:
  case ISD::SETLE:
  case ISD::SETGT:
  case ISD::SETGE: return 1;
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE: return 2;
  }
}

/// getSetCCOrOperation - Return the result of a logical OR between different
/// comparisons of identical values: ((X op1 Y) | (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCOrOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                       bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed integer setcc with an unsigned integer setcc.
    return ISD::SETCC_INVALID;

  unsigned Op = Op1 | Op2;  // Combine all of the condition bits.

  // If the N and U bits get set then the resultant comparison DOES suddenly
  // care about orderedness, and is true when ordered.
  if (Op > ISD::SETTRUE2)
    Op &= ~16;  // Clear the U bit if the N bit is set.

  // Canonicalize illegal integer setcc's.
  if (isInteger && Op == ISD::SETUNE)  // e.g. SETUGT | SETULT
    Op = ISD::SETNE;

  return ISD::CondCode(Op);
}
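
// For illustration: ORing the condition bits of SETLT and SETGT sets both L
// and G, giving SETNE; likewise SETEQ | SETLT yields SETLE. Mixing signed and
// unsigned forms (say SETLT | SETUGT) trips the isSignedOp check and is
// rejected as SETCC_INVALID.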

/// getSetCCAndOperation - Return the result of a logical AND between different
/// comparisons of identical values: ((X op1 Y) & (X op2 Y)). This function
/// returns SETCC_INVALID if it is not possible to represent the resultant
/// comparison.
ISD::CondCode ISD::getSetCCAndOperation(ISD::CondCode Op1, ISD::CondCode Op2,
                                        bool isInteger) {
  if (isInteger && (isSignedOp(Op1) | isSignedOp(Op2)) == 3)
    // Cannot fold a signed setcc with an unsigned setcc.
    return ISD::SETCC_INVALID;

  // Combine all of the condition bits.
  ISD::CondCode Result = ISD::CondCode(Op1 & Op2);

  // Canonicalize illegal integer setcc's.
  if (isInteger) {
    switch (Result) {
    default: break;
    case ISD::SETUO : Result = ISD::SETFALSE; break;  // SETUGT & SETULT
    case ISD::SETOEQ:                                 // SETEQ  & SETU[LG]E
    case ISD::SETUEQ: Result = ISD::SETEQ   ; break;  // SETUGE & SETULE
    case ISD::SETOLT: Result = ISD::SETULT  ; break;  // SETULT & SETNE
    case ISD::SETOGT: Result = ISD::SETUGT  ; break;  // SETUGT & SETNE
    }
  }

  return Result;
}
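
// For illustration: ANDing SETLE and SETGE leaves only the E bit, giving
// SETEQ. ANDing SETUGT and SETULT leaves only the U bit (SETUO), which cannot
// be true for integers and is canonicalized to SETFALSE above.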

//===----------------------------------------------------------------------===//
// SDNode Profile Support
//===----------------------------------------------------------------------===//

/// AddNodeIDOpcode - Add the node opcode to the NodeID data.
///
static void AddNodeIDOpcode(FoldingSetNodeID &ID, unsigned OpC) {
  ID.AddInteger(OpC);
}

/// AddNodeIDValueTypes - Value type lists are intern'd so we can represent them
/// solely with their pointer.
static void AddNodeIDValueTypes(FoldingSetNodeID &ID, SDVTList VTList) {
  ID.AddPointer(VTList.VTs);
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDValue *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

/// AddNodeIDOperands - Various routines for adding operands to the NodeID data.
///
static void AddNodeIDOperands(FoldingSetNodeID &ID,
                              const SDUse *Ops, unsigned NumOps) {
  for (; NumOps; --NumOps, ++Ops) {
    ID.AddPointer(Ops->getNode());
    ID.AddInteger(Ops->getResNo());
  }
}

static void AddNodeIDNode(FoldingSetNodeID &ID,
                          unsigned short OpC, SDVTList VTList,
                          const SDValue *OpList, unsigned N) {
  AddNodeIDOpcode(ID, OpC);
  AddNodeIDValueTypes(ID, VTList);
  AddNodeIDOperands(ID, OpList, N);
}

/// AddNodeIDCustom - If this is an SDNode with special info, add this info to
/// the NodeID data.
static void AddNodeIDCustom(FoldingSetNodeID &ID, const SDNode *N) {
  switch (N->getOpcode()) {
  case ISD::TargetExternalSymbol:
  case ISD::ExternalSymbol:
    llvm_unreachable("Should only be used on nodes with operands");
  default: break;  // Normal nodes don't need extra info.
  case ISD::TargetConstant:
  case ISD::Constant:
    ID.AddPointer(cast<ConstantSDNode>(N)->getConstantIntValue());
    break;
  case ISD::TargetConstantFP:
  case ISD::ConstantFP: {
    ID.AddPointer(cast<ConstantFPSDNode>(N)->getConstantFPValue());
    break;
  }
  case ISD::TargetGlobalAddress:
  case ISD::GlobalAddress:
  case ISD::TargetGlobalTLSAddress:
  case ISD::GlobalTLSAddress: {
    const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
    ID.AddPointer(GA->getGlobal());
    ID.AddInteger(GA->getOffset());
    ID.AddInteger(GA->getTargetFlags());
    ID.AddInteger(GA->getAddressSpace());
    break;
  }
  case ISD::BasicBlock:
    ID.AddPointer(cast<BasicBlockSDNode>(N)->getBasicBlock());
    break;
  case ISD::Register:
    ID.AddInteger(cast<RegisterSDNode>(N)->getReg());
    break;
  case ISD::RegisterMask:
    ID.AddPointer(cast<RegisterMaskSDNode>(N)->getRegMask());
    break;
  case ISD::SRCVALUE:
    ID.AddPointer(cast<SrcValueSDNode>(N)->getValue());
    break;
  case ISD::FrameIndex:
  case ISD::TargetFrameIndex:
    ID.AddInteger(cast<FrameIndexSDNode>(N)->getIndex());
    break;
  case ISD::JumpTable:
  case ISD::TargetJumpTable:
    ID.AddInteger(cast<JumpTableSDNode>(N)->getIndex());
    ID.AddInteger(cast<JumpTableSDNode>(N)->getTargetFlags());
    break;
  case ISD::ConstantPool:
  case ISD::TargetConstantPool: {
    const ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(N);
    ID.AddInteger(CP->getAlignment());
    ID.AddInteger(CP->getOffset());
    if (CP->isMachineConstantPoolEntry())
      CP->getMachineCPVal()->addSelectionDAGCSEId(ID);
    else
      ID.AddPointer(CP->getConstVal());
    ID.AddInteger(CP->getTargetFlags());
    break;
  }
  case ISD::TargetIndex: {
    const TargetIndexSDNode *TI = cast<TargetIndexSDNode>(N);
    ID.AddInteger(TI->getIndex());
    ID.AddInteger(TI->getOffset());
    ID.AddInteger(TI->getTargetFlags());
    break;
  }
  case ISD::LOAD: {
    const LoadSDNode *LD = cast<LoadSDNode>(N);
    ID.AddInteger(LD->getMemoryVT().getRawBits());
    ID.AddInteger(LD->getRawSubclassData());
    ID.AddInteger(LD->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::STORE: {
    const StoreSDNode *ST = cast<StoreSDNode>(N);
    ID.AddInteger(ST->getMemoryVT().getRawBits());
    ID.AddInteger(ST->getRawSubclassData());
    ID.AddInteger(ST->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::ATOMIC_CMP_SWAP:
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD:
  case ISD::ATOMIC_STORE: {
    const AtomicSDNode *AT = cast<AtomicSDNode>(N);
    ID.AddInteger(AT->getMemoryVT().getRawBits());
    ID.AddInteger(AT->getRawSubclassData());
    ID.AddInteger(AT->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::PREFETCH: {
    const MemSDNode *PF = cast<MemSDNode>(N);
    ID.AddInteger(PF->getPointerInfo().getAddrSpace());
    break;
  }
  case ISD::VECTOR_SHUFFLE: {
    const ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
    for (unsigned i = 0, e = N->getValueType(0).getVectorNumElements();
         i != e; ++i)
      ID.AddInteger(SVN->getMaskElt(i));
    break;
  }
  case ISD::TargetBlockAddress:
  case ISD::BlockAddress: {
    const BlockAddressSDNode *BA = cast<BlockAddressSDNode>(N);
    ID.AddPointer(BA->getBlockAddress());
    ID.AddInteger(BA->getOffset());
    ID.AddInteger(BA->getTargetFlags());
    break;
  }
  } // end switch (N->getOpcode())

  // Target specific memory nodes could also have address spaces to check.
  if (N->isTargetMemoryOpcode())
    ID.AddInteger(cast<MemSDNode>(N)->getPointerInfo().getAddrSpace());
}

/// AddNodeIDNode - Generic routine for adding a node's info to the NodeID
/// data.
static void AddNodeIDNode(FoldingSetNodeID &ID, const SDNode *N) {
  AddNodeIDOpcode(ID, N->getOpcode());
  // Add the return value info.
  AddNodeIDValueTypes(ID, N->getVTList());
  // Add the operand info.
  AddNodeIDOperands(ID, N->op_begin(), N->getNumOperands());

  // Handle SDNode leaves with special info.
  AddNodeIDCustom(ID, N);
}

/// encodeMemSDNodeFlags - Generic routine for computing a value for use in
/// the CSE map that carries volatility, temporalness, indexing mode, and
/// extension/truncation information.
///
static inline unsigned
encodeMemSDNodeFlags(int ConvType, ISD::MemIndexedMode AM, bool isVolatile,
                     bool isNonTemporal, bool isInvariant) {
  assert((ConvType & 3) == ConvType &&
         "ConvType may not require more than 2 bits!");
  assert((AM & 7) == AM &&
         "AM may not require more than 3 bits!");
  return ConvType |
         (AM << 2) |
         (isVolatile << 5) |
         (isNonTemporal << 6) |
         (isInvariant << 7);
}
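
// The resulting bit layout is [7:invariant][6:nontemporal][5:volatile]
// [4:2 addressing mode][1:0 extension type]. As a sketch (assuming
// ISD::SEXTLOAD == 2 and ISD::UNINDEXED == 0), a volatile, unindexed,
// sign-extending load would encode as 2 | (0 << 2) | (1 << 5) = 0x22.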

//===----------------------------------------------------------------------===//
// SelectionDAG Class
//===----------------------------------------------------------------------===//

/// doNotCSE - Return true if CSE should not be performed for this node.
static bool doNotCSE(SDNode *N) {
  if (N->getValueType(0) == MVT::Glue)
    return true; // Never CSE anything that produces a flag.

  switch (N->getOpcode()) {
  default: break;
  case ISD::HANDLENODE:
  case ISD::EH_LABEL:
    return true; // Never CSE these nodes.
  }

  // Check that remaining values produced are not flags.
  for (unsigned i = 1, e = N->getNumValues(); i != e; ++i)
    if (N->getValueType(i) == MVT::Glue)
      return true; // Never CSE anything that produces a flag.

  return false;
}

/// RemoveDeadNodes - This method deletes all unreachable nodes in the
/// SelectionDAG.
void SelectionDAG::RemoveDeadNodes() {
  // Create a dummy node (which is not added to allnodes) that adds a reference
  // to the root node, preventing it from being deleted.
  HandleSDNode Dummy(getRoot());

  SmallVector<SDNode*, 128> DeadNodes;

  // Add all obviously-dead nodes to the DeadNodes worklist.
  for (allnodes_iterator I = allnodes_begin(), E = allnodes_end(); I != E; ++I)
    if (I->use_empty())
      DeadNodes.push_back(I);

  RemoveDeadNodes(DeadNodes);

  // If the root changed (e.g. it was a dead load), update the root.
  setRoot(Dummy.getValue());
}

/// RemoveDeadNodes - This method deletes the unreachable nodes in the
/// given list, and any nodes that become unreachable as a result.
void SelectionDAG::RemoveDeadNodes(SmallVectorImpl<SDNode *> &DeadNodes) {

  // Process the worklist, deleting the nodes and adding their uses to the
  // worklist.
  while (!DeadNodes.empty()) {
    SDNode *N = DeadNodes.pop_back_val();

    for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
      DUL->NodeDeleted(N, 0);

    // Take the node out of the appropriate CSE map.
    RemoveNodeFromCSEMaps(N);

    // Next, brutally remove the operand list. This is safe to do, as there are
    // no cycles in the graph.
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
      SDUse &Use = *I++;
      SDNode *Operand = Use.getNode();
      Use.set(SDValue());

      // Now that we removed this operand, see if there are no uses of it left.
      if (Operand->use_empty())
        DeadNodes.push_back(Operand);
    }

    DeallocateNode(N);
  }
}

void SelectionDAG::RemoveDeadNode(SDNode *N){
  SmallVector<SDNode*, 16> DeadNodes(1, N);

  // Create a dummy node that adds a reference to the root node, preventing
  // it from being deleted. (This matters if the root is an operand of the
  // dead node.)
  HandleSDNode Dummy(getRoot());

  RemoveDeadNodes(DeadNodes);
}

void SelectionDAG::DeleteNode(SDNode *N) {
  // First take this out of the appropriate CSE map.
  RemoveNodeFromCSEMaps(N);

  // Finally, remove uses due to operands of this node, remove from the
  // AllNodes list, and delete the node.
  DeleteNodeNotInCSEMaps(N);
}

void SelectionDAG::DeleteNodeNotInCSEMaps(SDNode *N) {
  assert(N != AllNodes.begin() && "Cannot delete the entry node!");
  assert(N->use_empty() && "Cannot delete a node that is not dead!");

  // Drop all of the operands and decrement used node's use counts.
  N->DropOperands();

  DeallocateNode(N);
}

void SelectionDAG::DeallocateNode(SDNode *N) {
  if (N->OperandsNeedDelete)
    delete[] N->OperandList;

  // Set the opcode to DELETED_NODE to help catch bugs when node
  // memory is reallocated.
  N->NodeType = ISD::DELETED_NODE;

  NodeAllocator.Deallocate(AllNodes.remove(N));

  // Remove the ordering of this node.
  Ordering->remove(N);

  // If any of the SDDbgValue nodes refer to this SDNode, invalidate them.
  ArrayRef<SDDbgValue*> DbgVals = DbgInfo->getSDDbgValues(N);
  for (unsigned i = 0, e = DbgVals.size(); i != e; ++i)
    DbgVals[i]->setIsInvalidated();
}

/// RemoveNodeFromCSEMaps - Take the specified node out of the CSE map that
/// corresponds to it. This is useful when we're about to delete or repurpose
/// the node. We don't want future requests for structurally identical nodes
/// to return N anymore.
bool SelectionDAG::RemoveNodeFromCSEMaps(SDNode *N) {
  bool Erased = false;
  switch (N->getOpcode()) {
  case ISD::HANDLENODE: return false;  // noop.
  case ISD::CONDCODE:
    assert(CondCodeNodes[cast<CondCodeSDNode>(N)->get()] &&
           "Cond code doesn't exist!");
    Erased = CondCodeNodes[cast<CondCodeSDNode>(N)->get()] != 0;
    CondCodeNodes[cast<CondCodeSDNode>(N)->get()] = 0;
    break;
  case ISD::ExternalSymbol:
    Erased = ExternalSymbols.erase(cast<ExternalSymbolSDNode>(N)->getSymbol());
    break;
  case ISD::TargetExternalSymbol: {
    ExternalSymbolSDNode *ESN = cast<ExternalSymbolSDNode>(N);
    Erased = TargetExternalSymbols.erase(
               std::pair<std::string,unsigned char>(ESN->getSymbol(),
                                                    ESN->getTargetFlags()));
    break;
  }
  case ISD::VALUETYPE: {
    EVT VT = cast<VTSDNode>(N)->getVT();
    if (VT.isExtended()) {
      Erased = ExtendedValueTypeNodes.erase(VT);
    } else {
      Erased = ValueTypeNodes[VT.getSimpleVT().SimpleTy] != 0;
      ValueTypeNodes[VT.getSimpleVT().SimpleTy] = 0;
    }
    break;
  }
  default:
    // Remove it from the CSE Map.
    assert(N->getOpcode() != ISD::DELETED_NODE && "DELETED_NODE in CSEMap!");
    assert(N->getOpcode() != ISD::EntryToken && "EntryToken in CSEMap!");
    Erased = CSEMap.RemoveNode(N);
    break;
  }
#ifndef NDEBUG
  // Verify that the node was actually in one of the CSE maps, unless it has a
  // flag result (which cannot be CSE'd) or is one of the special cases that are
  // not subject to CSE.
  if (!Erased && N->getValueType(N->getNumValues()-1) != MVT::Glue &&
      !N->isMachineOpcode() && !doNotCSE(N)) {
    N->dump(this);
    dbgs() << "\n";
    llvm_unreachable("Node is not in map!");
  }
#endif
  return Erased;
}

/// AddModifiedNodeToCSEMaps - The specified node has been removed from the CSE
/// maps and modified in place. Add it back to the CSE maps, unless an identical
/// node already exists, in which case transfer all its users to the existing
/// node. This transfer can potentially trigger recursive merging.
///
void
SelectionDAG::AddModifiedNodeToCSEMaps(SDNode *N) {
  // For node types that aren't CSE'd, just act as if no identical node
  // already exists.
  if (!doNotCSE(N)) {
    SDNode *Existing = CSEMap.GetOrInsertNode(N);
    if (Existing != N) {
      // If there was already an existing matching node, use ReplaceAllUsesWith
      // to replace the dead one with the existing one. This can cause
      // recursive merging of other unrelated nodes down the line.
      ReplaceAllUsesWith(N, Existing);

      // N is now dead. Inform the listeners and delete it.
      for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
        DUL->NodeDeleted(N, Existing);
      DeleteNodeNotInCSEMaps(N);
      return;
    }
  }

  // If the node doesn't already exist, we updated it. Inform listeners.
  for (DAGUpdateListener *DUL = UpdateListeners; DUL; DUL = DUL->Next)
    DUL->NodeUpdated(N);
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N, SDValue Op,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 1);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           SDValue Op1, SDValue Op2,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  SDValue Ops[] = { Op1, Op2 };
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, 2);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}


/// FindModifiedNodeSlot - Find a slot for the specified node if its operands
/// were replaced with those specified. If this node is never memoized,
/// return null, otherwise return a pointer to the slot it would take. If a
/// node already exists with these operands, the slot will be non-null.
SDNode *SelectionDAG::FindModifiedNodeSlot(SDNode *N,
                                           const SDValue *Ops, unsigned NumOps,
                                           void *&InsertPos) {
  if (doNotCSE(N))
    return 0;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, N->getOpcode(), N->getVTList(), Ops, NumOps);
  AddNodeIDCustom(ID, N);
  SDNode *Node = CSEMap.FindNodeOrInsertPos(ID, InsertPos);
  return Node;
}

#ifndef NDEBUG
/// VerifyNodeCommon - Sanity check the given node. Aborts if it is invalid.
static void VerifyNodeCommon(SDNode *N) {
  switch (N->getOpcode()) {
  default:
    break;
  case ISD::BUILD_PAIR: {
    EVT VT = N->getValueType(0);
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(!VT.isVector() && (VT.isInteger() || VT.isFloatingPoint()) &&
           "Wrong return type!");
    assert(N->getNumOperands() == 2 && "Wrong number of operands!");
    assert(N->getOperand(0).getValueType() == N->getOperand(1).getValueType() &&
           "Mismatched operand types!");
    assert(N->getOperand(0).getValueType().isInteger() == VT.isInteger() &&
           "Wrong operand type!");
    assert(VT.getSizeInBits() == 2 * N->getOperand(0).getValueSizeInBits() &&
           "Wrong return type size");
    break;
  }
  case ISD::BUILD_VECTOR: {
    assert(N->getNumValues() == 1 && "Too many results!");
    assert(N->getValueType(0).isVector() && "Wrong return type!");
    assert(N->getNumOperands() == N->getValueType(0).getVectorNumElements() &&
           "Wrong number of operands!");
    EVT EltVT = N->getValueType(0).getVectorElementType();
    for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ++I) {
      assert((I->getValueType() == EltVT ||
              (EltVT.isInteger() && I->getValueType().isInteger() &&
               EltVT.bitsLE(I->getValueType()))) &&
             "Wrong operand type!");
      assert(I->getValueType() == N->getOperand(0).getValueType() &&
             "Operands must all have the same type");
    }
    break;
  }
  }
}

/// VerifySDNode - Sanity check the given SDNode. Aborts if it is invalid.
static void VerifySDNode(SDNode *N) {
  // The SDNode allocators cannot be used to allocate nodes with fields that are
  // not present in an SDNode!
  assert(!isa<MemSDNode>(N) && "Bad MemSDNode!");
  assert(!isa<ShuffleVectorSDNode>(N) && "Bad ShuffleVectorSDNode!");
  assert(!isa<ConstantSDNode>(N) && "Bad ConstantSDNode!");
  assert(!isa<ConstantFPSDNode>(N) && "Bad ConstantFPSDNode!");
  assert(!isa<GlobalAddressSDNode>(N) && "Bad GlobalAddressSDNode!");
  assert(!isa<FrameIndexSDNode>(N) && "Bad FrameIndexSDNode!");
  assert(!isa<JumpTableSDNode>(N) && "Bad JumpTableSDNode!");
  assert(!isa<ConstantPoolSDNode>(N) && "Bad ConstantPoolSDNode!");
  assert(!isa<BasicBlockSDNode>(N) && "Bad BasicBlockSDNode!");
  assert(!isa<SrcValueSDNode>(N) && "Bad SrcValueSDNode!");
  assert(!isa<MDNodeSDNode>(N) && "Bad MDNodeSDNode!");
  assert(!isa<RegisterSDNode>(N) && "Bad RegisterSDNode!");
  assert(!isa<BlockAddressSDNode>(N) && "Bad BlockAddressSDNode!");
  assert(!isa<EHLabelSDNode>(N) && "Bad EHLabelSDNode!");
  assert(!isa<ExternalSymbolSDNode>(N) && "Bad ExternalSymbolSDNode!");
  assert(!isa<CondCodeSDNode>(N) && "Bad CondCodeSDNode!");
  assert(!isa<CvtRndSatSDNode>(N) && "Bad CvtRndSatSDNode!");
  assert(!isa<VTSDNode>(N) && "Bad VTSDNode!");
  assert(!isa<MachineSDNode>(N) && "Bad MachineSDNode!");

  VerifyNodeCommon(N);
}

/// VerifyMachineNode - Sanity check the given MachineNode. Aborts if it is
/// invalid.
static void VerifyMachineNode(SDNode *N) {
  // The MachineNode allocators cannot be used to allocate nodes with fields
  // that are not present in a MachineNode!
  // Currently there are no such nodes.

  VerifyNodeCommon(N);
}
#endif // NDEBUG

/// getEVTAlignment - Compute the default alignment value for the
/// given type.
///
unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
  Type *Ty = VT == MVT::iPTR ?
                   PointerType::get(Type::getInt8Ty(*getContext()), 0) :
                   VT.getTypeForEVT(*getContext());

  return TLI.getDataLayout()->getABITypeAlignment(Ty);
}

// EntryNode could meaningfully have debug info if we can find it...
SelectionDAG::SelectionDAG(const TargetMachine &tm, CodeGenOpt::Level OL)
  : TM(tm), TLI(*tm.getTargetLowering()), TSI(*tm.getSelectionDAGInfo()),
    TTI(0), OptLevel(OL), EntryNode(ISD::EntryToken, DebugLoc(),
                                    getVTList(MVT::Other)),
    Root(getEntryNode()), Ordering(0), UpdateListeners(0) {
  AllNodes.push_back(&EntryNode);
  Ordering = new SDNodeOrdering();
  DbgInfo = new SDDbgInfo();
}

void SelectionDAG::init(MachineFunction &mf, const TargetTransformInfo *tti) {
  MF = &mf;
  TTI = tti;
  Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {
  assert(!UpdateListeners && "Dangling registered DAGUpdateListeners");
  allnodes_clear();
  delete Ordering;
  delete DbgInfo;
}

void SelectionDAG::allnodes_clear() {
  assert(&*AllNodes.begin() == &EntryNode);
  AllNodes.remove(AllNodes.begin());
  while (!AllNodes.empty())
    DeallocateNode(AllNodes.begin());
}

void SelectionDAG::clear() {
  allnodes_clear();
  OperandAllocator.Reset();
  CSEMap.clear();

  ExtendedValueTypeNodes.clear();
  ExternalSymbols.clear();
  TargetExternalSymbols.clear();
  std::fill(CondCodeNodes.begin(), CondCodeNodes.end(),
            static_cast<CondCodeSDNode*>(0));
  std::fill(ValueTypeNodes.begin(), ValueTypeNodes.end(),
            static_cast<SDNode*>(0));

  EntryNode.UseList = 0;
  AllNodes.push_back(&EntryNode);
  Root = getEntryNode();
  Ordering->clear();
  DbgInfo->clear();
}

SDValue SelectionDAG::getAnyExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ANY_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getSExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::SIGN_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZExtOrTrunc(SDValue Op, DebugLoc DL, EVT VT) {
  return VT.bitsGT(Op.getValueType()) ?
    getNode(ISD::ZERO_EXTEND, DL, VT, Op) :
    getNode(ISD::TRUNCATE, DL, VT, Op);
}

SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, DebugLoc DL, EVT VT) {
  assert(!VT.isVector() &&
         "getZeroExtendInReg should use the vector element type instead of "
         "the vector type!");
  if (Op.getValueType() == VT) return Op;
  unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
  APInt Imm = APInt::getLowBitsSet(BitWidth,
                                   VT.getSizeInBits());
  return getNode(ISD::AND, DL, Op.getValueType(), Op,
                 getConstant(Imm, Op.getValueType()));
}
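
// For illustration: zero-extending the low 8 bits held in an i32 value masks
// with APInt::getLowBitsSet(32, 8) == 0xFF, producing (and Op, 0xFF).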

/// getNOT - Create a bitwise NOT operation as (XOR Val, -1).
///
SDValue SelectionDAG::getNOT(DebugLoc DL, SDValue Val, EVT VT) {
  EVT EltVT = VT.getScalarType();
  SDValue NegOne =
    getConstant(APInt::getAllOnesValue(EltVT.getSizeInBits()), VT);
  return getNode(ISD::XOR, DL, VT, Val, NegOne);
}

SDValue SelectionDAG::getConstant(uint64_t Val, EVT VT, bool isT) {
  EVT EltVT = VT.getScalarType();
  assert((EltVT.getSizeInBits() >= 64 ||
         (uint64_t)((int64_t)Val >> EltVT.getSizeInBits()) + 1 < 2) &&
         "getConstant with a uint64_t value that doesn't fit in the type!");
  return getConstant(APInt(EltVT.getSizeInBits(), Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const APInt &Val, EVT VT, bool isT) {
  return getConstant(*ConstantInt::get(*Context, Val), VT, isT);
}

SDValue SelectionDAG::getConstant(const ConstantInt &Val, EVT VT, bool isT) {
  assert(VT.isInteger() && "Cannot create FP integer constant!");

  EVT EltVT = VT.getScalarType();
  const ConstantInt *Elt = &Val;

  // In some cases the vector type is legal but the element type is illegal and
  // needs to be promoted, for example v8i8 on ARM. In this case, promote the
  // inserted value (the type does not need to match the vector element type).
  // Any extra bits introduced will be truncated away.
  if (VT.isVector() && TLI.getTypeAction(*getContext(), EltVT) ==
      TargetLowering::TypePromoteInteger) {
    EltVT = TLI.getTypeToTransformTo(*getContext(), EltVT);
    APInt NewVal = Elt->getValue().zext(EltVT.getSizeInBits());
    Elt = ConstantInt::get(*getContext(), NewVal);
  }

  assert(Elt->getBitWidth() == EltVT.getSizeInBits() &&
         "APInt size does not match type size!");
  unsigned Opc = isT ? ISD::TargetConstant : ISD::Constant;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(Elt);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantSDNode(isT, Elt, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    Result = getNode(ISD::BUILD_VECTOR, DebugLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getIntPtrConstant(uint64_t Val, bool isTarget) {
  return getConstant(Val, TLI.getPointerTy(), isTarget);
}


SDValue SelectionDAG::getConstantFP(const APFloat& V, EVT VT, bool isTarget) {
  return getConstantFP(*ConstantFP::get(*getContext(), V), VT, isTarget);
}

SDValue SelectionDAG::getConstantFP(const ConstantFP& V, EVT VT, bool isTarget){
  assert(VT.isFloatingPoint() && "Cannot create integer FP constant!");

  EVT EltVT = VT.getScalarType();

  // Do the map lookup using the actual bit pattern for the floating point
  // value, so that we don't have problems with 0.0 comparing equal to -0.0,
  // and we don't have issues with SNaNs.
  unsigned Opc = isTarget ? ISD::TargetConstantFP : ISD::ConstantFP;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(EltVT), 0, 0);
  ID.AddPointer(&V);
  void *IP = 0;
  SDNode *N = NULL;
  if ((N = CSEMap.FindNodeOrInsertPos(ID, IP)))
    if (!VT.isVector())
      return SDValue(N, 0);

  if (!N) {
    N = new (NodeAllocator) ConstantFPSDNode(isTarget, &V, EltVT);
    CSEMap.InsertNode(N, IP);
    AllNodes.push_back(N);
  }

  SDValue Result(N, 0);
  if (VT.isVector()) {
    SmallVector<SDValue, 8> Ops;
    Ops.assign(VT.getVectorNumElements(), Result);
    // FIXME DebugLoc info might be appropriate here
    Result = getNode(ISD::BUILD_VECTOR, DebugLoc(), VT, &Ops[0], Ops.size());
  }
  return Result;
}

SDValue SelectionDAG::getConstantFP(double Val, EVT VT, bool isTarget) {
  EVT EltVT = VT.getScalarType();
  if (EltVT==MVT::f32)
    return getConstantFP(APFloat((float)Val), VT, isTarget);
  else if (EltVT==MVT::f64)
    return getConstantFP(APFloat(Val), VT, isTarget);
  else if (EltVT==MVT::f80 || EltVT==MVT::f128 || EltVT==MVT::ppcf128 ||
           EltVT==MVT::f16) {
    bool ignored;
    APFloat apf = APFloat(Val);
    apf.convert(EVTToAPFloatSemantics(EltVT), APFloat::rmNearestTiesToEven,
                &ignored);
    return getConstantFP(apf, VT, isTarget);
  } else
    llvm_unreachable("Unsupported type in getConstantFP");
}

SDValue SelectionDAG::getGlobalAddress(const GlobalValue *GV, DebugLoc DL,
                                       EVT VT, int64_t Offset,
                                       bool isTargetGA,
                                       unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTargetGA) &&
         "Cannot set target flags on target-independent globals");

  // Truncate (with sign-extension) the offset value to the pointer size.
  unsigned BitWidth = TLI.getPointerTy().getSizeInBits();
  if (BitWidth < 64)
    Offset = SignExtend64(Offset, BitWidth);

  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee for determining thread-localness.
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal(false));
  }

  unsigned Opc;
  if (GVar && GVar->isThreadLocal())
    Opc = isTargetGA ? ISD::TargetGlobalTLSAddress : ISD::GlobalTLSAddress;
  else
    Opc = isTargetGA ? ISD::TargetGlobalAddress : ISD::GlobalAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(GV);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  ID.AddInteger(GV->getType()->getAddressSpace());
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) GlobalAddressSDNode(Opc, DL, GV, VT,
                                                      Offset, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getFrameIndex(int FI, EVT VT, bool isTarget) {
  unsigned Opc = isTarget ? ISD::TargetFrameIndex : ISD::FrameIndex;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(FI);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) FrameIndexSDNode(FI, VT, isTarget);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getJumpTable(int JTI, EVT VT, bool isTarget,
                                   unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent jump tables");
  unsigned Opc = isTarget ? ISD::TargetJumpTable : ISD::JumpTable;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(JTI);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) JumpTableSDNode(JTI, VT, isTarget,
                                                  TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  ID.AddPointer(C);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}


SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
                                      unsigned Alignment, int Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  assert((TargetFlags == 0 || isTarget) &&
         "Cannot set target flags on target-independent globals");
  if (Alignment == 0)
    Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType());
  unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddInteger(Alignment);
  ID.AddInteger(Offset);
  C->addSelectionDAGCSEId(ID);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) ConstantPoolSDNode(isTarget, C, VT, Offset,
                                                     Alignment, TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetIndex(int Index, EVT VT, int64_t Offset,
                                     unsigned char TargetFlags) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::TargetIndex, getVTList(VT), 0, 0);
  ID.AddInteger(Index);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) TargetIndexSDNode(Index, VT, Offset,
                                                    TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getBasicBlock(MachineBasicBlock *MBB) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::BasicBlock, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MBB);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BasicBlockSDNode(MBB);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getValueType(EVT VT) {
  if (VT.isSimple() && (unsigned)VT.getSimpleVT().SimpleTy >=
      ValueTypeNodes.size())
    ValueTypeNodes.resize(VT.getSimpleVT().SimpleTy+1);

  SDNode *&N = VT.isExtended() ?
    ExtendedValueTypeNodes[VT] : ValueTypeNodes[VT.getSimpleVT().SimpleTy];

  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) VTSDNode(VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getExternalSymbol(const char *Sym, EVT VT) {
  SDNode *&N = ExternalSymbols[Sym];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(false, Sym, 0, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getTargetExternalSymbol(const char *Sym, EVT VT,
                                              unsigned char TargetFlags) {
  SDNode *&N =
    TargetExternalSymbols[std::pair<std::string,unsigned char>(Sym,
                                                               TargetFlags)];
  if (N) return SDValue(N, 0);
  N = new (NodeAllocator) ExternalSymbolSDNode(true, Sym, TargetFlags, VT);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getCondCode(ISD::CondCode Cond) {
  if ((unsigned)Cond >= CondCodeNodes.size())
    CondCodeNodes.resize(Cond+1);

  if (CondCodeNodes[Cond] == 0) {
    CondCodeSDNode *N = new (NodeAllocator) CondCodeSDNode(Cond);
    CondCodeNodes[Cond] = N;
    AllNodes.push_back(N);
  }

  return SDValue(CondCodeNodes[Cond], 0);
}

// commuteShuffle - swaps the values of N1 and N2, and swaps all indices in
// the shuffle mask M that point at N1 to point at N2, and all indices that
// point at N2 to point at N1.
static void commuteShuffle(SDValue &N1, SDValue &N2, SmallVectorImpl<int> &M) {
  std::swap(N1, N2);
  int NElts = M.size();
  for (int i = 0; i != NElts; ++i) {
    if (M[i] >= NElts)
      M[i] -= NElts;
    else if (M[i] >= 0)
      M[i] += NElts;
  }
}
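
// For illustration: with 4-element vectors, the mask <0,5,2,7> (lanes 0 and 2
// taken from N1, lanes 1 and 3 from N2) becomes <4,1,6,3> after the swap;
// negative (undef) entries are left alone.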

SDValue SelectionDAG::getVectorShuffle(EVT VT, DebugLoc dl, SDValue N1,
                                       SDValue N2, const int *Mask) {
  assert(N1.getValueType() == N2.getValueType() && "Invalid VECTOR_SHUFFLE");
  assert(VT.isVector() && N1.getValueType().isVector() &&
         "Vector Shuffle VTs must be vectors");
  assert(VT.getVectorElementType() == N1.getValueType().getVectorElementType()
         && "Vector Shuffle VTs must have same element type");

  // Canonicalize shuffle undef, undef -> undef
  if (N1.getOpcode() == ISD::UNDEF && N2.getOpcode() == ISD::UNDEF)
    return getUNDEF(VT);

  // Validate that all indices in Mask are within the range of the elements
  // input to the shuffle.
  unsigned NElts = VT.getVectorNumElements();
  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NElts; ++i) {
    assert(Mask[i] < (int)(NElts * 2) && "Index out of range");
    MaskVec.push_back(Mask[i]);
  }

  // Canonicalize shuffle v, v -> v, undef
  if (N1 == N2) {
    N2 = getUNDEF(VT);
    for (unsigned i = 0; i != NElts; ++i)
      if (MaskVec[i] >= (int)NElts) MaskVec[i] -= NElts;
  }

  // Canonicalize shuffle undef, v -> v, undef. Commute the shuffle mask.
  if (N1.getOpcode() == ISD::UNDEF)
    commuteShuffle(N1, N2, MaskVec);

  // Canonicalize all indices into lhs, -> shuffle lhs, undef
  // Canonicalize all indices into rhs, -> shuffle rhs, undef
  bool AllLHS = true, AllRHS = true;
  bool N2Undef = N2.getOpcode() == ISD::UNDEF;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= (int)NElts) {
      if (N2Undef)
        MaskVec[i] = -1;
      else
        AllLHS = false;
    } else if (MaskVec[i] >= 0) {
      AllRHS = false;
    }
  }
  if (AllLHS && AllRHS)
    return getUNDEF(VT);
  if (AllLHS && !N2Undef)
    N2 = getUNDEF(VT);
  if (AllRHS) {
    N1 = getUNDEF(VT);
    commuteShuffle(N1, N2, MaskVec);
  }

  // If this is an identity shuffle, return the (only) input; if all indices
  // are undef, return undef.
  bool AllUndef = true;
  bool Identity = true;
  for (unsigned i = 0; i != NElts; ++i) {
    if (MaskVec[i] >= 0 && MaskVec[i] != (int)i) Identity = false;
    if (MaskVec[i] >= 0) AllUndef = false;
  }
  if (Identity && NElts == N1.getValueType().getVectorNumElements())
    return N1;
  if (AllUndef)
    return getUNDEF(VT);

  FoldingSetNodeID ID;
  SDValue Ops[2] = { N1, N2 };
  AddNodeIDNode(ID, ISD::VECTOR_SHUFFLE, getVTList(VT), Ops, 2);
  for (unsigned i = 0; i != NElts; ++i)
    ID.AddInteger(MaskVec[i]);

  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  // Allocate the mask array for the node out of the BumpPtrAllocator, since
  // SDNode doesn't have access to it. This memory will be "leaked" when
  // the node is deallocated, but recovered when the allocator is released.
  int *MaskAlloc = OperandAllocator.Allocate<int>(NElts);
  memcpy(MaskAlloc, &MaskVec[0], NElts * sizeof(int));

  ShuffleVectorSDNode *N =
    new (NodeAllocator) ShuffleVectorSDNode(VT, dl, N1, N2, MaskAlloc);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getConvertRndSat(EVT VT, DebugLoc dl,
                                       SDValue Val, SDValue DTy,
                                       SDValue STy, SDValue Rnd, SDValue Sat,
                                       ISD::CvtCode Code) {
  // If the src and dest types are the same and the conversion is between
  // integer types of the same sign or two floats, no conversion is necessary.
  if (DTy == STy &&
      (Code == ISD::CVT_UU || Code == ISD::CVT_SS || Code == ISD::CVT_FF))
    return Val;

  FoldingSetNodeID ID;
  SDValue Ops[] = { Val, DTy, STy, Rnd, Sat };
  AddNodeIDNode(ID, ISD::CONVERT_RNDSAT, getVTList(VT), &Ops[0], 5);
  void* IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  CvtRndSatSDNode *N = new (NodeAllocator) CvtRndSatSDNode(VT, dl, Ops, 5,
                                                           Code);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegister(unsigned RegNo, EVT VT) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::Register, getVTList(VT), 0, 0);
  ID.AddInteger(RegNo);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterSDNode(RegNo, VT);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getRegisterMask(const uint32_t *RegMask) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::RegisterMask, getVTList(MVT::Untyped), 0, 0);
  ID.AddPointer(RegMask);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) RegisterMaskSDNode(RegMask);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getEHLabel(DebugLoc dl, SDValue Root, MCSymbol *Label) {
  FoldingSetNodeID ID;
  SDValue Ops[] = { Root };
  AddNodeIDNode(ID, ISD::EH_LABEL, getVTList(MVT::Other), &Ops[0], 1);
  ID.AddPointer(Label);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) EHLabelSDNode(dl, Root, Label);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}


SDValue SelectionDAG::getBlockAddress(const BlockAddress *BA, EVT VT,
                                      int64_t Offset,
                                      bool isTarget,
                                      unsigned char TargetFlags) {
  unsigned Opc = isTarget ? ISD::TargetBlockAddress : ISD::BlockAddress;

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
  ID.AddPointer(BA);
  ID.AddInteger(Offset);
  ID.AddInteger(TargetFlags);
  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) BlockAddressSDNode(Opc, VT, BA, Offset,
                                                     TargetFlags);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

SDValue SelectionDAG::getSrcValue(const Value *V) {
  assert((!V || V->getType()->isPointerTy()) &&
         "SrcValue is not a pointer?");

  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::SRCVALUE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(V);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) SrcValueSDNode(V);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}

/// getMDNode - Return an MDNodeSDNode which holds an MDNode.
SDValue SelectionDAG::getMDNode(const MDNode *MD) {
  FoldingSetNodeID ID;
  AddNodeIDNode(ID, ISD::MDNODE_SDNODE, getVTList(MVT::Other), 0, 0);
  ID.AddPointer(MD);

  void *IP = 0;
  if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
    return SDValue(E, 0);

  SDNode *N = new (NodeAllocator) MDNodeSDNode(MD);
  CSEMap.InsertNode(N, IP);
  AllNodes.push_back(N);
  return SDValue(N, 0);
}


/// getShiftAmountOperand - Return the specified value cast to
/// the target's desired shift amount type.
SDValue SelectionDAG::getShiftAmountOperand(EVT LHSTy, SDValue Op) {
  EVT OpTy = Op.getValueType();
  EVT ShTy = TLI.getShiftAmountTy(LHSTy);
  if (OpTy == ShTy || OpTy.isVector()) return Op;

  ISD::NodeType Opcode = OpTy.bitsGT(ShTy) ? ISD::TRUNCATE : ISD::ZERO_EXTEND;
  return getNode(Opcode, Op.getDebugLoc(), ShTy, Op);
}

/// CreateStackTemporary - Create a stack temporary, suitable for holding the
/// specified value type.
SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  unsigned ByteSize = VT.getStoreSize();
  Type *Ty = VT.getTypeForEVT(*getContext());
  unsigned StackAlign =
    std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty), minAlign);

  int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
  return getFrameIndex(FrameIdx, TLI.getPointerTy());
}

/// CreateStackTemporary - Create a stack temporary suitable for holding
/// either of the specified value types.
SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
  unsigned Bytes = std::max(VT1.getStoreSizeInBits(),
                            VT2.getStoreSizeInBits())/8;
  Type *Ty1 = VT1.getTypeForEVT(*getContext());
  Type *Ty2 = VT2.getTypeForEVT(*getContext());
  const DataLayout *TD = TLI.getDataLayout();
  unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
                            TD->getPrefTypeAlignment(Ty2));

  MachineFrameInfo *FrameInfo = getMachineFunction().getFrameInfo();
  int FrameIdx = FrameInfo->CreateStackObject(Bytes, Align, false);
  return getFrameIndex(FrameIdx, TLI.getPointerTy());
}
1556
1557SDValue SelectionDAG::FoldSetCC(EVT VT, SDValue N1,
1558 SDValue N2, ISD::CondCode Cond, DebugLoc dl) {
1559 // These setcc operations always fold.
1560 switch (Cond) {
1561 default: break;
1562 case ISD::SETFALSE:
1563 case ISD::SETFALSE2: return getConstant(0, VT);
1564 case ISD::SETTRUE:
1565 case ISD::SETTRUE2: return getConstant(1, VT);
1566
1567 case ISD::SETOEQ:
1568 case ISD::SETOGT:
1569 case ISD::SETOGE:
1570 case ISD::SETOLT:
1571 case ISD::SETOLE:
1572 case ISD::SETONE:
1573 case ISD::SETO:
1574 case ISD::SETUO:
1575 case ISD::SETUEQ:
1576 case ISD::SETUNE:
1577 assert(!N1.getValueType().isInteger() && "Illegal setcc for integer!");
1578 break;
1579 }
1580
1581 if (ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode())) {
1582 const APInt &C2 = N2C->getAPIntValue();
1583 if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) {
1584 const APInt &C1 = N1C->getAPIntValue();
1585
1586 switch (Cond) {
1587 default: llvm_unreachable("Unknown integer setcc!");
1588 case ISD::SETEQ: return getConstant(C1 == C2, VT);
1589 case ISD::SETNE: return getConstant(C1 != C2, VT);
1590 case ISD::SETULT: return getConstant(C1.ult(C2), VT);
1591 case ISD::SETUGT: return getConstant(C1.ugt(C2), VT);
1592 case ISD::SETULE: return getConstant(C1.ule(C2), VT);
1593 case ISD::SETUGE: return getConstant(C1.uge(C2), VT);
1594 case ISD::SETLT: return getConstant(C1.slt(C2), VT);
1595 case ISD::SETGT: return getConstant(C1.sgt(C2), VT);
1596 case ISD::SETLE: return getConstant(C1.sle(C2), VT);
1597 case ISD::SETGE: return getConstant(C1.sge(C2), VT);
1598 }
1599 }
1600 }
1601 if (ConstantFPSDNode *N1C = dyn_cast<ConstantFPSDNode>(N1.getNode())) {
1602 if (ConstantFPSDNode *N2C = dyn_cast<ConstantFPSDNode>(N2.getNode())) {
1603 APFloat::cmpResult R = N1C->getValueAPF().compare(N2C->getValueAPF());
1604 switch (Cond) {
1605 default: break;
1606 case ISD::SETEQ: if (R==APFloat::cmpUnordered)
1607 return getUNDEF(VT);
1608 // fall through
1609 case ISD::SETOEQ: return getConstant(R==APFloat::cmpEqual, VT);
1610 case ISD::SETNE: if (R==APFloat::cmpUnordered)
1611 return getUNDEF(VT);
1612 // fall through
1613 case ISD::SETONE: return getConstant(R==APFloat::cmpGreaterThan ||
1614 R==APFloat::cmpLessThan, VT);
1615 case ISD::SETLT: if (R==APFloat::cmpUnordered)
1616 return getUNDEF(VT);
1617 // fall through
1618 case ISD::SETOLT: return getConstant(R==APFloat::cmpLessThan, VT);
1619 case ISD::SETGT: if (R==APFloat::cmpUnordered)
1620 return getUNDEF(VT);
1621 // fall through
1622 case ISD::SETOGT: return getConstant(R==APFloat::cmpGreaterThan, VT);
1623 case ISD::SETLE: if (R==APFloat::cmpUnordered)
1624 return getUNDEF(VT);
1625 // fall through
1626 case ISD::SETOLE: return getConstant(R==APFloat::cmpLessThan ||
1627 R==APFloat::cmpEqual, VT);
1628 case ISD::SETGE: if (R==APFloat::cmpUnordered)
1629 return getUNDEF(VT);
1630 // fall through
1631 case ISD::SETOGE: return getConstant(R==APFloat::cmpGreaterThan ||
1632 R==APFloat::cmpEqual, VT);
1633 case ISD::SETO: return getConstant(R!=APFloat::cmpUnordered, VT);
1634 case ISD::SETUO: return getConstant(R==APFloat::cmpUnordered, VT);
1635 case ISD::SETUEQ: return getConstant(R==APFloat::cmpUnordered ||
1636 R==APFloat::cmpEqual, VT);
1637 case ISD::SETUNE: return getConstant(R!=APFloat::cmpEqual, VT);
1638 case ISD::SETULT: return getConstant(R==APFloat::cmpUnordered ||
1639 R==APFloat::cmpLessThan, VT);
1640 case ISD::SETUGT: return getConstant(R==APFloat::cmpGreaterThan ||
1641 R==APFloat::cmpUnordered, VT);
1642 case ISD::SETULE: return getConstant(R!=APFloat::cmpGreaterThan, VT);
1643 case ISD::SETUGE: return getConstant(R!=APFloat::cmpLessThan, VT);
1644 }
1645 } else {
1646 // Ensure that the constant occurs on the RHS.
1647 return getSetCC(dl, VT, N2, N1, ISD::getSetCCSwappedOperands(Cond));
1648 }
1649 }
1650
1651 // Could not fold it.
1652 return SDValue();
1653}
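// For example, FoldSetCC with N1 = constant 3, N2 = constant 5 and
// Cond = ISD::SETULT folds to getConstant(1, VT), since C1.ult(C2) holds.
// When only the LHS is a floating-point constant, the node is rebuilt with
// the operands swapped and the condition mirrored, so constants end up on
// the RHS for later combines.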
1654
1655/// SignBitIsZero - Return true if the sign bit of Op is known to be zero. We
1656/// use this predicate to simplify operations downstream.
1657bool SelectionDAG::SignBitIsZero(SDValue Op, unsigned Depth) const {
1658 // This predicate is not safe for vector operations.
1659 if (Op.getValueType().isVector())
1660 return false;
1661
1662 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1663 return MaskedValueIsZero(Op, APInt::getSignBit(BitWidth), Depth);
1664}
1665
1666/// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
1667/// this predicate to simplify operations downstream. It returns true only
1668/// when every bit selected by Mask is known to be zero in V.
1669bool SelectionDAG::MaskedValueIsZero(SDValue Op, const APInt &Mask,
1670 unsigned Depth) const {
1671 APInt KnownZero, KnownOne;
1672 ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
1673 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1674 return (KnownZero & Mask) == Mask;
1675}
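// Illustrative caller-side use (Ptr and DAG name values in the caller): to
// check that a pointer value is provably 8-byte aligned, test its low bits:
//
//   unsigned BW = Ptr.getValueType().getSizeInBits();
//   bool Aligned = DAG.MaskedValueIsZero(Ptr, APInt::getLowBitsSet(BW, 3));
//
// This returns true only when all three low bits are known zero.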
1676
1677/// ComputeMaskedBits - Determine which bits of Op are known to be either
1678/// zero or one and return them in the KnownZero/KnownOne bitsets. The
1679/// analysis recurses through the operands and is limited to a fixed
1680/// depth in order to short-circuit processing.
1681void SelectionDAG::ComputeMaskedBits(SDValue Op, APInt &KnownZero,
1682 APInt &KnownOne, unsigned Depth) const {
1683 unsigned BitWidth = Op.getValueType().getScalarType().getSizeInBits();
1684
1685 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
1686 if (Depth == 6)
1687 return; // Limit search depth.
1688
1689 APInt KnownZero2, KnownOne2;
1690
1691 switch (Op.getOpcode()) {
1692 case ISD::Constant:
1693 // We know all of the bits for a constant!
1694 KnownOne = cast<ConstantSDNode>(Op)->getAPIntValue();
1695 KnownZero = ~KnownOne;
1696 return;
1697 case ISD::AND:
1698 // If either the LHS or the RHS are Zero, the result is zero.
1699 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1700 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1701 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1702 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1703
1704 // Output known-1 bits are only known if set in both the LHS & RHS.
1705 KnownOne &= KnownOne2;
1706 // Output known-0 bits are known to be clear if zero in either the LHS or RHS.
1707 KnownZero |= KnownZero2;
1708 return;
1709 case ISD::OR:
1710 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1711 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1712 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1713 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1714
1715 // Output known-0 bits are only known if clear in both the LHS & RHS.
1716 KnownZero &= KnownZero2;
1717 // Output known-1 bits are known to be set if set in either the LHS or RHS.
1718 KnownOne |= KnownOne2;
1719 return;
1720 case ISD::XOR: {
1721 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1722 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1723 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1724 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1725
1726 // Output known-0 bits are known if clear or set in both the LHS & RHS.
1727 APInt KnownZeroOut = (KnownZero & KnownZero2) | (KnownOne & KnownOne2);
1728 // Output known-1 bits are known to be set if set in exactly one of the LHS, RHS.
1729 KnownOne = (KnownZero & KnownOne2) | (KnownOne & KnownZero2);
1730 KnownZero = KnownZeroOut;
1731 return;
1732 }
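// Worked i4 example for the bitwise cases: with LHS KnownZero = 0100,
// KnownOne = 0010 and RHS KnownZero = 0001, KnownOne = 0010:
//   AND: KnownOne = 0010 & 0010 = 0010, KnownZero = 0100 | 0001 = 0101
//   OR:  KnownOne = 0010 | 0010 = 0010, KnownZero = 0100 & 0001 = 0000
//   XOR: bit 1 is known one on both sides, so it is known zero in the
//        result: KnownZero = 0010 and nothing is known one.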
1733 case ISD::MUL: {
1734 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
1735 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1736 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1737 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1738
1739 // If low bits are zero in either operand, output low known-0 bits.
1740 // Also compute a conservative estimate for high known-0 bits.
1741 // More trickiness is possible, but this is sufficient for the
1742 // interesting case of alignment computation.
1743 KnownOne.clearAllBits();
1744 unsigned TrailZ = KnownZero.countTrailingOnes() +
1745 KnownZero2.countTrailingOnes();
1746 unsigned LeadZ = std::max(KnownZero.countLeadingOnes() +
1747 KnownZero2.countLeadingOnes(),
1748 BitWidth) - BitWidth;
1749
1750 TrailZ = std::min(TrailZ, BitWidth);
1751 LeadZ = std::min(LeadZ, BitWidth);
1752 KnownZero = APInt::getLowBitsSet(BitWidth, TrailZ) |
1753 APInt::getHighBitsSet(BitWidth, LeadZ);
1754 return;
1755 }
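// For example, multiplying a value known to be a multiple of 8 (three
// trailing known-zero bits) by one known to be a multiple of 4 (two) gives
// TrailZ = 5, so the product is known to be a multiple of 32, which is
// precisely the fact alignment computations need.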
1756 case ISD::UDIV: {
1757 // For the purposes of computing leading zeros we can conservatively
1758 // treat a udiv as a logical right shift by the power of 2 known to
1759 // be less than the denominator.
1760 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
1761 unsigned LeadZ = KnownZero2.countLeadingOnes();
1762
1763 KnownOne2.clearAllBits();
1764 KnownZero2.clearAllBits();
1765 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
1766 unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
1767 if (RHSUnknownLeadingOnes != BitWidth)
1768 LeadZ = std::min(BitWidth,
1769 LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
1770
1771 KnownZero = APInt::getHighBitsSet(BitWidth, LeadZ);
1772 return;
1773 }
1774 case ISD::SELECT:
1775 ComputeMaskedBits(Op.getOperand(2), KnownZero, KnownOne, Depth+1);
1776 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
1777 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1778 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1779
1780 // Only known if known in both the LHS and RHS.
1781 KnownOne &= KnownOne2;
1782 KnownZero &= KnownZero2;
1783 return;
1784 case ISD::SELECT_CC:
1785 ComputeMaskedBits(Op.getOperand(3), KnownZero, KnownOne, Depth+1);
1786 ComputeMaskedBits(Op.getOperand(2), KnownZero2, KnownOne2, Depth+1);
1787 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1788 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
1789
1790 // Only known if known in both the LHS and RHS.
1791 KnownOne &= KnownOne2;
1792 KnownZero &= KnownZero2;
1793 return;
1794 case ISD::SADDO:
1795 case ISD::UADDO:
1796 case ISD::SSUBO:
1797 case ISD::USUBO:
1798 case ISD::SMULO:
1799 case ISD::UMULO:
1800 if (Op.getResNo() != 1)
1801 return;
1802 // The boolean result conforms to getBooleanContents. Fall through.
1803 case ISD::SETCC:
1804 // If we know the result of a setcc has the top bits zero, use this info.
1805 if (TLI.getBooleanContents(Op.getValueType().isVector()) ==
1806 TargetLowering::ZeroOrOneBooleanContent && BitWidth > 1)
1807 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
1808 return;
1809 case ISD::SHL:
1810 // (shl X, C1) & C2 == 0 iff (X & C2 >>u C1) == 0
1811 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1812 unsigned ShAmt = SA->getZExtValue();
1813
1814 // If the shift count is an invalid immediate, don't do anything.
1815 if (ShAmt >= BitWidth)
1816 return;
1817
1818 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1819 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1820 KnownZero <<= ShAmt;
1821 KnownOne <<= ShAmt;
1822 // low bits known zero.
1823 KnownZero |= APInt::getLowBitsSet(BitWidth, ShAmt);
1824 }
1825 return;
1826 case ISD::SRL:
1827 // (ushr X, C1) & C2 == 0 iff (-1 >> C1) & C2 == 0
1828 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1829 unsigned ShAmt = SA->getZExtValue();
1830
1831 // If the shift count is an invalid immediate, don't do anything.
1832 if (ShAmt >= BitWidth)
1833 return;
1834
1835 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1836 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1837 KnownZero = KnownZero.lshr(ShAmt);
1838 KnownOne = KnownOne.lshr(ShAmt);
1839
1840 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
1841 KnownZero |= HighBits; // High bits known zero.
1842 }
1843 return;
1844 case ISD::SRA:
1845 if (ConstantSDNode *SA = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
1846 unsigned ShAmt = SA->getZExtValue();
1847
1848 // If the shift count is an invalid immediate, don't do anything.
1849 if (ShAmt >= BitWidth)
1850 return;
1851
1852 // If any of the demanded bits are produced by the sign extension, we also
1853 // demand the input sign bit.
1854 APInt HighBits = APInt::getHighBitsSet(BitWidth, ShAmt);
1855
1856 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1857 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1858 KnownZero = KnownZero.lshr(ShAmt);
1859 KnownOne = KnownOne.lshr(ShAmt);
1860
1861 // Handle the sign bits.
1862 APInt SignBit = APInt::getSignBit(BitWidth);
1863 SignBit = SignBit.lshr(ShAmt); // Adjust to where it is now in the mask.
1864
1865 if (KnownZero.intersects(SignBit)) {
1866 KnownZero |= HighBits; // New bits are known zero.
1867 } else if (KnownOne.intersects(SignBit)) {
1868 KnownOne |= HighBits; // New bits are known one.
1869 }
1870 }
1871 return;
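// For example, (sra i8 X, 3) where X's sign bit is known zero ends with
// the top three HighBits also known zero; had the sign bit been known one,
// those same bits would instead become known one.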
1872 case ISD::SIGN_EXTEND_INREG: {
1873 EVT EVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1874 unsigned EBits = EVT.getScalarType().getSizeInBits();
1875
1876 // Sign extension. Compute the demanded bits in the result that are not
1877 // present in the input.
1878 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - EBits);
1879
1880 APInt InSignBit = APInt::getSignBit(EBits);
1881 APInt InputDemandedBits = APInt::getLowBitsSet(BitWidth, EBits);
1882
1883 // If the sign extended bits are demanded, we know that the sign
1884 // bit is demanded.
1885 InSignBit = InSignBit.zext(BitWidth);
1886 if (NewBits.getBoolValue())
1887 InputDemandedBits |= InSignBit;
1888
1889 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1890 KnownOne &= InputDemandedBits;
1891 KnownZero &= InputDemandedBits;
1892 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1893
1894 // If the sign bit of the input is known set or clear, then we know the
1895 // top bits of the result.
1896 if (KnownZero.intersects(InSignBit)) { // Input sign bit known clear
1897 KnownZero |= NewBits;
1898 KnownOne &= ~NewBits;
1899 } else if (KnownOne.intersects(InSignBit)) { // Input sign bit known set
1900 KnownOne |= NewBits;
1901 KnownZero &= ~NewBits;
1902 } else { // Input sign bit unknown
1903 KnownZero &= ~NewBits;
1904 KnownOne &= ~NewBits;
1905 }
1906 return;
1907 }
1908 case ISD::CTTZ:
1909 case ISD::CTTZ_ZERO_UNDEF:
1910 case ISD::CTLZ:
1911 case ISD::CTLZ_ZERO_UNDEF:
1912 case ISD::CTPOP: {
1913 unsigned LowBits = Log2_32(BitWidth)+1;
1914 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - LowBits);
1915 KnownOne.clearAllBits();
1916 return;
1917 }
1918 case ISD::LOAD: {
1919 LoadSDNode *LD = cast<LoadSDNode>(Op);
1920 if (ISD::isZEXTLoad(Op.getNode())) {
1921 EVT VT = LD->getMemoryVT();
1922 unsigned MemBits = VT.getScalarType().getSizeInBits();
1923 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
1924 } else if (const MDNode *Ranges = LD->getRanges()) {
1925 computeMaskedBitsLoad(*Ranges, KnownZero);
1926 }
1927 return;
1928 }
1929 case ISD::ZERO_EXTEND: {
1930 EVT InVT = Op.getOperand(0).getValueType();
1931 unsigned InBits = InVT.getScalarType().getSizeInBits();
1932 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
1933 KnownZero = KnownZero.trunc(InBits);
1934 KnownOne = KnownOne.trunc(InBits);
1935 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1936 KnownZero = KnownZero.zext(BitWidth);
1937 KnownOne = KnownOne.zext(BitWidth);
1938 KnownZero |= NewBits;
1939 return;
1940 }
1941 case ISD::SIGN_EXTEND: {
1942 EVT InVT = Op.getOperand(0).getValueType();
1943 unsigned InBits = InVT.getScalarType().getSizeInBits();
1944 APInt InSignBit = APInt::getSignBit(InBits);
1945 APInt NewBits = APInt::getHighBitsSet(BitWidth, BitWidth - InBits);
1946
1947 KnownZero = KnownZero.trunc(InBits);
1948 KnownOne = KnownOne.trunc(InBits);
1949 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1950
1951 // Note if the sign bit is known to be zero or one.
1952 bool SignBitKnownZero = KnownZero.isNegative();
1953 bool SignBitKnownOne = KnownOne.isNegative();
1954 assert(!(SignBitKnownZero && SignBitKnownOne) &&
1955 "Sign bit can't be known to be both zero and one!");
1956
1957 KnownZero = KnownZero.zext(BitWidth);
1958 KnownOne = KnownOne.zext(BitWidth);
1959
1960 // If the sign bit is known zero or one, the top bits match.
1961 if (SignBitKnownZero)
1962 KnownZero |= NewBits;
1963 else if (SignBitKnownOne)
1964 KnownOne |= NewBits;
1965 return;
1966 }
1967 case ISD::ANY_EXTEND: {
1968 EVT InVT = Op.getOperand(0).getValueType();
1969 unsigned InBits = InVT.getScalarType().getSizeInBits();
1970 KnownZero = KnownZero.trunc(InBits);
1971 KnownOne = KnownOne.trunc(InBits);
1972 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1973 KnownZero = KnownZero.zext(BitWidth);
1974 KnownOne = KnownOne.zext(BitWidth);
1975 return;
1976 }
1977 case ISD::TRUNCATE: {
1978 EVT InVT = Op.getOperand(0).getValueType();
1979 unsigned InBits = InVT.getScalarType().getSizeInBits();
1980 KnownZero = KnownZero.zext(InBits);
1981 KnownOne = KnownOne.zext(InBits);
1982 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1983 assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
1984 KnownZero = KnownZero.trunc(BitWidth);
1985 KnownOne = KnownOne.trunc(BitWidth);
1986 break;
1987 }
1988 case ISD::AssertZext: {
1989 EVT VT = cast<VTSDNode>(Op.getOperand(1))->getVT();
1990 APInt InMask = APInt::getLowBitsSet(BitWidth, VT.getSizeInBits());
1991 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
1992 KnownZero |= (~InMask);
1993 KnownOne &= (~KnownZero);
1994 return;
1995 }
1996 case ISD::FGETSIGN:
1997 // All bits are zero except the low bit.
1998 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - 1);
1999 return;
2000
2001 case ISD::SUB: {
2002 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0))) {
2003 // We know that the top bits of C-X are clear if X contains fewer bits
2004 // than C (i.e. no wrap-around can happen). For example, 20-X is
2005 // positive if we can prove that X is >= 0 and < 16.
2006 if (CLHS->getAPIntValue().isNonNegative()) {
2007 unsigned NLZ = (CLHS->getAPIntValue()+1).countLeadingZeros();
2008 // NLZ can't be BitWidth with no sign bit
2009 APInt MaskV = APInt::getHighBitsSet(BitWidth, NLZ+1);
2010 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2011
2012 // If all of the MaskV bits are known to be zero, then we know the
2013 // output top bits are zero, because we now know that the output is
2014 // from [0-C].
2015 if ((KnownZero2 & MaskV) == MaskV) {
2016 unsigned NLZ2 = CLHS->getAPIntValue().countLeadingZeros();
2017 // Top bits known zero.
2018 KnownZero = APInt::getHighBitsSet(BitWidth, NLZ2);
2019 }
2020 }
2021 }
2022 }
2023 // fall through
2024 case ISD::ADD:
2025 case ISD::ADDE: {
2026 // The output's low known-0 bits are the low clear bits common to both the
2027 // LHS and the RHS. For example, 8+(X<<3) is known to have the
2028 // low 3 bits clear.
2029 ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
2030 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
2031 unsigned KnownZeroOut = KnownZero2.countTrailingOnes();
2032
2033 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2034 assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
2035 KnownZeroOut = std::min(KnownZeroOut,
2036 KnownZero2.countTrailingOnes());
2037
2038 if (Op.getOpcode() == ISD::ADD) {
2039 KnownZero |= APInt::getLowBitsSet(BitWidth, KnownZeroOut);
2040 return;
2041 }
2042
2043 // With ADDE, a carry bit may be added in, so we can only use this
2044 // information if we know (at least) that the low two bits are clear. We
2045 // then return to the caller that the low bit is unknown but that other bits
2046 // are known zero.
2047 if (KnownZeroOut >= 2) // ADDE
2048 KnownZero |= APInt::getBitsSet(BitWidth, 1, KnownZeroOut);
2049 return;
2050 }
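// ADDE example: if both addends end in two known-zero bits, the low two
// result bits are just the incoming carry, so bit 0 stays unknown while
// bit 1 is provably zero; getBitsSet(BitWidth, 1, KnownZeroOut) records
// the known-zero bits starting from bit 1.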
2051 case ISD::SREM:
2052 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2053 const APInt &RA = Rem->getAPIntValue().abs();
2054 if (RA.isPowerOf2()) {
2055 APInt LowBits = RA - 1;
2056 APInt Mask2 = LowBits | APInt::getSignBit(BitWidth);
2057 ComputeMaskedBits(Op.getOperand(0), KnownZero2,KnownOne2,Depth+1);
2058
2059 // The low bits of the first operand are unchanged by the srem.
2060 KnownZero = KnownZero2 & LowBits;
2061 KnownOne = KnownOne2 & LowBits;
2062
2063 // If the first operand is non-negative or has all low bits zero, then
2064 // the upper bits are all zero.
2065 if (KnownZero2[BitWidth-1] || ((KnownZero2 & LowBits) == LowBits))
2066 KnownZero |= ~LowBits;
2067
2068 // If the first operand is negative and not all low bits are zero, then
2069 // the upper bits are all one.
2070 if (KnownOne2[BitWidth-1] && ((KnownOne2 & LowBits) != 0))
2071 KnownOne |= ~LowBits;
2072 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
2073 }
2074 }
2075 return;
2076 case ISD::UREM: {
2077 if (ConstantSDNode *Rem = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2078 const APInt &RA = Rem->getAPIntValue();
2079 if (RA.isPowerOf2()) {
2080 APInt LowBits = (RA - 1);
2081 KnownZero |= ~LowBits;
2082 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne,Depth+1);
2083 assert((KnownZero & KnownOne) == 0&&"Bits known to be one AND zero?");
2084 break;
2085 }
2086 }
2087
2088 // Since the result is less than or equal to either operand, any leading
2089 // zero bits in either operand must also exist in the result.
2090 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2091 ComputeMaskedBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
2092
2093 uint32_t Leaders = std::max(KnownZero.countLeadingOnes(),
2094 KnownZero2.countLeadingOnes());
2095 KnownOne.clearAllBits();
2096 KnownZero = APInt::getHighBitsSet(BitWidth, Leaders);
2097 return;
2098 }
2099 case ISD::FrameIndex:
2100 case ISD::TargetFrameIndex:
2101 if (unsigned Align = InferPtrAlignment(Op)) {
2102 // The low bits are known zero if the pointer is aligned.
2103 KnownZero = APInt::getLowBitsSet(BitWidth, Log2_32(Align));
2104 return;
2105 }
2106 break;
2107
2108 default:
2109 if (Op.getOpcode() < ISD::BUILTIN_OP_END)
2110 break;
2111 // Fallthrough
2112 case ISD::INTRINSIC_WO_CHAIN:
2113 case ISD::INTRINSIC_W_CHAIN:
2114 case ISD::INTRINSIC_VOID:
2115 // Allow the target to implement this method for its nodes.
2116 TLI.computeMaskedBitsForTargetNode(Op, KnownZero, KnownOne, *this, Depth);
2117 return;
2118 }
2119}
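// Illustrative call (V names an SDValue in the caller):
//
//   APInt KnownZero, KnownOne;
//   DAG.ComputeMaskedBits(V, KnownZero, KnownOne);
//
// The two masks are always disjoint on return; a bit set in neither mask
// is simply unknown. Most clients use the MaskedValueIsZero wrapper above.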
2120
2121/// ComputeNumSignBits - Return the number of times the sign bit of the
2122/// register is replicated into the other bits. We know that at least 1 bit
2123/// is always equal to the sign bit (itself), but other cases can give us
2124/// information. For example, immediately after an "SRA X, 2", we know that
2125/// the top 3 bits are all equal to each other, so we return 3.
2126unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, unsigned Depth) const{
2127 EVT VT = Op.getValueType();
2128 assert(VT.isInteger() && "Invalid VT!");
2129 unsigned VTBits = VT.getScalarType().getSizeInBits();
2130 unsigned Tmp, Tmp2;
2131 unsigned FirstAnswer = 1;
2132
2133 if (Depth == 6)
2134 return 1; // Limit search depth.
2135
2136 switch (Op.getOpcode()) {
2137 default: break;
2138 case ISD::AssertSext:
2139 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2140 return VTBits-Tmp+1;
2141 case ISD::AssertZext:
2142 Tmp = cast<VTSDNode>(Op.getOperand(1))->getVT().getSizeInBits();
2143 return VTBits-Tmp;
2144
2145 case ISD::Constant: {
2146 const APInt &Val = cast<ConstantSDNode>(Op)->getAPIntValue();
2147 return Val.getNumSignBits();
2148 }
2149
2150 case ISD::SIGN_EXTEND:
2151 Tmp = VTBits-Op.getOperand(0).getValueType().getScalarType().getSizeInBits();
2152 return ComputeNumSignBits(Op.getOperand(0), Depth+1) + Tmp;
2153
2154 case ISD::SIGN_EXTEND_INREG:
2155 // Max of the input and what this extends.
2156 Tmp =
2157 cast<VTSDNode>(Op.getOperand(1))->getVT().getScalarType().getSizeInBits();
2158 Tmp = VTBits-Tmp+1;
2159
2160 Tmp2 = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2161 return std::max(Tmp, Tmp2);
2162
2163 case ISD::SRA:
2164 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2165 // SRA X, C -> adds C sign bits.
2166 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2167 Tmp += C->getZExtValue();
2168 if (Tmp > VTBits) Tmp = VTBits;
2169 }
2170 return Tmp;
2171 case ISD::SHL:
2172 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2173 // shl destroys sign bits.
2174 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2175 if (C->getZExtValue() >= VTBits || // Bad shift.
2176 C->getZExtValue() >= Tmp) break; // Shifted all sign bits out.
2177 return Tmp - C->getZExtValue();
2178 }
2179 break;
2180 case ISD::AND:
2181 case ISD::OR:
2182 case ISD::XOR: // NOT is handled here.
2183 // Logical binary ops preserve the number of sign bits at the worst.
2184 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2185 if (Tmp != 1) {
2186 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2187 FirstAnswer = std::min(Tmp, Tmp2);
2188 // We computed what we know about the sign bits as our first
2189 // answer. Now proceed to the generic code that uses
2190 // ComputeMaskedBits, and pick whichever answer is better.
2191 }
2192 break;
2193
2194 case ISD::SELECT:
2195 Tmp = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2196 if (Tmp == 1) return 1; // Early out.
2197 Tmp2 = ComputeNumSignBits(Op.getOperand(2), Depth+1);
2198 return std::min(Tmp, Tmp2);
2199
2200 case ISD::SADDO:
2201 case ISD::UADDO:
2202 case ISD::SSUBO:
2203 case ISD::USUBO:
2204 case ISD::SMULO:
2205 case ISD::UMULO:
2206 if (Op.getResNo() != 1)
2207 break;
2208 // The boolean result conforms to getBooleanContents. Fall through.
2209 case ISD::SETCC:
2210 // If setcc returns 0/-1, all bits are sign bits.
2211 if (TLI.getBooleanContents(Op.getValueType().isVector()) ==
2212 TargetLowering::ZeroOrNegativeOneBooleanContent)
2213 return VTBits;
2214 break;
2215 case ISD::ROTL:
2216 case ISD::ROTR:
2217 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
2218 unsigned RotAmt = C->getZExtValue() & (VTBits-1);
2219
2220 // Handle rotate right by N like a rotate left by VTBits-N.
2221 if (Op.getOpcode() == ISD::ROTR)
2222 RotAmt = (VTBits-RotAmt) & (VTBits-1);
2223
2224 // If we aren't rotating out all of the known-in sign bits, return the
2225 // number that are left. This handles rotl(sext(x), 1) for example.
2226 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2227 if (Tmp > RotAmt+1) return Tmp-RotAmt;
2228 }
2229 break;
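// For example, rotl (sext i8 X to i32), 1 starts with 25 known sign bits
// (Tmp = 25); rotating by one discards a single copy, so Tmp - RotAmt = 24
// bits are still known equal to each other.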
2230 case ISD::ADD:
2231 // Add can have at most one carry bit. Thus we know that the output
2232 // is, at worst, one more bit than the inputs.
2233 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2234 if (Tmp == 1) return 1; // Early out.
2235
2236 // Special case decrementing a value (ADD X, -1):
2237 if (ConstantSDNode *CRHS = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2238 if (CRHS->isAllOnesValue()) {
2239 APInt KnownZero, KnownOne;
2240 ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
2241
2242 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2243 // sign bits set.
2244 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2245 return VTBits;
2246
2247 // If we are subtracting one from a positive number, there is no carry
2248 // out of the result.
2249 if (KnownZero.isNegative())
2250 return Tmp;
2251 }
2252
2253 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2254 if (Tmp2 == 1) return 1;
2255 return std::min(Tmp, Tmp2)-1;
2256
2257 case ISD::SUB:
2258 Tmp2 = ComputeNumSignBits(Op.getOperand(1), Depth+1);
2259 if (Tmp2 == 1) return 1;
2260
2261 // Handle NEG.
2262 if (ConstantSDNode *CLHS = dyn_cast<ConstantSDNode>(Op.getOperand(0)))
2263 if (CLHS->isNullValue()) {
2264 APInt KnownZero, KnownOne;
2265 ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
2266 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2267 // sign bits set.
2268 if ((KnownZero | APInt(VTBits, 1)).isAllOnesValue())
2269 return VTBits;
2270
2271 // If the input is known to be positive (the sign bit is known clear),
2272 // the output of the NEG has the same number of sign bits as the input.
2273 if (KnownZero.isNegative())
2274 return Tmp2;
2275
2276 // Otherwise, we treat this like a SUB.
2277 }
2278
2279 // Sub can have at most one carry bit. Thus we know that the output
2280 // is, at worst, one more bit than the inputs.
2281 Tmp = ComputeNumSignBits(Op.getOperand(0), Depth+1);
2282 if (Tmp == 1) return 1; // Early out.
2283 return std::min(Tmp, Tmp2)-1;
2284 case ISD::TRUNCATE:
2285 // FIXME: it's tricky to do anything useful for this, but it is an important
2286 // case for targets like X86.
2287 break;
2288 }
2289
2290 // Handle LOADX separately here. The EXTLOAD case will fall through.
2291 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(Op)) {
2292 unsigned ExtType = LD->getExtensionType();
2293 switch (ExtType) {
2294 default: break;
2295 case ISD::SEXTLOAD: // e.g. i16 -> i32: '17' sign bits known
2296 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2297 return VTBits-Tmp+1;
2299 case ISD::ZEXTLOAD: // e.g. i16 -> i32: '16' sign bits known
2299 Tmp = LD->getMemoryVT().getScalarType().getSizeInBits();
2300 return VTBits-Tmp;
2301 }
2302 }
2303
2304 // Allow the target to implement this method for its nodes.
2305 if (Op.getOpcode() >= ISD::BUILTIN_OP_END ||
2306 Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
2307 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN ||
2308 Op.getOpcode() == ISD::INTRINSIC_VOID) {
2309 unsigned NumBits = TLI.ComputeNumSignBitsForTargetNode(Op, Depth);
2310 if (NumBits > 1) FirstAnswer = std::max(FirstAnswer, NumBits);
2311 }
2312
2313 // Finally, if we can prove that the top bits of the result are 0's or 1's,
2314 // use this information.
2315 APInt KnownZero, KnownOne;
2316 ComputeMaskedBits(Op, KnownZero, KnownOne, Depth);
2317
2318 APInt Mask;
2319 if (KnownZero.isNegative()) { // sign bit is 0
2320 Mask = KnownZero;
2321 } else if (KnownOne.isNegative()) { // sign bit is 1
2322 Mask = KnownOne;
2323 } else {
2324 // Nothing known.
2325 return FirstAnswer;
2326 }
2327
2328 // Okay, we know that the sign bit in Mask is set. Use CLZ to determine
2329 // the number of identical bits in the top of the input value.
2330 Mask = ~Mask;
2331 Mask <<= Mask.getBitWidth()-VTBits;
2332 // Return # leading zeros. We use 'min' here in case Val was zero before
2333 // shifting. We don't want to return '64' as for an i32 "0".
2334 return std::max(FirstAnswer, std::min(VTBits, Mask.countLeadingZeros()));
2335}
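// Illustrative results: a plain i32 value reports at least 1 sign bit;
// (sra X, 2) reports at least 3; an i16 sextload into i32 reports 17, and
// the matching zextload reports 16 (the known-zero top half).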
2336
2337/// isBaseWithConstantOffset - Return true if the specified operand is an
2338/// ISD::ADD with a ConstantSDNode on the right-hand side, or if it is an
2339/// ISD::OR with a ConstantSDNode that is guaranteed to have the same
2340/// semantics as an ADD. This handles the equivalence:
2341/// X|Cst == X+Cst iff X&Cst = 0.
2342bool SelectionDAG::isBaseWithConstantOffset(SDValue Op) const {
2343 if ((Op.getOpcode() != ISD::ADD && Op.getOpcode() != ISD::OR) ||
2344 !isa<ConstantSDNode>(Op.getOperand(1)))
2345 return false;
2346
2347 if (Op.getOpcode() == ISD::OR &&
2348 !MaskedValueIsZero(Op.getOperand(0),
2349 cast<ConstantSDNode>(Op.getOperand(1))->getAPIntValue()))
2350 return false;
2351
2352 return true;
2353}
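// For example, (or X, 4) where X is known to be a multiple of 8 qualifies:
// MaskedValueIsZero(X, 4) proves no bits overlap, so the OR adds exactly 4
// and the node can be treated as base X plus constant offset 4.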
2354
2355
2356bool SelectionDAG::isKnownNeverNaN(SDValue Op) const {
2357 // If we're told that NaNs won't happen, assume they won't.
2358 if (getTarget().Options.NoNaNsFPMath)
2359 return true;
2360
2361 // If the value is a constant, we can obviously see if it is a NaN or not.
2362 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2363 return !C->getValueAPF().isNaN();
2364
2365 // TODO: Recognize more cases here.
2366
2367 return false;
2368}
2369
2370bool SelectionDAG::isKnownNeverZero(SDValue Op) const {
2371 // If the value is a constant, we can obviously see if it is a zero or not.
2372 if (const ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op))
2373 return !C->isZero();
2374
2375 // TODO: Recognize more cases here.
2376 switch (Op.getOpcode()) {
2377 default: break;
2378 case ISD::OR:
2379 if (const ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
2380 return !C->isNullValue();
2381 break;
2382 }
2383
2384 return false;
2385}
2386
2387bool SelectionDAG::isEqualTo(SDValue A, SDValue B) const {
2388 // Check the obvious case.
2389 if (A == B) return true;
2390
2391 // Check for negative and positive zero.
2392 if (const ConstantFPSDNode *CA = dyn_cast<ConstantFPSDNode>(A))
2393 if (const ConstantFPSDNode *CB = dyn_cast<ConstantFPSDNode>(B))
2394 if (CA->isZero() && CB->isZero()) return true;
2395
2396 // Otherwise they may not be equal.
2397 return false;
2398}
2399
2400/// getNode - Gets or creates the specified node.
2401///
2402SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT) {
2403 FoldingSetNodeID ID;
2404 AddNodeIDNode(ID, Opcode, getVTList(VT), 0, 0);
2405 void *IP = 0;
2406 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2407 return SDValue(E, 0);
2408
2409 SDNode *N = new (NodeAllocator) SDNode(Opcode, DL, getVTList(VT));
2410 CSEMap.InsertNode(N, IP);
2411
2412 AllNodes.push_back(N);
2413#ifndef NDEBUG
2414 VerifySDNode(N);
2415#endif
2416 return SDValue(N, 0);
2417}
2418
2419SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
2420 EVT VT, SDValue Operand) {
2421 // Constant fold unary operations with an integer constant operand.
2422 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Operand.getNode())) {
2423 const APInt &Val = C->getAPIntValue();
2424 switch (Opcode) {
2425 default: break;
2426 case ISD::SIGN_EXTEND:
2427 return getConstant(Val.sextOrTrunc(VT.getSizeInBits()), VT);
2428 case ISD::ANY_EXTEND:
2429 case ISD::ZERO_EXTEND:
2430 case ISD::TRUNCATE:
2431 return getConstant(Val.zextOrTrunc(VT.getSizeInBits()), VT);
2432 case ISD::UINT_TO_FP:
2433 case ISD::SINT_TO_FP: {
2434 APFloat apf(EVTToAPFloatSemantics(VT),
2435 APInt::getNullValue(VT.getSizeInBits()));
2436 (void)apf.convertFromAPInt(Val,
2437 Opcode==ISD::SINT_TO_FP,
2438 APFloat::rmNearestTiesToEven);
2439 return getConstantFP(apf, VT);
2440 }
2441 case ISD::BITCAST:
2442 if (VT == MVT::f32 && C->getValueType(0) == MVT::i32)
2443 return getConstantFP(APFloat(APFloat::IEEEsingle, Val), VT);
2444 else if (VT == MVT::f64 && C->getValueType(0) == MVT::i64)
2445 return getConstantFP(APFloat(APFloat::IEEEdouble, Val), VT);
2446 break;
2447 case ISD::BSWAP:
2448 return getConstant(Val.byteSwap(), VT);
2449 case ISD::CTPOP:
2450 return getConstant(Val.countPopulation(), VT);
2451 case ISD::CTLZ:
2452 case ISD::CTLZ_ZERO_UNDEF:
2453 return getConstant(Val.countLeadingZeros(), VT);
2454 case ISD::CTTZ:
2455 case ISD::CTTZ_ZERO_UNDEF:
2456 return getConstant(Val.countTrailingZeros(), VT);
2457 }
2458 }
2459
2460 // Constant fold unary operations with a floating point constant operand.
2461 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Operand.getNode())) {
2462 APFloat V = C->getValueAPF(); // make copy
2463 switch (Opcode) {
2464 case ISD::FNEG:
2465 V.changeSign();
2466 return getConstantFP(V, VT);
2467 case ISD::FABS:
2468 V.clearSign();
2469 return getConstantFP(V, VT);
2470 case ISD::FCEIL: {
2471 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardPositive);
2472 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2473 return getConstantFP(V, VT);
2474 break;
2475 }
2476 case ISD::FTRUNC: {
2477 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardZero);
2478 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2479 return getConstantFP(V, VT);
2480 break;
2481 }
2482 case ISD::FFLOOR: {
2483 APFloat::opStatus fs = V.roundToIntegral(APFloat::rmTowardNegative);
2484 if (fs == APFloat::opOK || fs == APFloat::opInexact)
2485 return getConstantFP(V, VT);
2486 break;
2487 }
2488 case ISD::FP_EXTEND: {
2489 bool ignored;
2490 // This can return overflow, underflow, or inexact; we don't care.
2491 // FIXME need to be more flexible about rounding mode.
2492 (void)V.convert(EVTToAPFloatSemantics(VT),
2493 APFloat::rmNearestTiesToEven, &ignored);
2494 return getConstantFP(V, VT);
2495 }
2496 case ISD::FP_TO_SINT:
2497 case ISD::FP_TO_UINT: {
2498 integerPart x[2];
2499 bool ignored;
2500 assert(integerPartWidth >= 64);
2501 // FIXME need to be more flexible about rounding mode.
2502 APFloat::opStatus s = V.convertToInteger(x, VT.getSizeInBits(),
2503 Opcode==ISD::FP_TO_SINT,
2504 APFloat::rmTowardZero, &ignored);
2505 if (s==APFloat::opInvalidOp) // inexact is OK, in fact usual
2506 break;
2507 APInt api(VT.getSizeInBits(), x);
2508 return getConstant(api, VT);
2509 }
2510 case ISD::BITCAST:
2511 if (VT == MVT::i32 && C->getValueType(0) == MVT::f32)
2512 return getConstant((uint32_t)V.bitcastToAPInt().getZExtValue(), VT);
2513 else if (VT == MVT::i64 && C->getValueType(0) == MVT::f64)
2514 return getConstant(V.bitcastToAPInt().getZExtValue(), VT);
2515 break;
2516 }
2517 }
2518
2519 unsigned OpOpcode = Operand.getNode()->getOpcode();
2520 switch (Opcode) {
2521 case ISD::TokenFactor:
2522 case ISD::MERGE_VALUES:
2523 case ISD::CONCAT_VECTORS:
2524 return Operand; // Factor, merge or concat of one node? No need.
2525 case ISD::FP_ROUND: llvm_unreachable("Invalid method to make FP_ROUND node");
2526 case ISD::FP_EXTEND:
2527 assert(VT.isFloatingPoint() &&
2528 Operand.getValueType().isFloatingPoint() && "Invalid FP cast!");
2529 if (Operand.getValueType() == VT) return Operand; // noop conversion.
2530 assert((!VT.isVector() ||
2531 VT.getVectorNumElements() ==
2532 Operand.getValueType().getVectorNumElements()) &&
2533 "Vector element count mismatch!");
2534 if (Operand.getOpcode() == ISD::UNDEF)
2535 return getUNDEF(VT);
2536 break;
2537 case ISD::SIGN_EXTEND:
2538 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2539 "Invalid SIGN_EXTEND!");
2540 if (Operand.getValueType() == VT) return Operand; // noop extension
2541 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2542 "Invalid sext node, dst < src!");
2543 assert((!VT.isVector() ||
2544 VT.getVectorNumElements() ==
2545 Operand.getValueType().getVectorNumElements()) &&
2546 "Vector element count mismatch!");
2547 if (OpOpcode == ISD::SIGN_EXTEND || OpOpcode == ISD::ZERO_EXTEND)
2548 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2549 else if (OpOpcode == ISD::UNDEF)
2550 // sext(undef) = 0, because the top bits will all be the same.
2551 return getConstant(0, VT);
2552 break;
2553 case ISD::ZERO_EXTEND:
2554 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2555 "Invalid ZERO_EXTEND!");
2556 if (Operand.getValueType() == VT) return Operand; // noop extension
2557 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2558 "Invalid zext node, dst < src!");
2559 assert((!VT.isVector() ||
2560 VT.getVectorNumElements() ==
2561 Operand.getValueType().getVectorNumElements()) &&
2562 "Vector element count mismatch!");
2563 if (OpOpcode == ISD::ZERO_EXTEND) // (zext (zext x)) -> (zext x)
2564 return getNode(ISD::ZERO_EXTEND, DL, VT,
2565 Operand.getNode()->getOperand(0));
2566 else if (OpOpcode == ISD::UNDEF)
2567 // zext(undef) = 0, because the top bits will be zero.
2568 return getConstant(0, VT);
2569 break;
2570 case ISD::ANY_EXTEND:
2571 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2572 "Invalid ANY_EXTEND!");
2573 if (Operand.getValueType() == VT) return Operand; // noop extension
2574 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) &&
2575 "Invalid anyext node, dst < src!");
2576 assert((!VT.isVector() ||
2577 VT.getVectorNumElements() ==
2578 Operand.getValueType().getVectorNumElements()) &&
2579 "Vector element count mismatch!");
2580
2581 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2582 OpOpcode == ISD::ANY_EXTEND)
2583 // (ext (zext x)) -> (zext x) and (ext (sext x)) -> (sext x)
2584 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2585 else if (OpOpcode == ISD::UNDEF)
2586 return getUNDEF(VT);
2587
2588 // (ext (trunc x)) -> x
2589 if (OpOpcode == ISD::TRUNCATE) {
2590 SDValue OpOp = Operand.getNode()->getOperand(0);
2591 if (OpOp.getValueType() == VT)
2592 return OpOp;
2593 }
2594 break;
2595 case ISD::TRUNCATE:
2596 assert(VT.isInteger() && Operand.getValueType().isInteger() &&
2597 "Invalid TRUNCATE!");
2598 if (Operand.getValueType() == VT) return Operand; // noop truncate
2599 assert(Operand.getValueType().getScalarType().bitsGT(VT.getScalarType()) &&
2600 "Invalid truncate node, src < dst!");
2601 assert((!VT.isVector() ||
2602 VT.getVectorNumElements() ==
2603 Operand.getValueType().getVectorNumElements()) &&
2604 "Vector element count mismatch!");
2605 if (OpOpcode == ISD::TRUNCATE)
2606 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2607 if (OpOpcode == ISD::ZERO_EXTEND || OpOpcode == ISD::SIGN_EXTEND ||
2608 OpOpcode == ISD::ANY_EXTEND) {
2609 // If the source is smaller than the dest, we still need an extend.
2610 if (Operand.getNode()->getOperand(0).getValueType().getScalarType()
2611 .bitsLT(VT.getScalarType()))
2612 return getNode(OpOpcode, DL, VT, Operand.getNode()->getOperand(0));
2613 if (Operand.getNode()->getOperand(0).getValueType().bitsGT(VT))
2614 return getNode(ISD::TRUNCATE, DL, VT, Operand.getNode()->getOperand(0));
2615 return Operand.getNode()->getOperand(0);
2616 }
2617 if (OpOpcode == ISD::UNDEF)
2618 return getUNDEF(VT);
2619 break;
2620 case ISD::BITCAST:
2621 // Basic sanity checking.
2622 assert(VT.getSizeInBits() == Operand.getValueType().getSizeInBits()
2623 && "Cannot BITCAST between types of different sizes!");
2624 if (VT == Operand.getValueType()) return Operand; // noop conversion.
2625 if (OpOpcode == ISD::BITCAST) // bitconv(bitconv(x)) -> bitconv(x)
2626 return getNode(ISD::BITCAST, DL, VT, Operand.getOperand(0));
2627 if (OpOpcode == ISD::UNDEF)
2628 return getUNDEF(VT);
2629 break;
2630 case ISD::SCALAR_TO_VECTOR:
2631 assert(VT.isVector() && !Operand.getValueType().isVector() &&
2632 (VT.getVectorElementType() == Operand.getValueType() ||
2633 (VT.getVectorElementType().isInteger() &&
2634 Operand.getValueType().isInteger() &&
2635 VT.getVectorElementType().bitsLE(Operand.getValueType()))) &&
2636 "Illegal SCALAR_TO_VECTOR node!");
2637 if (OpOpcode == ISD::UNDEF)
2638 return getUNDEF(VT);
2639 // scalar_to_vector(extract_vector_elt V, 0) -> V, top bits are undefined.
2640 if (OpOpcode == ISD::EXTRACT_VECTOR_ELT &&
2641 isa<ConstantSDNode>(Operand.getOperand(1)) &&
2642 Operand.getConstantOperandVal(1) == 0 &&
2643 Operand.getOperand(0).getValueType() == VT)
2644 return Operand.getOperand(0);
2645 break;
2646 case ISD::FNEG:
2647 // -(X-Y) -> (Y-X) is unsafe because when X==Y, -0.0 != +0.0
2648 if (getTarget().Options.UnsafeFPMath && OpOpcode == ISD::FSUB)
2649 return getNode(ISD::FSUB, DL, VT, Operand.getNode()->getOperand(1),
2650 Operand.getNode()->getOperand(0));
2651 if (OpOpcode == ISD::FNEG) // --X -> X
2652 return Operand.getNode()->getOperand(0);
2653 break;
2654 case ISD::FABS:
2655 if (OpOpcode == ISD::FNEG) // abs(-X) -> abs(X)
2656 return getNode(ISD::FABS, DL, VT, Operand.getNode()->getOperand(0));
2657 break;
2658 }
2659
2660 SDNode *N;
2661 SDVTList VTs = getVTList(VT);
2662 if (VT != MVT::Glue) { // Don't CSE flag producing nodes
2663 FoldingSetNodeID ID;
2664 SDValue Ops[1] = { Operand };
2665 AddNodeIDNode(ID, Opcode, VTs, Ops, 1);
2666 void *IP = 0;
2667 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
2668 return SDValue(E, 0);
2669
2670 N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTs, Operand);
2671 CSEMap.InsertNode(N, IP);
2672 } else {
2673 N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTs, Operand);
2674 }
2675
2676 AllNodes.push_back(N);
2677#ifndef NDEBUG
2678 VerifySDNode(N);
2679#endif
2680 return SDValue(N, 0);
2681}
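// Illustrative folds from the switch above: getNode(ISD::CTPOP, DL,
// MVT::i32, getConstant(0xF0, MVT::i32)) yields getConstant(4, MVT::i32),
// and getNode(ISD::TRUNCATE, ...) of another TRUNCATE collapses into a
// single truncate of the original operand.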
2682
2683SDValue SelectionDAG::FoldConstantArithmetic(unsigned Opcode, EVT VT,
2684 SDNode *Cst1, SDNode *Cst2) {
2685 SmallVector<std::pair<ConstantSDNode *, ConstantSDNode *>, 4> Inputs;
2686 SmallVector<SDValue, 4> Outputs;
2687 EVT SVT = VT.getScalarType();
2688
2689 ConstantSDNode *Scalar1 = dyn_cast<ConstantSDNode>(Cst1);
2690 ConstantSDNode *Scalar2 = dyn_cast<ConstantSDNode>(Cst2);
2691 if (Scalar1 && Scalar2) {
2692 // Scalar instruction.
2693 Inputs.push_back(std::make_pair(Scalar1, Scalar2));
2694 } else {
2695 // For vectors extract each constant element into Inputs so we can constant
2696 // fold them individually.
2697 BuildVectorSDNode *BV1 = dyn_cast<BuildVectorSDNode>(Cst1);
2698 BuildVectorSDNode *BV2 = dyn_cast<BuildVectorSDNode>(Cst2);
2699 if (!BV1 || !BV2)
2700 return SDValue();
2701
2702 assert(BV1->getNumOperands() == BV2->getNumOperands() && "Out of sync!");
2703
2704 for (unsigned I = 0, E = BV1->getNumOperands(); I != E; ++I) {
2705 ConstantSDNode *V1 = dyn_cast<ConstantSDNode>(BV1->getOperand(I));
2706 ConstantSDNode *V2 = dyn_cast<ConstantSDNode>(BV2->getOperand(I));
2707 if (!V1 || !V2) // Not a constant, bail.
2708 return SDValue();
2709
2710 // Avoid BUILD_VECTOR nodes that perform implicit truncation.
2711 // FIXME: This is valid and could be handled by truncating the APInts.
2712 if (V1->getValueType(0) != SVT || V2->getValueType(0) != SVT)
2713 return SDValue();
2714
2715 Inputs.push_back(std::make_pair(V1, V2));
2716 }
2717 }
2718
2719 // We have a number of constant values, constant fold them element by element.
2720 for (unsigned I = 0, E = Inputs.size(); I != E; ++I) {
2721 const APInt &C1 = Inputs[I].first->getAPIntValue();
2722 const APInt &C2 = Inputs[I].second->getAPIntValue();
2723
2724 switch (Opcode) {
2725 case ISD::ADD:
2726 Outputs.push_back(getConstant(C1 + C2, SVT));
2727 break;
2728 case ISD::SUB:
2729 Outputs.push_back(getConstant(C1 - C2, SVT));
2730 break;
2731 case ISD::MUL:
2732 Outputs.push_back(getConstant(C1 * C2, SVT));
2733 break;
2734 case ISD::UDIV:
2735 if (!C2.getBoolValue())
2736 return SDValue();
2737 Outputs.push_back(getConstant(C1.udiv(C2), SVT));
2738 break;
2739 case ISD::UREM:
2740 if (!C2.getBoolValue())
2741 return SDValue();
2742 Outputs.push_back(getConstant(C1.urem(C2), SVT));
2743 break;
2744 case ISD::SDIV:
2745 if (!C2.getBoolValue())
2746 return SDValue();
2747 Outputs.push_back(getConstant(C1.sdiv(C2), SVT));
2748 break;
2749 case ISD::SREM:
2750 if (!C2.getBoolValue())
2751 return SDValue();
2752 Outputs.push_back(getConstant(C1.srem(C2), SVT));
2753 break;
2754 case ISD::AND:
2755 Outputs.push_back(getConstant(C1 & C2, SVT));
2756 break;
2757 case ISD::OR:
2758 Outputs.push_back(getConstant(C1 | C2, SVT));
2759 break;
2760 case ISD::XOR:
2761 Outputs.push_back(getConstant(C1 ^ C2, SVT));
2762 break;
2763 case ISD::SHL:
2764 Outputs.push_back(getConstant(C1 << C2, SVT));
2765 break;
2766 case ISD::SRL:
2767 Outputs.push_back(getConstant(C1.lshr(C2), SVT));
2768 break;
2769 case ISD::SRA:
2770 Outputs.push_back(getConstant(C1.ashr(C2), SVT));
2771 break;
2772 case ISD::ROTL:
2773 Outputs.push_back(getConstant(C1.rotl(C2), SVT));
2774 break;
2775 case ISD::ROTR:
2776 Outputs.push_back(getConstant(C1.rotr(C2), SVT));
2777 break;
2778 default:
2779 return SDValue();
2780 }
2781 }
2782
2783 // Handle the scalar case first.
2784 if (Outputs.size() == 1)
2785 return Outputs.back();
2786
2787 // Otherwise build a big vector out of the scalar elements we generated.
2788 return getNode(ISD::BUILD_VECTOR, DebugLoc(), VT, Outputs.data(),
2789 Outputs.size());
2790}
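// For example, folding ISD::ADD over two v4i32 BUILD_VECTORs <1,2,3,4> and
// <10,20,30,40> yields a BUILD_VECTOR <11,22,33,44>; a division by a zero
// element, a non-constant element, or an implicitly truncated element makes
// the whole fold return the null SDValue().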
2791
2792SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT, SDValue N1,
2793 SDValue N2) {
2794 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
2795 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(N2.getNode());
2796 switch (Opcode) {
2797 default: break;
2798 case ISD::TokenFactor:
2799 assert(VT == MVT::Other && N1.getValueType() == MVT::Other &&
2800 N2.getValueType() == MVT::Other && "Invalid token factor!");
2801 // Fold trivial token factors.
2802 if (N1.getOpcode() == ISD::EntryToken) return N2;
2803 if (N2.getOpcode() == ISD::EntryToken) return N1;
2804 if (N1 == N2) return N1;
2805 break;
2806 case ISD::CONCAT_VECTORS:
2807 // Concat of UNDEFs is UNDEF.
2808 if (N1.getOpcode() == ISD::UNDEF &&
2809 N2.getOpcode() == ISD::UNDEF)
2810 return getUNDEF(VT);
2811
2812 // A CONCAT_VECTORS with all operands BUILD_VECTOR can be simplified to
2813 // one big BUILD_VECTOR.
2814 if (N1.getOpcode() == ISD::BUILD_VECTOR &&
2815 N2.getOpcode() == ISD::BUILD_VECTOR) {
2816 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
2817 N1.getNode()->op_end());
2818 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
2819 return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
2820 }
2821 break;
2822 case ISD::AND:
2823 assert(VT.isInteger() && "This operator does not apply to FP types!");
2824 assert(N1.getValueType() == N2.getValueType() &&
2825 N1.getValueType() == VT && "Binary operator types must match!");
2826 // (X & 0) -> 0. This commonly occurs when legalizing i64 values, so it's
2827 // worth handling here.
2828 if (N2C && N2C->isNullValue())
2829 return N2;
2830 if (N2C && N2C->isAllOnesValue()) // X & -1 -> X
2831 return N1;
2832 break;
2833 case ISD::OR:
2834 case ISD::XOR:
2835 case ISD::ADD:
2836 case ISD::SUB:
2837 assert(VT.isInteger() && "This operator does not apply to FP types!");
2838 assert(N1.getValueType() == N2.getValueType() &&
2839 N1.getValueType() == VT && "Binary operator types must match!");
2840 // (X ^|+- 0) -> X. This commonly occurs when legalizing i64 values, so
2841 // it's worth handling here.
2842 if (N2C && N2C->isNullValue())
2843 return N1;
2844 break;
2845 case ISD::UDIV:
2846 case ISD::UREM:
2847 case ISD::MULHU:
2848 case ISD::MULHS:
2849 case ISD::MUL:
2850 case ISD::SDIV:
2851 case ISD::SREM:
2852 assert(VT.isInteger() && "This operator does not apply to FP types!");
2853 assert(N1.getValueType() == N2.getValueType() &&
2854 N1.getValueType() == VT && "Binary operator types must match!");
2855 break;
2856 case ISD::FADD:
2857 case ISD::FSUB:
2858 case ISD::FMUL:
2859 case ISD::FDIV:
2860 case ISD::FREM:
2861 if (getTarget().Options.UnsafeFPMath) {
2862 if (Opcode == ISD::FADD) {
2863 // 0+x --> x
2864 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1))
2865 if (CFP->getValueAPF().isZero())
2866 return N2;
2867 // x+0 --> x
2868 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2869 if (CFP->getValueAPF().isZero())
2870 return N1;
2871 } else if (Opcode == ISD::FSUB) {
2872 // x-0 --> x
2873 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N2))
2874 if (CFP->getValueAPF().isZero())
2875 return N1;
2876 } else if (Opcode == ISD::FMUL) {
2877 ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(N1);
2878 SDValue V = N2;
2879
2880 // If the first operand isn't the constant, try the second
2881 if (!CFP) {
2882 CFP = dyn_cast<ConstantFPSDNode>(N2);
2883 V = N1;
2884 }
2885
2886 if (CFP) {
2887 // 0*x --> 0
2888 if (CFP->isZero())
2889 return SDValue(CFP,0);
2890 // 1*x --> x
2891 if (CFP->isExactlyValue(1.0))
2892 return V;
2893 }
2894 }
2895 }
2896 assert(VT.isFloatingPoint() && "This operator only applies to FP types!");
2897 assert(N1.getValueType() == N2.getValueType() &&
2898 N1.getValueType() == VT && "Binary operator types must match!");
2899 break;
2900 case ISD::FCOPYSIGN: // N1 and result must match. N1/N2 need not match.
2901 assert(N1.getValueType() == VT &&
2902 N1.getValueType().isFloatingPoint() &&
2903 N2.getValueType().isFloatingPoint() &&
2904 "Invalid FCOPYSIGN!");
2905 break;
2906 case ISD::SHL:
2907 case ISD::SRA:
2908 case ISD::SRL:
2909 case ISD::ROTL:
2910 case ISD::ROTR:
2911 assert(VT == N1.getValueType() &&
2912 "Shift operators return type must be the same as their first arg");
2913 assert(VT.isInteger() && N2.getValueType().isInteger() &&
2914 "Shifts only work on integers");
2915 assert((!VT.isVector() || VT == N2.getValueType()) &&
2916 "Vector shift amounts must be in the same as their first arg");
2917 // Verify that the shift amount VT is big enough to hold valid shift
2918 // amounts. This catches things like trying to shift an i1024 value by an
2919 // i8, which is easy to fall into in generic code that uses
2920 // TLI.getShiftAmountTy().
2921 assert(N2.getValueType().getSizeInBits() >=
2922 Log2_32_Ceil(N1.getValueType().getSizeInBits()) &&
2923 "Invalid use of small shift amount with oversized value!");
2924
2925 // Always fold shifts of i1 values so the code generator doesn't need to
2926 // handle them. Since we know the size of the shift has to be less than the
2927 // size of the value, the shift/rotate count is guaranteed to be zero.
2928 if (VT == MVT::i1)
2929 return N1;
2930 if (N2C && N2C->isNullValue())
2931 return N1;
2932 break;
2933 case ISD::FP_ROUND_INREG: {
2934 EVT EVT = cast<VTSDNode>(N2)->getVT();
2935 assert(VT == N1.getValueType() && "Not an inreg round!");
2936 assert(VT.isFloatingPoint() && EVT.isFloatingPoint() &&
2937 "Cannot FP_ROUND_INREG integer types");
2938 assert(EVT.isVector() == VT.isVector() &&
2939 "FP_ROUND_INREG type should be vector iff the operand "
2940 "type is vector!");
2941 assert((!EVT.isVector() ||
2942 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
2943 "Vector element counts must match in FP_ROUND_INREG");
2944 assert(EVT.bitsLE(VT) && "Not rounding down!");
2945 (void)EVT;
2946 if (cast<VTSDNode>(N2)->getVT() == VT) return N1; // Not actually rounding.
2947 break;
2948 }
2949 case ISD::FP_ROUND:
2950 assert(VT.isFloatingPoint() &&
2951 N1.getValueType().isFloatingPoint() &&
2952 VT.bitsLE(N1.getValueType()) &&
2953 isa<ConstantSDNode>(N2) && "Invalid FP_ROUND!");
2954 if (N1.getValueType() == VT) return N1; // noop conversion.
2955 break;
2956 case ISD::AssertSext:
2957 case ISD::AssertZext: {
2958 EVT EVT = cast<VTSDNode>(N2)->getVT();
2959 assert(VT == N1.getValueType() && "Not an inreg extend!");
2960 assert(VT.isInteger() && EVT.isInteger() &&
2961 "Cannot *_EXTEND_INREG FP types");
2962 assert(!EVT.isVector() &&
2963 "AssertSExt/AssertZExt type should be the vector element type "
2964 "rather than the vector type!");
2965 assert(EVT.bitsLE(VT) && "Not extending!");
2966 if (VT == EVT) return N1; // noop assertion.
2967 break;
2968 }
2969 case ISD::SIGN_EXTEND_INREG: {
2970 EVT EVT = cast<VTSDNode>(N2)->getVT();
2971 assert(VT == N1.getValueType() && "Not an inreg extend!");
2972 assert(VT.isInteger() && EVT.isInteger() &&
2973 "Cannot *_EXTEND_INREG FP types");
2974 assert(EVT.isVector() == VT.isVector() &&
2975 "SIGN_EXTEND_INREG type should be vector iff the operand "
2976 "type is vector!");
2977 assert((!EVT.isVector() ||
2978 EVT.getVectorNumElements() == VT.getVectorNumElements()) &&
2979 "Vector element counts must match in SIGN_EXTEND_INREG");
2980 assert(EVT.bitsLE(VT) && "Not extending!");
2981 if (EVT == VT) return N1; // Not actually extending
2982
2983 if (N1C) {
2984 APInt Val = N1C->getAPIntValue();
2985 unsigned FromBits = EVT.getScalarType().getSizeInBits();
2986 Val <<= Val.getBitWidth()-FromBits;
2987 Val = Val.ashr(Val.getBitWidth()-FromBits);
2988 return getConstant(Val, VT);
2989 }
2990 break;
2991 }
2992 case ISD::EXTRACT_VECTOR_ELT:
2993 // EXTRACT_VECTOR_ELT of an UNDEF is an UNDEF.
2994 if (N1.getOpcode() == ISD::UNDEF)
2995 return getUNDEF(VT);
2996
2997 // EXTRACT_VECTOR_ELT of CONCAT_VECTORS is often formed while lowering is
2998 // expanding copies of large vectors from registers.
2999 if (N2C &&
3000 N1.getOpcode() == ISD::CONCAT_VECTORS &&
3001 N1.getNumOperands() > 0) {
3002 unsigned Factor =
3003 N1.getOperand(0).getValueType().getVectorNumElements();
3004 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
3005 N1.getOperand(N2C->getZExtValue() / Factor),
3006 getConstant(N2C->getZExtValue() % Factor,
3007 N2.getValueType()));
3008 }
3009
3010 // EXTRACT_VECTOR_ELT of BUILD_VECTOR is often formed while lowering is
3011 // expanding large vector constants.
3012 if (N2C && N1.getOpcode() == ISD::BUILD_VECTOR) {
3013 SDValue Elt = N1.getOperand(N2C->getZExtValue());
3014
3015 if (VT != Elt.getValueType())
3016 // If the vector element type is not legal, the BUILD_VECTOR operands
3017 // are promoted and implicitly truncated, and the result implicitly
3018 // extended. Make that explicit here.
3019 Elt = getAnyExtOrTrunc(Elt, DL, VT);
3020
3021 return Elt;
3022 }
3023
3024 // EXTRACT_VECTOR_ELT of INSERT_VECTOR_ELT is often formed when vector
3025 // operations are lowered to scalars.
3026 if (N1.getOpcode() == ISD::INSERT_VECTOR_ELT) {
3027 // If the indices are the same, return the inserted element.
3028 // Otherwise, if the indices are known to be different, extract the
3029 // element from the original vector.
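      // For example, extracting lane 3 of (INSERT_VECTOR_ELT V, X, (i32 3))
      // folds to X, while extracting lane 2 instead folds to an extract of
      // lane 2 of V itself.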
3030 SDValue N1Op2 = N1.getOperand(2);
3031 ConstantSDNode *N1Op2C = dyn_cast<ConstantSDNode>(N1Op2.getNode());
3032
3033 if (N1Op2C && N2C) {
3034 if (N1Op2C->getZExtValue() == N2C->getZExtValue()) {
3035 if (VT == N1.getOperand(1).getValueType())
3036 return N1.getOperand(1);
3037 else
3038 return getSExtOrTrunc(N1.getOperand(1), DL, VT);
3039 }
3040
3041 return getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, N1.getOperand(0), N2);
3042 }
3043 }
3044 break;
3045 case ISD::EXTRACT_ELEMENT:
3046 assert(N2C && (unsigned)N2C->getZExtValue() < 2 && "Bad EXTRACT_ELEMENT!");
3047 assert(!N1.getValueType().isVector() && !VT.isVector() &&
3048 (N1.getValueType().isInteger() == VT.isInteger()) &&
3049 N1.getValueType() != VT &&
3050 "Wrong types for EXTRACT_ELEMENT!");
3051
3052 // EXTRACT_ELEMENT of BUILD_PAIR is often formed while legalize is expanding
3053 // 64-bit integers into 32-bit parts. Instead of building the extract of
3054 // the BUILD_PAIR, only to have legalize rip it apart, just do it now.
3055 if (N1.getOpcode() == ISD::BUILD_PAIR)
3056 return N1.getOperand(N2C->getZExtValue());
3057
3058 // EXTRACT_ELEMENT of a constant int is also very common.
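    // For example, taking element 1 (the high half) of the i64 constant
    // 0x1122334455667788 as an i32 shifts right by 32 bits and truncates,
    // yielding 0x11223344.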
3059 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
3060 unsigned ElementSize = VT.getSizeInBits();
3061 unsigned Shift = ElementSize * N2C->getZExtValue();
3062 APInt ShiftedVal = C->getAPIntValue().lshr(Shift);
3063 return getConstant(ShiftedVal.trunc(ElementSize), VT);
3064 }
3065 break;
3066 case ISD::EXTRACT_SUBVECTOR: {
3067 SDValue Index = N2;
3068 if (VT.isSimple() && N1.getValueType().isSimple()) {
3069 assert(VT.isVector() && N1.getValueType().isVector() &&
3070 "Extract subvector VTs must be vectors!");
3071 assert(VT.getVectorElementType() == N1.getValueType().getVectorElementType() &&
3072 "Extract subvector VTs must have the same element type!");
3073 assert(VT.getSimpleVT() <= N1.getValueType().getSimpleVT() &&
3074 "Extract subvector must be from larger vector to smaller vector!");
3075
3076 if (isa<ConstantSDNode>(Index.getNode())) {
3077 assert((VT.getVectorNumElements() +
3078 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3079 <= N1.getValueType().getVectorNumElements())
3080 && "Extract subvector overflow!");
3081 }
3082
3083 // Trivial extraction.
3084 if (VT.getSimpleVT() == N1.getValueType().getSimpleVT())
3085 return N1;
3086 }
3087 break;
3088 }
3089 }
3090
3091 // Perform trivial constant folding.
3092 SDValue SV = FoldConstantArithmetic(Opcode, VT, N1.getNode(), N2.getNode());
3093 if (SV.getNode()) return SV;
3094
3095 // Canonicalize constant to RHS if commutative.
3096 if (N1C && !N2C && isCommutativeBinOp(Opcode)) {
3097 std::swap(N1C, N2C);
3098 std::swap(N1, N2);
3099 }
3100
3101 // Constant fold FP operations.
3102 ConstantFPSDNode *N1CFP = dyn_cast<ConstantFPSDNode>(N1.getNode());
3103 ConstantFPSDNode *N2CFP = dyn_cast<ConstantFPSDNode>(N2.getNode());
3104 if (N1CFP) {
3105 if (!N2CFP && isCommutativeBinOp(Opcode)) {
3106 // Canonicalize constant to RHS if commutative.
3107 std::swap(N1CFP, N2CFP);
3108 std::swap(N1, N2);
3109 } else if (N2CFP) {
3110 APFloat V1 = N1CFP->getValueAPF(), V2 = N2CFP->getValueAPF();
3111 APFloat::opStatus s;
3112 switch (Opcode) {
3113 case ISD::FADD:
3114 s = V1.add(V2, APFloat::rmNearestTiesToEven);
3115 if (s != APFloat::opInvalidOp)
3116 return getConstantFP(V1, VT);
3117 break;
3118 case ISD::FSUB:
3119 s = V1.subtract(V2, APFloat::rmNearestTiesToEven);
3120 if (s!=APFloat::opInvalidOp)
3121 return getConstantFP(V1, VT);
3122 break;
3123 case ISD::FMUL:
3124 s = V1.multiply(V2, APFloat::rmNearestTiesToEven);
3125 if (s!=APFloat::opInvalidOp)
3126 return getConstantFP(V1, VT);
3127 break;
3128 case ISD::FDIV:
3129 s = V1.divide(V2, APFloat::rmNearestTiesToEven);
3130 if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
3131 return getConstantFP(V1, VT);
3132 break;
3133 case ISD::FREM :
3134 s = V1.mod(V2, APFloat::rmNearestTiesToEven);
3135 if (s!=APFloat::opInvalidOp && s!=APFloat::opDivByZero)
3136 return getConstantFP(V1, VT);
3137 break;
3138 case ISD::FCOPYSIGN:
3139 V1.copySign(V2);
3140 return getConstantFP(V1, VT);
3141 default: break;
3142 }
3143 }
3144
3145 if (Opcode == ISD::FP_ROUND) {
3146 APFloat V = N1CFP->getValueAPF(); // make copy
3147 bool ignored;
3148 // This can return overflow, underflow, or inexact; we don't care.
3149 // FIXME need to be more flexible about rounding mode.
3150 (void)V.convert(EVTToAPFloatSemantics(VT),
3151 APFloat::rmNearestTiesToEven, &ignored);
3152 return getConstantFP(V, VT);
3153 }
3154 }
3155
3156 // Canonicalize an UNDEF to the RHS, even over a constant.
3157 if (N1.getOpcode() == ISD::UNDEF) {
3158 if (isCommutativeBinOp(Opcode)) {
3159 std::swap(N1, N2);
3160 } else {
3161 switch (Opcode) {
3162 case ISD::FP_ROUND_INREG:
3163 case ISD::SIGN_EXTEND_INREG:
3164 case ISD::SUB:
3165 case ISD::FSUB:
3166 case ISD::FDIV:
3167 case ISD::FREM:
3168 case ISD::SRA:
3169 return N1; // fold op(undef, arg2) -> undef
3170 case ISD::UDIV:
3171 case ISD::SDIV:
3172 case ISD::UREM:
3173 case ISD::SREM:
3174 case ISD::SRL:
3175 case ISD::SHL:
3176 if (!VT.isVector())
3177 return getConstant(0, VT); // fold op(undef, arg2) -> 0
3178 // For vectors, we can't easily build an all zero vector, just return
3179 // the LHS.
3180 return N2;
3181 }
3182 }
3183 }
3184
3185 // Fold a bunch of operators when the RHS is undef.
3186 if (N2.getOpcode() == ISD::UNDEF) {
3187 switch (Opcode) {
3188 case ISD::XOR:
3189 if (N1.getOpcode() == ISD::UNDEF)
3190 // Handle undef ^ undef -> 0 special case. This is a common
3191 // idiom (misuse).
3192 return getConstant(0, VT);
3193 // fallthrough
3194 case ISD::ADD:
3195 case ISD::ADDC:
3196 case ISD::ADDE:
3197 case ISD::SUB:
3198 case ISD::UDIV:
3199 case ISD::SDIV:
3200 case ISD::UREM:
3201 case ISD::SREM:
3202 return N2; // fold op(arg1, undef) -> undef
3203 case ISD::FADD:
3204 case ISD::FSUB:
3205 case ISD::FMUL:
3206 case ISD::FDIV:
3207 case ISD::FREM:
3208 if (getTarget().Options.UnsafeFPMath)
3209 return N2;
3210 break;
3211 case ISD::MUL:
3212 case ISD::AND:
3213 case ISD::SRL:
3214 case ISD::SHL:
3215 if (!VT.isVector())
3216 return getConstant(0, VT); // fold op(arg1, undef) -> 0
3217 // For vectors, we can't easily build an all zero vector, just return
3218 // the LHS.
3219 return N1;
3220 case ISD::OR:
3221 if (!VT.isVector())
3222 return getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), VT);
3223 // For vectors, we can't easily build an all one vector, just return
3224 // the LHS.
3225 return N1;
3226 case ISD::SRA:
3227 return N1;
3228 }
3229 }
3230
3231 // Memoize this node if possible.
3232 SDNode *N;
3233 SDVTList VTs = getVTList(VT);
3234 if (VT != MVT::Glue) {
3235 SDValue Ops[] = { N1, N2 };
3236 FoldingSetNodeID ID;
3237 AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
3238 void *IP = 0;
3239 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3240 return SDValue(E, 0);
3241
3242 N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTs, N1, N2);
3243 CSEMap.InsertNode(N, IP);
3244 } else {
3245 N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTs, N1, N2);
3246 }
3247
3248 AllNodes.push_back(N);
3249#ifndef NDEBUG
3250 VerifySDNode(N);
3251#endif
3252 return SDValue(N, 0);
3253}
3254
3255SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
3256 SDValue N1, SDValue N2, SDValue N3) {
3257 // Perform various simplifications.
3258 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode());
3259 switch (Opcode) {
3260 case ISD::CONCAT_VECTORS:
3261 // A CONCAT_VECTORS with all BUILD_VECTOR operands can be simplified to
3262 // one big BUILD_VECTOR.
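    // For example, concatenating three v2i32 BUILD_VECTORs yields a single
    // v6i32 BUILD_VECTOR holding all six scalar operands.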
3263 if (N1.getOpcode() == ISD::BUILD_VECTOR &&
3264 N2.getOpcode() == ISD::BUILD_VECTOR &&
3265 N3.getOpcode() == ISD::BUILD_VECTOR) {
3266 SmallVector<SDValue, 16> Elts(N1.getNode()->op_begin(),
3267 N1.getNode()->op_end());
3268 Elts.append(N2.getNode()->op_begin(), N2.getNode()->op_end());
3269 Elts.append(N3.getNode()->op_begin(), N3.getNode()->op_end());
3270 return getNode(ISD::BUILD_VECTOR, DL, VT, &Elts[0], Elts.size());
3271 }
3272 break;
3273 case ISD::SETCC: {
3274 // Use FoldSetCC to simplify SETCC's.
3275 SDValue Simp = FoldSetCC(VT, N1, N2, cast<CondCodeSDNode>(N3)->get(), DL);
3276 if (Simp.getNode()) return Simp;
3277 break;
3278 }
3279 case ISD::SELECT:
3280 if (N1C) {
3281 if (N1C->getZExtValue())
3282 return N2; // select true, X, Y -> X
3283 return N3; // select false, X, Y -> Y
3284 }
3285
3286 if (N2 == N3) return N2; // select C, X, X -> X
3287 break;
3288 case ISD::VECTOR_SHUFFLE:
3289 llvm_unreachable("should use getVectorShuffle constructor!");
3290 case ISD::INSERT_SUBVECTOR: {
3291 SDValue Index = N3;
3292 if (VT.isSimple() && N1.getValueType().isSimple()
3293 && N2.getValueType().isSimple()) {
3294 assert(VT.isVector() && N1.getValueType().isVector() &&
3295 N2.getValueType().isVector() &&
3296 "Insert subvector VTs must be vectors");
3297 assert(VT == N1.getValueType() &&
3298 "Dest and insert subvector source types must match!");
3299 assert(N2.getValueType().getSimpleVT() <= N1.getValueType().getSimpleVT() &&
3300 "Insert subvector must be from smaller vector to larger vector!");
3301 if (isa<ConstantSDNode>(Index.getNode())) {
3302 assert((N2.getValueType().getVectorNumElements() +
3303 cast<ConstantSDNode>(Index.getNode())->getZExtValue()
3304 <= VT.getVectorNumElements())
3305 && "Insert subvector overflow!");
3306 }
3307
3308 // Trivial insertion.
3309 if (VT.getSimpleVT() == N2.getValueType().getSimpleVT())
3310 return N2;
3311 }
3312 break;
3313 }
3314 case ISD::BITCAST:
3315 // Fold bit_convert nodes from a type to themselves.
3316 if (N1.getValueType() == VT)
3317 return N1;
3318 break;
3319 }
3320
3321 // Memoize node if it doesn't produce a flag.
3322 SDNode *N;
3323 SDVTList VTs = getVTList(VT);
3324 if (VT != MVT::Glue) {
3325 SDValue Ops[] = { N1, N2, N3 };
3326 FoldingSetNodeID ID;
3327 AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
3328 void *IP = 0;
3329 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
3330 return SDValue(E, 0);
3331
3332 N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
3333 CSEMap.InsertNode(N, IP);
3334 } else {
3335 N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTs, N1, N2, N3);
3336 }
3337
3338 AllNodes.push_back(N);
3339#ifndef NDEBUG
3340 VerifySDNode(N);
3341#endif
3342 return SDValue(N, 0);
3343}
3344
3345SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
3346 SDValue N1, SDValue N2, SDValue N3,
3347 SDValue N4) {
3348 SDValue Ops[] = { N1, N2, N3, N4 };
3349 return getNode(Opcode, DL, VT, Ops, 4);
3350}
3351
3352SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
3353 SDValue N1, SDValue N2, SDValue N3,
3354 SDValue N4, SDValue N5) {
3355 SDValue Ops[] = { N1, N2, N3, N4, N5 };
3356 return getNode(Opcode, DL, VT, Ops, 5);
3357}
3358
3359/// getStackArgumentTokenFactor - Compute a TokenFactor to force all
3360/// the incoming stack arguments to be loaded from the stack.
3361SDValue SelectionDAG::getStackArgumentTokenFactor(SDValue Chain) {
3362 SmallVector<SDValue, 8> ArgChains;
3363
3364 // Include the original chain at the beginning of the list. When this is
3365 // used by target LowerCall hooks, this helps legalize find the
3366 // CALLSEQ_BEGIN node.
3367 ArgChains.push_back(Chain);
3368
3369 // Add a chain value for each stack argument.
3370 for (SDNode::use_iterator U = getEntryNode().getNode()->use_begin(),
3371 UE = getEntryNode().getNode()->use_end(); U != UE; ++U)
3372 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
3373 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
3374 if (FI->getIndex() < 0)
3375 ArgChains.push_back(SDValue(L, 1));
3376
3377 // Build a tokenfactor for all the chains.
3378 return getNode(ISD::TokenFactor, Chain.getDebugLoc(), MVT::Other,
3379 &ArgChains[0], ArgChains.size());
3380}
3381
3382/// getMemsetValue - Vectorized representation of the memset value
3383/// operand.
3384static SDValue getMemsetValue(SDValue Value, EVT VT, SelectionDAG &DAG,
3385 DebugLoc dl) {
3386 assert(Value.getOpcode() != ISD::UNDEF);
3387
3388 unsigned NumBits = VT.getScalarType().getSizeInBits();
3389 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Value)) {
3390 assert(C->getAPIntValue().getBitWidth() == 8);
3391 APInt Val = APInt::getSplat(NumBits, C->getAPIntValue());
3392 if (VT.isInteger())
3393 return DAG.getConstant(Val, VT);
3394 return DAG.getConstantFP(APFloat(DAG.EVTToAPFloatSemantics(VT), Val), VT);
3395 }
3396
3397 Value = DAG.getNode(ISD::ZERO_EXTEND, dl, VT, Value);
3398 if (NumBits > 8) {
3399 // Use a multiplication with 0x010101... to extend the input to the
3400 // required length.
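    // For example, splatting the byte 0xAB into an i32 computes
    // 0xAB * 0x01010101 == 0xABABABAB.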
3401 APInt Magic = APInt::getSplat(NumBits, APInt(8, 0x01));
3402 Value = DAG.getNode(ISD::MUL, dl, VT, Value, DAG.getConstant(Magic, VT));
3403 }
3404
3405 return Value;
3406}
3407
3408 /// getMemsetStringVal - Similar to getMemsetValue, except this is only
3409 /// used when a memcpy is turned into a memset because the source is a
3410 /// constant string pointer.
3411static SDValue getMemsetStringVal(EVT VT, DebugLoc dl, SelectionDAG &DAG,
3412 const TargetLowering &TLI, StringRef Str) {
3413 // Handle vector with all elements zero.
3414 if (Str.empty()) {
3415 if (VT.isInteger())
3416 return DAG.getConstant(0, VT);
3417 else if (VT == MVT::f32 || VT == MVT::f64)
3418 return DAG.getConstantFP(0.0, VT);
3419 else if (VT.isVector()) {
3420 unsigned NumElts = VT.getVectorNumElements();
3421 MVT EltVT = (VT.getVectorElementType() == MVT::f32) ? MVT::i32 : MVT::i64;
3422 return DAG.getNode(ISD::BITCAST, dl, VT,
3423 DAG.getConstant(0, EVT::getVectorVT(*DAG.getContext(),
3424 EltVT, NumElts)));
3425 } else
3426 llvm_unreachable("Expected type!");
3427 }
3428
3429 assert(!VT.isVector() && "Can't handle vector type here!");
3430 unsigned NumVTBits = VT.getSizeInBits();
3431 unsigned NumVTBytes = NumVTBits / 8;
3432 unsigned NumBytes = std::min(NumVTBytes, unsigned(Str.size()));
3433
3434 APInt Val(NumVTBits, 0);
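  // Pack the leading string bytes into an integer in the target's byte
  // order. For example, packing "abcd" into an i32 on a little-endian
  // target gives 0x64636261 ('d' in the most significant byte).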
3435 if (TLI.isLittleEndian()) {
3436 for (unsigned i = 0; i != NumBytes; ++i)
3437 Val |= (uint64_t)(unsigned char)Str[i] << i*8;
3438 } else {
3439 for (unsigned i = 0; i != NumBytes; ++i)
3440 Val |= (uint64_t)(unsigned char)Str[i] << (NumVTBytes-i-1)*8;
3441 }
3442
3443 // If the "cost" of materializing the integer immediate is 1 or free, then
3444 // it is cost effective to turn the load into the immediate.
3445 const TargetTransformInfo *TTI = DAG.getTargetTransformInfo();
3446 if (TTI->getIntImmCost(Val, VT.getTypeForEVT(*DAG.getContext())) < 2)
3447 return DAG.getConstant(Val, VT);
3448 return SDValue(0, 0);
3449}
3450
3451 /// getMemBasePlusOffset - Returns a node computing Base plus a constant
3452 /// Offset, using the value type of Base.
3453static SDValue getMemBasePlusOffset(SDValue Base, unsigned Offset,
3454 SelectionDAG &DAG) {
3455 EVT VT = Base.getValueType();
3456 return DAG.getNode(ISD::ADD, Base.getDebugLoc(),
3457 VT, Base, DAG.getConstant(Offset, VT));
3458}
3459
3460/// isMemSrcFromString - Returns true if memcpy source is a string constant.
3461///
3462static bool isMemSrcFromString(SDValue Src, StringRef &Str) {
3463 unsigned SrcDelta = 0;
3464 GlobalAddressSDNode *G = NULL;
3465 if (Src.getOpcode() == ISD::GlobalAddress)
3466 G = cast<GlobalAddressSDNode>(Src);
3467 else if (Src.getOpcode() == ISD::ADD &&
3468 Src.getOperand(0).getOpcode() == ISD::GlobalAddress &&
3469 Src.getOperand(1).getOpcode() == ISD::Constant) {
3470 G = cast<GlobalAddressSDNode>(Src.getOperand(0));
3471 SrcDelta = cast<ConstantSDNode>(Src.getOperand(1))->getZExtValue();
3472 }
3473 if (!G)
3474 return false;
3475
3476 return getConstantStringInfo(G->getGlobal(), Str, SrcDelta, false);
3477}
3478
3479 /// FindOptimalMemOpLowering - Determines the optimal series of memory ops
3480/// to replace the memset / memcpy. Return true if the number of memory ops
3481/// is below the threshold. It returns the types of the sequence of
3482/// memory ops to perform memset / memcpy by reference.
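/// For example, copying 15 bytes with legal i64 operations yields the
/// sequence {i64, i32, i16, i8}; with AllowOverlap and fast unaligned
/// accesses it instead yields two i64 ops, the second overlapping the
/// first by one byte.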
3483static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
3484 unsigned Limit, uint64_t Size,
3485 unsigned DstAlign, unsigned SrcAlign,
3486 bool IsMemset,
3487 bool ZeroMemset,
3488 bool MemcpyStrSrc,
3489 bool AllowOverlap,
3490 SelectionDAG &DAG,
3491 const TargetLowering &TLI) {
3492 assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
3493 "Expecting memcpy / memset source to meet alignment requirement!");
3494 // If 'SrcAlign' is zero, that means the memory operation does not need to
3495 // load the value, i.e. memset or memcpy from constant string. Otherwise,
3496 // it's the inferred alignment of the source. 'DstAlign', on the other hand,
3497 // is the specified alignment of the memory operation. If it is zero, that
3498 // means it's possible to change the alignment of the destination.
3499 // 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
3500 // not need to be loaded.
3501 EVT VT = TLI.getOptimalMemOpType(Size, DstAlign, SrcAlign,
3502 IsMemset, ZeroMemset, MemcpyStrSrc,
3503 DAG.getMachineFunction());
3504
3505 if (VT == MVT::Other) {
3506 if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() ||
3507 TLI.allowsUnalignedMemoryAccesses(VT)) {
3508 VT = TLI.getPointerTy();
3509 } else {
3510 switch (DstAlign & 7) {
3511 case 0: VT = MVT::i64; break;
3512 case 4: VT = MVT::i32; break;
3513 case 2: VT = MVT::i16; break;
3514 default: VT = MVT::i8; break;
3515 }
3516 }
3517
3518 MVT LVT = MVT::i64;
3519 while (!TLI.isTypeLegal(LVT))
3520 LVT = (MVT::SimpleValueType)(LVT.SimpleTy - 1);
3521 assert(LVT.isInteger());
3522
3523 if (VT.bitsGT(LVT))
3524 VT = LVT;
3525 }
3526
3527 unsigned NumMemOps = 0;
3528 while (Size != 0) {
3529 unsigned VTSize = VT.getSizeInBits() / 8;
3530 while (VTSize > Size) {
3531 // For now, only use non-vector loads / stores for the left-over pieces.
3532 EVT NewVT = VT;
3533 unsigned NewVTSize;
3534
3535 bool Found = false;
3536 if (VT.isVector() || VT.isFloatingPoint()) {
3537 NewVT = (VT.getSizeInBits() > 64) ? MVT::i64 : MVT::i32;
3538 if (TLI.isOperationLegalOrCustom(ISD::STORE, NewVT) &&
3539 TLI.isSafeMemOpType(NewVT.getSimpleVT()))
3540 Found = true;
3541 else if (NewVT == MVT::i64 &&
3542 TLI.isOperationLegalOrCustom(ISD::STORE, MVT::f64) &&
3543 TLI.isSafeMemOpType(MVT::f64)) {
3544 // i64 is usually not legal on 32-bit targets, but f64 may be.
3545 NewVT = MVT::f64;
3546 Found = true;
3547 }
3548 }
3549
3550 if (!Found) {
3551 do {
3552 NewVT = (MVT::SimpleValueType)(NewVT.getSimpleVT().SimpleTy - 1);
3553 if (NewVT == MVT::i8)
3554 break;
3555 } while (!TLI.isSafeMemOpType(NewVT.getSimpleVT()));
3556 }
3557 NewVTSize = NewVT.getSizeInBits() / 8;
3558
3559 // If the new VT cannot cover all of the remaining bits, then consider
3560 // issuing a (or a pair of) unaligned and overlapping load / store.
3561 // FIXME: Only does this for 64-bit or more since we don't have proper
3562 // cost model for unaligned load / store.
3563 bool Fast;
3564 if (NumMemOps && AllowOverlap &&
3565 VTSize >= 8 && NewVTSize < Size &&
3566 TLI.allowsUnalignedMemoryAccesses(VT, &Fast) && Fast)
3567 VTSize = Size;
3568 else {
3569 VT = NewVT;
3570 VTSize = NewVTSize;
3571 }
3572 }
3573
3574 if (++NumMemOps > Limit)
3575 return false;
3576
3577 MemOps.push_back(VT);
3578 Size -= VTSize;
3579 }
3580
3581 return true;
3582}
3583
3584static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
3585 SDValue Chain, SDValue Dst,
3586 SDValue Src, uint64_t Size,
3587 unsigned Align, bool isVol,
3588 bool AlwaysInline,
3589 MachinePointerInfo DstPtrInfo,
3590 MachinePointerInfo SrcPtrInfo) {
3591 // Turn a memcpy of undef to nop.
3592 if (Src.getOpcode() == ISD::UNDEF)
3593 return Chain;
3594
3595 // Expand memcpy to a series of load and store ops if the size operand falls
3596 // below a certain threshold.
3597 // TODO: In the AlwaysInline case, if the size is big then generate a loop
3598 // rather than maybe a humongous number of loads and stores.
3599 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3600 std::vector<EVT> MemOps;
3601 bool DstAlignCanChange = false;
3602 MachineFunction &MF = DAG.getMachineFunction();
3603 MachineFrameInfo *MFI = MF.getFrameInfo();
3604 bool OptSize =
3605 MF.getFunction()->getAttributes().
3606 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3607 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3608 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3609 DstAlignCanChange = true;
3610 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3611 if (Align > SrcAlign)
3612 SrcAlign = Align;
3613 StringRef Str;
3614 bool CopyFromStr = isMemSrcFromString(Src, Str);
3615 bool isZeroStr = CopyFromStr && Str.empty();
3616 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemcpy(OptSize);
3617
3618 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3619 (DstAlignCanChange ? 0 : Align),
3620 (isZeroStr ? 0 : SrcAlign),
3621 false, false, CopyFromStr, true, DAG, TLI))
3622 return SDValue();
3623
3624 if (DstAlignCanChange) {
3625 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3626 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3627
3628 // Don't promote to an alignment that would require dynamic stack
3629 // realignment.
3630 const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
3631 if (!TRI->needsStackRealignment(MF))
3632 while (NewAlign > Align &&
3633 TLI.getDataLayout()->exceedsNaturalStackAlignment(NewAlign))
3634 NewAlign /= 2;
3635
3636 if (NewAlign > Align) {
3637 // Give the stack frame object a larger alignment if needed.
3638 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3639 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3640 Align = NewAlign;
3641 }
3642 }
3643
3644 SmallVector<SDValue, 8> OutChains;
3645 unsigned NumMemOps = MemOps.size();
3646 uint64_t SrcOff = 0, DstOff = 0;
3647 for (unsigned i = 0; i != NumMemOps; ++i) {
3648 EVT VT = MemOps[i];
3649 unsigned VTSize = VT.getSizeInBits() / 8;
3650 SDValue Value, Store;
3651
3652 if (VTSize > Size) {
3653 // Issuing an unaligned load / store pair that overlaps with the previous
3654 // pair. Adjust the offset accordingly.
3655 assert(i == NumMemOps-1 && i != 0);
3656 SrcOff -= VTSize - Size;
3657 DstOff -= VTSize - Size;
3658 }
3659
3660 if (CopyFromStr &&
3661 (isZeroStr || (VT.isInteger() && !VT.isVector()))) {
3662 // It's unlikely a store of a vector immediate can be done in a single
3663 // instruction. It would require a load from a constant pool first.
3664 // We only handle zero vectors here.
3665 // FIXME: Handle other cases where store of vector immediate is done in
3666 // a single instruction.
3667 Value = getMemsetStringVal(VT, dl, DAG, TLI, Str.substr(SrcOff));
3668 if (Value.getNode())
3669 Store = DAG.getStore(Chain, dl, Value,
3670 getMemBasePlusOffset(Dst, DstOff, DAG),
3671 DstPtrInfo.getWithOffset(DstOff), isVol,
3672 false, Align);
3673 }
3674
3675 if (!Store.getNode()) {
3676 // The type might not be legal for the target. This should only happen
3677 // if the type is smaller than a legal type, as on PPC, so the right
3678 // thing to do is generate a LoadExt/StoreTrunc pair. These simplify
3679 // to Load/Store if NVT==VT.
3680 // FIXME does the case above also need this?
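      // For example, storing an i16 piece on a target whose smallest legal
      // integer type is i32 becomes an extending load to i32 followed by a
      // truncating store of the low 16 bits.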
3681 EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
3682 assert(NVT.bitsGE(VT));
3683 Value = DAG.getExtLoad(ISD::EXTLOAD, dl, NVT, Chain,
3684 getMemBasePlusOffset(Src, SrcOff, DAG),
3685 SrcPtrInfo.getWithOffset(SrcOff), VT, isVol, false,
3686 MinAlign(SrcAlign, SrcOff));
3687 Store = DAG.getTruncStore(Chain, dl, Value,
3688 getMemBasePlusOffset(Dst, DstOff, DAG),
3689 DstPtrInfo.getWithOffset(DstOff), VT, isVol,
3690 false, Align);
3691 }
3692 OutChains.push_back(Store);
3693 SrcOff += VTSize;
3694 DstOff += VTSize;
3695 Size -= VTSize;
3696 }
3697
3698 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3699 &OutChains[0], OutChains.size());
3700}
3701
3702static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
3703 SDValue Chain, SDValue Dst,
3704 SDValue Src, uint64_t Size,
3705 unsigned Align, bool isVol,
3706 bool AlwaysInline,
3707 MachinePointerInfo DstPtrInfo,
3708 MachinePointerInfo SrcPtrInfo) {
3709 // Turn a memmove of undef to nop.
3710 if (Src.getOpcode() == ISD::UNDEF)
3711 return Chain;
3712
3713 // Expand memmove to a series of load and store ops if the size operand falls
3714 // below a certain threshold.
3715 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3716 std::vector<EVT> MemOps;
3717 bool DstAlignCanChange = false;
3718 MachineFunction &MF = DAG.getMachineFunction();
3719 MachineFrameInfo *MFI = MF.getFrameInfo();
3720 bool OptSize = MF.getFunction()->getAttributes().
3721 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3722 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3723 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3724 DstAlignCanChange = true;
3725 unsigned SrcAlign = DAG.InferPtrAlignment(Src);
3726 if (Align > SrcAlign)
3727 SrcAlign = Align;
3728 unsigned Limit = AlwaysInline ? ~0U : TLI.getMaxStoresPerMemmove(OptSize);
3729
3730 if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
3731 (DstAlignCanChange ? 0 : Align), SrcAlign,
3732 false, false, false, false, DAG, TLI))
3733 return SDValue();
3734
3735 if (DstAlignCanChange) {
3736 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3737 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3738 if (NewAlign > Align) {
3739 // Give the stack frame object a larger alignment if needed.
3740 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3741 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3742 Align = NewAlign;
3743 }
3744 }
3745
3746 uint64_t SrcOff = 0, DstOff = 0;
3747 SmallVector<SDValue, 8> LoadValues;
3748 SmallVector<SDValue, 8> LoadChains;
3749 SmallVector<SDValue, 8> OutChains;
3750 unsigned NumMemOps = MemOps.size();
3751 for (unsigned i = 0; i < NumMemOps; i++) {
3752 EVT VT = MemOps[i];
3753 unsigned VTSize = VT.getSizeInBits() / 8;
3754 SDValue Value, Store;
3755
3756 Value = DAG.getLoad(VT, dl, Chain,
3757 getMemBasePlusOffset(Src, SrcOff, DAG),
3758 SrcPtrInfo.getWithOffset(SrcOff), isVol,
3759 false, false, SrcAlign);
3760 LoadValues.push_back(Value);
3761 LoadChains.push_back(Value.getValue(1));
3762 SrcOff += VTSize;
3763 }
3764 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3765 &LoadChains[0], LoadChains.size());
3766 OutChains.clear();
3767 for (unsigned i = 0; i < NumMemOps; i++) {
3768 EVT VT = MemOps[i];
3769 unsigned VTSize = VT.getSizeInBits() / 8;
3770 SDValue Value, Store;
3771
3772 Store = DAG.getStore(Chain, dl, LoadValues[i],
3773 getMemBasePlusOffset(Dst, DstOff, DAG),
3774 DstPtrInfo.getWithOffset(DstOff), isVol, false, Align);
3775 OutChains.push_back(Store);
3776 DstOff += VTSize;
3777 }
3778
3779 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3780 &OutChains[0], OutChains.size());
3781}
3782
3783static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
3784 SDValue Chain, SDValue Dst,
3785 SDValue Src, uint64_t Size,
3786 unsigned Align, bool isVol,
3787 MachinePointerInfo DstPtrInfo) {
3788 // Turn a memset of undef to nop.
3789 if (Src.getOpcode() == ISD::UNDEF)
3790 return Chain;
3791
3792 // Expand memset to a series of load/store ops if the size operand
3793 // falls below a certain threshold.
3794 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3795 std::vector<EVT> MemOps;
3796 bool DstAlignCanChange = false;
3797 MachineFunction &MF = DAG.getMachineFunction();
3798 MachineFrameInfo *MFI = MF.getFrameInfo();
3799 bool OptSize = MF.getFunction()->getAttributes().
3800 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
3801 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Dst);
3802 if (FI && !MFI->isFixedObjectIndex(FI->getIndex()))
3803 DstAlignCanChange = true;
3804 bool IsZeroVal =
3805 isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
3806 if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
3807 Size, (DstAlignCanChange ? 0 : Align), 0,
3808 true, IsZeroVal, false, true, DAG, TLI))
3809 return SDValue();
3810
3811 if (DstAlignCanChange) {
3812 Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
3813 unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
3814 if (NewAlign > Align) {
3815 // Give the stack frame object a larger alignment if needed.
3816 if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
3817 MFI->setObjectAlignment(FI->getIndex(), NewAlign);
3818 Align = NewAlign;
3819 }
3820 }
3821
3822 SmallVector<SDValue, 8> OutChains;
3823 uint64_t DstOff = 0;
3824 unsigned NumMemOps = MemOps.size();
3825
3826 // Find the largest store and generate the bit pattern for it.
3827 EVT LargestVT = MemOps[0];
3828 for (unsigned i = 1; i < NumMemOps; i++)
3829 if (MemOps[i].bitsGT(LargestVT))
3830 LargestVT = MemOps[i];
3831 SDValue MemSetValue = getMemsetValue(Src, LargestVT, DAG, dl);
3832
3833 for (unsigned i = 0; i < NumMemOps; i++) {
3834 EVT VT = MemOps[i];
3835 unsigned VTSize = VT.getSizeInBits() / 8;
3836 if (VTSize > Size) {
3837 // Issuing an unaligned load / store pair that overlaps with the previous
3838 // pair. Adjust the offset accordingly.
3839 assert(i == NumMemOps-1 && i != 0);
3840 DstOff -= VTSize - Size;
3841 }
3842
3843 // If this store is smaller than the largest store, see whether we can get
3844 // the smaller value for free with a truncate.
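    // For example, if the largest pattern is the i64 splat
    // 0xABABABABABABABAB and this store is i16, truncation yields 0xABAB
    // without recomputing the memset value.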
3845 SDValue Value = MemSetValue;
3846 if (VT.bitsLT(LargestVT)) {
3847 if (!LargestVT.isVector() && !VT.isVector() &&
3848 TLI.isTruncateFree(LargestVT, VT))
3849 Value = DAG.getNode(ISD::TRUNCATE, dl, VT, MemSetValue);
3850 else
3851 Value = getMemsetValue(Src, VT, DAG, dl);
3852 }
3853 assert(Value.getValueType() == VT && "Value with wrong type.");
3854 SDValue Store = DAG.getStore(Chain, dl, Value,
3855 getMemBasePlusOffset(Dst, DstOff, DAG),
3856 DstPtrInfo.getWithOffset(DstOff),
3857 isVol, false, Align);
3858 OutChains.push_back(Store);
3859 DstOff += VT.getSizeInBits() / 8;
3860 Size -= VTSize;
3861 }
3862
3863 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
3864 &OutChains[0], OutChains.size());
3865}
3866
3867SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
3868 SDValue Src, SDValue Size,
3869 unsigned Align, bool isVol, bool AlwaysInline,
3870 MachinePointerInfo DstPtrInfo,
3871 MachinePointerInfo SrcPtrInfo) {
3872 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
3873
3874 // Check to see if we should lower the memcpy to loads and stores first.
3875 // For cases within the target-specified limits, this is the best choice.
3876 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3877 if (ConstantSize) {
3878 // Memcpy with size zero? Just return the original chain.
3879 if (ConstantSize->isNullValue())
3880 return Chain;
3881
3882 SDValue Result = getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
3883 ConstantSize->getZExtValue(),Align,
3884 isVol, false, DstPtrInfo, SrcPtrInfo);
3885 if (Result.getNode())
3886 return Result;
3887 }
3888
3889 // Then check to see if we should lower the memcpy with target-specific
3890 // code. If the target chooses to do this, this is the next best.
3891 SDValue Result =
3892 TSI.EmitTargetCodeForMemcpy(*this, dl, Chain, Dst, Src, Size, Align,
3893 isVol, AlwaysInline,
3894 DstPtrInfo, SrcPtrInfo);
3895 if (Result.getNode())
3896 return Result;
3897
3898 // If we really need inline code and the target declined to provide it,
3899 // use a (potentially long) sequence of loads and stores.
3900 if (AlwaysInline) {
3901 assert(ConstantSize && "AlwaysInline requires a constant size!");
3902 return getMemcpyLoadsAndStores(*this, dl, Chain, Dst, Src,
3903 ConstantSize->getZExtValue(), Align, isVol,
3904 true, DstPtrInfo, SrcPtrInfo);
3905 }
3906
3907 // FIXME: If the memcpy is volatile (isVol), lowering it to a plain libc
3908 // memcpy is not guaranteed to be safe. libc memcpys aren't required to
3909 // respect volatile, so they may do things like read or write memory
3910 // beyond the given memory regions. But fixing this isn't easy, and most
3911 // people don't care.
3912
3913 // Emit a library call.
3914 TargetLowering::ArgListTy Args;
3915 TargetLowering::ArgListEntry Entry;
3916 Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
3917 Entry.Node = Dst; Args.push_back(Entry);
3918 Entry.Node = Src; Args.push_back(Entry);
3919 Entry.Node = Size; Args.push_back(Entry);
3920 // FIXME: pass in DebugLoc
3921 TargetLowering::
3922 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
3923 false, false, false, false, 0,
3924 TLI.getLibcallCallingConv(RTLIB::MEMCPY),
3925 /*isTailCall=*/false,
3926 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
3927 getExternalSymbol(TLI.getLibcallName(RTLIB::MEMCPY),
3928 TLI.getPointerTy()),
3929 Args, *this, dl);
3930 std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
3931
3932 return CallResult.second;
3933}
3934
3935SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
3936 SDValue Src, SDValue Size,
3937 unsigned Align, bool isVol,
3938 MachinePointerInfo DstPtrInfo,
3939 MachinePointerInfo SrcPtrInfo) {
3940 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
3941
3942 // Check to see if we should lower the memmove to loads and stores first.
3943 // For cases within the target-specified limits, this is the best choice.
3944 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
3945 if (ConstantSize) {
3946 // Memmove with size zero? Just return the original chain.
3947 if (ConstantSize->isNullValue())
3948 return Chain;
3949
3950 SDValue Result =
3951 getMemmoveLoadsAndStores(*this, dl, Chain, Dst, Src,
3952 ConstantSize->getZExtValue(), Align, isVol,
3953 false, DstPtrInfo, SrcPtrInfo);
3954 if (Result.getNode())
3955 return Result;
3956 }
3957
3958 // Then check to see if we should lower the memmove with target-specific
3959 // code. If the target chooses to do this, this is the next best.
3960 SDValue Result =
3961 TSI.EmitTargetCodeForMemmove(*this, dl, Chain, Dst, Src, Size, Align, isVol,
3962 DstPtrInfo, SrcPtrInfo);
3963 if (Result.getNode())
3964 return Result;
3965
3966 // FIXME: If the memmove is volatile, lowering it to plain libc memmove may
3967 // not be safe. See memcpy above for more details.
3968
3969 // Emit a library call.
3970 TargetLowering::ArgListTy Args;
3971 TargetLowering::ArgListEntry Entry;
3972 Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
3973 Entry.Node = Dst; Args.push_back(Entry);
3974 Entry.Node = Src; Args.push_back(Entry);
3975 Entry.Node = Size; Args.push_back(Entry);
3976 // FIXME: pass in DebugLoc
3977 TargetLowering::
3978 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
3979 false, false, false, false, 0,
3980 TLI.getLibcallCallingConv(RTLIB::MEMMOVE),
3981 /*isTailCall=*/false,
3982 /*doesNotReturn=*/false, /*isReturnValueUsed=*/false,
3983 getExternalSymbol(TLI.getLibcallName(RTLIB::MEMMOVE),
3984 TLI.getPointerTy()),
3985 Args, *this, dl);
3986 std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
3987
3988 return CallResult.second;
3989}
3990
3991SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
3992 SDValue Src, SDValue Size,
3993 unsigned Align, bool isVol,
3994 MachinePointerInfo DstPtrInfo) {
3995 assert(Align && "The SDAG layer expects explicit alignment and reserves 0");
3996
3997 // Check to see if we should lower the memset to stores first.
3998 // For cases within the target-specified limits, this is the best choice.
3999 ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size);
4000 if (ConstantSize) {
4001 // Memset with size zero? Just return the original chain.
4002 if (ConstantSize->isNullValue())
4003 return Chain;
4004
4005 SDValue Result =
4006 getMemsetStores(*this, dl, Chain, Dst, Src, ConstantSize->getZExtValue(),
4007 Align, isVol, DstPtrInfo);
4008
4009 if (Result.getNode())
4010 return Result;
4011 }
4012
4013 // Then check to see if we should lower the memset with target-specific
4014 // code. If the target chooses to do this, this is the next best.
4015 SDValue Result =
4016 TSI.EmitTargetCodeForMemset(*this, dl, Chain, Dst, Src, Size, Align, isVol,
4017 DstPtrInfo);
4018 if (Result.getNode())
4019 return Result;
4020
4021 // Emit a library call.
4022 Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext());
4023 TargetLowering::ArgListTy Args;
4024 TargetLowering::ArgListEntry Entry;
4025 Entry.Node = Dst; Entry.Ty = IntPtrTy;
4026 Args.push_back(Entry);
4027 // Extend or truncate the argument to be an i32 value for the call.
4028 if (Src.getValueType().bitsGT(MVT::i32))
4029 Src = getNode(ISD::TRUNCATE, dl, MVT::i32, Src);
4030 else
4031 Src = getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
4032 Entry.Node = Src;
4033 Entry.Ty = Type::getInt32Ty(*getContext());
4034 Entry.isSExt = true;
4035 Args.push_back(Entry);
4036 Entry.Node = Size;
4037 Entry.Ty = IntPtrTy;
4038 Entry.isSExt = false;
4039 Args.push_back(Entry);
4040 // FIXME: pass in DebugLoc
4041 TargetLowering::
4042 CallLoweringInfo CLI(Chain, Type::getVoidTy(*getContext()),
4043 false, false, false, false, 0,
4044 TLI.getLibcallCallingConv(RTLIB::MEMSET),
4045 /*isTailCall=*/false,
4046 /*doesNotReturn*/false, /*isReturnValueUsed=*/false,
4047 getExternalSymbol(TLI.getLibcallName(RTLIB::MEMSET),
4048 TLI.getPointerTy()),
4049 Args, *this, dl);
4050 std::pair<SDValue,SDValue> CallResult = TLI.LowerCallTo(CLI);
4051
4052 return CallResult.second;
4053}
4054
4055SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
4056 SDValue Chain, SDValue Ptr, SDValue Cmp,
4057 SDValue Swp, MachinePointerInfo PtrInfo,
4058 unsigned Alignment,
4059 AtomicOrdering Ordering,
4060 SynchronizationScope SynchScope) {
4061 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4062 Alignment = getEVTAlignment(MemVT);
4063
4064 MachineFunction &MF = getMachineFunction();
4065
4066 // All atomics are load and store, except for ATOMIC_LOAD and ATOMIC_STORE.
4067 // For now, atomics are considered to be volatile always.
4068 // FIXME: Volatile isn't really correct; we should keep track of atomic
4069 // orderings in the memoperand.
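 // For example, an ISD::ATOMIC_LOAD is tagged MOVolatile | MOLoad only,
 // while an atomicrmw-style opcode gets MOVolatile | MOLoad | MOStore.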
4070 unsigned Flags = MachineMemOperand::MOVolatile;
4071 if (Opcode != ISD::ATOMIC_STORE)
4072 Flags |= MachineMemOperand::MOLoad;
4073 if (Opcode != ISD::ATOMIC_LOAD)
4074 Flags |= MachineMemOperand::MOStore;
4075
4076 MachineMemOperand *MMO =
4077 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment);
4078
4079 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Cmp, Swp, MMO,
4080 Ordering, SynchScope);
4081}
4082
4083SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
4084 SDValue Chain,
4085 SDValue Ptr, SDValue Cmp,
4086 SDValue Swp, MachineMemOperand *MMO,
4087 AtomicOrdering Ordering,
4088 SynchronizationScope SynchScope) {
4089 assert(Opcode == ISD::ATOMIC_CMP_SWAP && "Invalid Atomic Op");
4090 assert(Cmp.getValueType() == Swp.getValueType() && "Invalid Atomic Op Types");
4091
4092 EVT VT = Cmp.getValueType();
4093
4094 SDVTList VTs = getVTList(VT, MVT::Other);
4095 FoldingSetNodeID ID;
4096 ID.AddInteger(MemVT.getRawBits());
4097 SDValue Ops[] = {Chain, Ptr, Cmp, Swp};
4098 AddNodeIDNode(ID, Opcode, VTs, Ops, 4);
4099 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4100 void* IP = 0;
4101 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4102 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4103 return SDValue(E, 0);
4104 }
4105 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain,
4106 Ptr, Cmp, Swp, MMO, Ordering,
4107 SynchScope);
4108 CSEMap.InsertNode(N, IP);
4109 AllNodes.push_back(N);
4110 return SDValue(N, 0);
4111}
4112
4113SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
4114 SDValue Chain,
4115 SDValue Ptr, SDValue Val,
4116 const Value* PtrVal,
4117 unsigned Alignment,
4118 AtomicOrdering Ordering,
4119 SynchronizationScope SynchScope) {
4120 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4121 Alignment = getEVTAlignment(MemVT);
4122
4123 MachineFunction &MF = getMachineFunction();
4124 // An atomic store does not load. An atomic load does not store.
4125 // (An atomicrmw obviously both loads and stores.)
4126 // For now, atomics are considered to be volatile always, and they are
4127 // chained as such.
4128 // FIXME: Volatile isn't really correct; we should keep track of atomic
4129 // orderings in the memoperand.
4130 unsigned Flags = MachineMemOperand::MOVolatile;
4131 if (Opcode != ISD::ATOMIC_STORE)
4132 Flags |= MachineMemOperand::MOLoad;
4133 if (Opcode != ISD::ATOMIC_LOAD)
4134 Flags |= MachineMemOperand::MOStore;
4135
4136 MachineMemOperand *MMO =
4137 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4138 MemVT.getStoreSize(), Alignment);
4139
4140 return getAtomic(Opcode, dl, MemVT, Chain, Ptr, Val, MMO,
4141 Ordering, SynchScope);
4142}
4143
4144SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
4145 SDValue Chain,
4146 SDValue Ptr, SDValue Val,
4147 MachineMemOperand *MMO,
4148 AtomicOrdering Ordering,
4149 SynchronizationScope SynchScope) {
4150 assert((Opcode == ISD::ATOMIC_LOAD_ADD ||
4151 Opcode == ISD::ATOMIC_LOAD_SUB ||
4152 Opcode == ISD::ATOMIC_LOAD_AND ||
4153 Opcode == ISD::ATOMIC_LOAD_OR ||
4154 Opcode == ISD::ATOMIC_LOAD_XOR ||
4155 Opcode == ISD::ATOMIC_LOAD_NAND ||
4156 Opcode == ISD::ATOMIC_LOAD_MIN ||
4157 Opcode == ISD::ATOMIC_LOAD_MAX ||
4158 Opcode == ISD::ATOMIC_LOAD_UMIN ||
4159 Opcode == ISD::ATOMIC_LOAD_UMAX ||
4160 Opcode == ISD::ATOMIC_SWAP ||
4161 Opcode == ISD::ATOMIC_STORE) &&
4162 "Invalid Atomic Op");
4163
4164 EVT VT = Val.getValueType();
4165
4166 SDVTList VTs = Opcode == ISD::ATOMIC_STORE ? getVTList(MVT::Other) :
4167 getVTList(VT, MVT::Other);
4168 FoldingSetNodeID ID;
4169 ID.AddInteger(MemVT.getRawBits());
4170 SDValue Ops[] = {Chain, Ptr, Val};
4171 AddNodeIDNode(ID, Opcode, VTs, Ops, 3);
4172 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4173 void* IP = 0;
4174 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4175 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4176 return SDValue(E, 0);
4177 }
4178 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain,
4179 Ptr, Val, MMO,
4180 Ordering, SynchScope);
4181 CSEMap.InsertNode(N, IP);
4182 AllNodes.push_back(N);
4183 return SDValue(N, 0);
4184}
4185
4186SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
4187 EVT VT, SDValue Chain,
4188 SDValue Ptr,
4189 const Value* PtrVal,
4190 unsigned Alignment,
4191 AtomicOrdering Ordering,
4192 SynchronizationScope SynchScope) {
4193 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4194 Alignment = getEVTAlignment(MemVT);
4195
4196 MachineFunction &MF = getMachineFunction();
4197 // An atomic store does not load. An atomic load does not store.
4198 // (An atomicrmw obviously both loads and stores.)
4199 // For now, atomics are considered to be volatile always, and they are
4200 // chained as such.
4201 // FIXME: Volatile isn't really correct; we should keep track of atomic
4202 // orderings in the memoperand.
4203 unsigned Flags = MachineMemOperand::MOVolatile;
4204 if (Opcode != ISD::ATOMIC_STORE)
4205 Flags |= MachineMemOperand::MOLoad;
4206 if (Opcode != ISD::ATOMIC_LOAD)
4207 Flags |= MachineMemOperand::MOStore;
4208
4209 MachineMemOperand *MMO =
4210 MF.getMachineMemOperand(MachinePointerInfo(PtrVal), Flags,
4211 MemVT.getStoreSize(), Alignment);
4212
4213 return getAtomic(Opcode, dl, MemVT, VT, Chain, Ptr, MMO,
4214 Ordering, SynchScope);
4215}
4216
4217SDValue SelectionDAG::getAtomic(unsigned Opcode, DebugLoc dl, EVT MemVT,
4218 EVT VT, SDValue Chain,
4219 SDValue Ptr,
4220 MachineMemOperand *MMO,
4221 AtomicOrdering Ordering,
4222 SynchronizationScope SynchScope) {
4223 assert(Opcode == ISD::ATOMIC_LOAD && "Invalid Atomic Op");
4224
4225 SDVTList VTs = getVTList(VT, MVT::Other);
4226 FoldingSetNodeID ID;
4227 ID.AddInteger(MemVT.getRawBits());
4228 SDValue Ops[] = {Chain, Ptr};
4229 AddNodeIDNode(ID, Opcode, VTs, Ops, 2);
4230 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4231 void* IP = 0;
4232 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4233 cast<AtomicSDNode>(E)->refineAlignment(MMO);
4234 return SDValue(E, 0);
4235 }
4236 SDNode *N = new (NodeAllocator) AtomicSDNode(Opcode, dl, VTs, MemVT, Chain,
4237 Ptr, MMO, Ordering, SynchScope);
4238 CSEMap.InsertNode(N, IP);
4239 AllNodes.push_back(N);
4240 return SDValue(N, 0);
4241}
4242
4243/// getMergeValues - Create a MERGE_VALUES node from the given operands.
4244SDValue SelectionDAG::getMergeValues(const SDValue *Ops, unsigned NumOps,
4245 DebugLoc dl) {
4246 if (NumOps == 1)
4247 return Ops[0];
4248
4249 SmallVector<EVT, 4> VTs;
4250 VTs.reserve(NumOps);
4251 for (unsigned i = 0; i < NumOps; ++i)
4252 VTs.push_back(Ops[i].getValueType());
4253 return getNode(ISD::MERGE_VALUES, dl, getVTList(&VTs[0], NumOps),
4254 Ops, NumOps);
4255}
4256
4257SDValue
4258SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl,
4259 const EVT *VTs, unsigned NumVTs,
4260 const SDValue *Ops, unsigned NumOps,
4261 EVT MemVT, MachinePointerInfo PtrInfo,
4262 unsigned Align, bool Vol,
4263 bool ReadMem, bool WriteMem) {
4264 return getMemIntrinsicNode(Opcode, dl, makeVTList(VTs, NumVTs), Ops, NumOps,
4265 MemVT, PtrInfo, Align, Vol,
4266 ReadMem, WriteMem);
4267}
4268
4269SDValue
4270SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
4271 const SDValue *Ops, unsigned NumOps,
4272 EVT MemVT, MachinePointerInfo PtrInfo,
4273 unsigned Align, bool Vol,
4274 bool ReadMem, bool WriteMem) {
4275 if (Align == 0) // Ensure that codegen never sees alignment 0
4276 Align = getEVTAlignment(MemVT);
4277
4278 MachineFunction &MF = getMachineFunction();
4279 unsigned Flags = 0;
4280 if (WriteMem)
4281 Flags |= MachineMemOperand::MOStore;
4282 if (ReadMem)
4283 Flags |= MachineMemOperand::MOLoad;
4284 if (Vol)
4285 Flags |= MachineMemOperand::MOVolatile;
4286 MachineMemOperand *MMO =
4287 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Align);
4288
4289 return getMemIntrinsicNode(Opcode, dl, VTList, Ops, NumOps, MemVT, MMO);
4290}
4291
4292SDValue
4293SelectionDAG::getMemIntrinsicNode(unsigned Opcode, DebugLoc dl, SDVTList VTList,
4294 const SDValue *Ops, unsigned NumOps,
4295 EVT MemVT, MachineMemOperand *MMO) {
4296 assert((Opcode == ISD::INTRINSIC_VOID ||
4297 Opcode == ISD::INTRINSIC_W_CHAIN ||
4298 Opcode == ISD::PREFETCH ||
4299 Opcode == ISD::LIFETIME_START ||
4300 Opcode == ISD::LIFETIME_END ||
4301 (Opcode <= INT_MAX &&
4302 (int)Opcode >= ISD::FIRST_TARGET_MEMORY_OPCODE)) &&
4303 "Opcode is not a memory-accessing opcode!");
4304
4305 // Memoize the node unless it returns a flag.
4306 MemIntrinsicSDNode *N;
4307 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4308 FoldingSetNodeID ID;
4309 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4310 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4311 void *IP = 0;
4312 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4313 cast<MemIntrinsicSDNode>(E)->refineAlignment(MMO);
4314 return SDValue(E, 0);
4315 }
4316
4317 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps,
4318 MemVT, MMO);
4319 CSEMap.InsertNode(N, IP);
4320 } else {
4321 N = new (NodeAllocator) MemIntrinsicSDNode(Opcode, dl, VTList, Ops, NumOps,
4322 MemVT, MMO);
4323 }
4324 AllNodes.push_back(N);
4325 return SDValue(N, 0);
4326}
4327
4328/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4329/// MachinePointerInfo record from it. This is particularly useful because the
4330/// code generator has many cases where it doesn't bother passing in a
4331/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
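/// For example, a pointer of the form (ADD (FrameIndex 2), (Constant 8)) is
/// modeled as the fixed-stack location FI#2 at offset 8.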
4332static MachinePointerInfo InferPointerInfo(SDValue Ptr, int64_t Offset = 0) {
4333 // If this is FI+Offset, we can model it.
4334 if (const FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr))
4335 return MachinePointerInfo::getFixedStack(FI->getIndex(), Offset);
4336
4337 // If this is (FI+Offset1)+Offset2, we can model it.
4338 if (Ptr.getOpcode() != ISD::ADD ||
4339 !isa<ConstantSDNode>(Ptr.getOperand(1)) ||
4340 !isa<FrameIndexSDNode>(Ptr.getOperand(0)))
4341 return MachinePointerInfo();
4342
4343 int FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
4344 return MachinePointerInfo::getFixedStack(FI, Offset+
4345 cast<ConstantSDNode>(Ptr.getOperand(1))->getSExtValue());
4346}
4347
4348/// InferPointerInfo - If the specified ptr/offset is a frame index, infer a
4349/// MachinePointerInfo record from it. This is particularly useful because the
4350/// code generator has many cases where it doesn't bother passing in a
4351/// MachinePointerInfo to getLoad or getStore when it has "FI+Cst".
4352static MachinePointerInfo InferPointerInfo(SDValue Ptr, SDValue OffsetOp) {
4353 // If the 'Offset' value isn't a constant, we can't handle this.
4354 if (ConstantSDNode *OffsetNode = dyn_cast<ConstantSDNode>(OffsetOp))
4355 return InferPointerInfo(Ptr, OffsetNode->getSExtValue());
4356 if (OffsetOp.getOpcode() == ISD::UNDEF)
4357 return InferPointerInfo(Ptr);
4358 return MachinePointerInfo();
4359}
4360
4361
4362SDValue
4363SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4364 EVT VT, DebugLoc dl, SDValue Chain,
4365 SDValue Ptr, SDValue Offset,
4366 MachinePointerInfo PtrInfo, EVT MemVT,
4367 bool isVolatile, bool isNonTemporal, bool isInvariant,
4368 unsigned Alignment, const MDNode *TBAAInfo,
4369 const MDNode *Ranges) {
4370 assert(Chain.getValueType() == MVT::Other &&
4371 "Invalid chain type");
4372 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4373 Alignment = getEVTAlignment(VT);
4374
4375 unsigned Flags = MachineMemOperand::MOLoad;
4376 if (isVolatile)
4377 Flags |= MachineMemOperand::MOVolatile;
4378 if (isNonTemporal)
4379 Flags |= MachineMemOperand::MONonTemporal;
4380 if (isInvariant)
4381 Flags |= MachineMemOperand::MOInvariant;
4382
4383 // If we don't have a PtrInfo, infer the trivial frame index case to simplify
4384 // clients.
4385 if (PtrInfo.V == 0)
4386 PtrInfo = InferPointerInfo(Ptr, Offset);
4387
4388 MachineFunction &MF = getMachineFunction();
4389 MachineMemOperand *MMO =
4390 MF.getMachineMemOperand(PtrInfo, Flags, MemVT.getStoreSize(), Alignment,
4391 TBAAInfo, Ranges);
4392 return getLoad(AM, ExtType, VT, dl, Chain, Ptr, Offset, MemVT, MMO);
4393}
4394
4395SDValue
4396SelectionDAG::getLoad(ISD::MemIndexedMode AM, ISD::LoadExtType ExtType,
4397 EVT VT, DebugLoc dl, SDValue Chain,
4398 SDValue Ptr, SDValue Offset, EVT MemVT,
4399 MachineMemOperand *MMO) {
4400 if (VT == MemVT) {
4401 ExtType = ISD::NON_EXTLOAD;
4402 } else if (ExtType == ISD::NON_EXTLOAD) {
4403 assert(VT == MemVT && "Non-extending load from different memory type!");
4404 } else {
4405 // Extending load.
4406 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) &&
4407 "Should only be an extending load, not truncating!");
4408 assert(VT.isInteger() == MemVT.isInteger() &&
4409 "Cannot convert from FP to Int or Int -> FP!");
4410 assert(VT.isVector() == MemVT.isVector() &&
4411 "Cannot use trunc store to convert to or from a vector!");
4412 assert((!VT.isVector() ||
4413 VT.getVectorNumElements() == MemVT.getVectorNumElements()) &&
4414 "Cannot use trunc store to change the number of vector elements!");
4415 }
4416
4417 bool Indexed = AM != ISD::UNINDEXED;
4418 assert((Indexed || Offset.getOpcode() == ISD::UNDEF) &&
4419 "Unindexed load with an offset!");
4420
4421 SDVTList VTs = Indexed ?
4422 getVTList(VT, Ptr.getValueType(), MVT::Other) : getVTList(VT, MVT::Other);
4423 SDValue Ops[] = { Chain, Ptr, Offset };
4424 FoldingSetNodeID ID;
4425 AddNodeIDNode(ID, ISD::LOAD, VTs, Ops, 3);
4426 ID.AddInteger(MemVT.getRawBits());
4427 ID.AddInteger(encodeMemSDNodeFlags(ExtType, AM, MMO->isVolatile(),
4428 MMO->isNonTemporal(),
4429 MMO->isInvariant()));
4430 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4431 void *IP = 0;
4432 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4433 cast<LoadSDNode>(E)->refineAlignment(MMO);
4434 return SDValue(E, 0);
4435 }
4436 SDNode *N = new (NodeAllocator) LoadSDNode(Ops, dl, VTs, AM, ExtType,
4437 MemVT, MMO);
4438 CSEMap.InsertNode(N, IP);
4439 AllNodes.push_back(N);
4440 return SDValue(N, 0);
4441}
4442
4443SDValue SelectionDAG::getLoad(EVT VT, DebugLoc dl,
4444 SDValue Chain, SDValue Ptr,
4445 MachinePointerInfo PtrInfo,
4446 bool isVolatile, bool isNonTemporal,
4447 bool isInvariant, unsigned Alignment,
4448 const MDNode *TBAAInfo,
4449 const MDNode *Ranges) {
4450 SDValue Undef = getUNDEF(Ptr.getValueType());
4451 return getLoad(ISD::UNINDEXED, ISD::NON_EXTLOAD, VT, dl, Chain, Ptr, Undef,
4452 PtrInfo, VT, isVolatile, isNonTemporal, isInvariant, Alignment,
4453 TBAAInfo, Ranges);
4454}
4455
4456SDValue SelectionDAG::getExtLoad(ISD::LoadExtType ExtType, DebugLoc dl, EVT VT,
4457 SDValue Chain, SDValue Ptr,
4458 MachinePointerInfo PtrInfo, EVT MemVT,
4459 bool isVolatile, bool isNonTemporal,
4460 unsigned Alignment, const MDNode *TBAAInfo) {
4461 SDValue Undef = getUNDEF(Ptr.getValueType());
4462 return getLoad(ISD::UNINDEXED, ExtType, VT, dl, Chain, Ptr, Undef,
4463 PtrInfo, MemVT, isVolatile, isNonTemporal, false, Alignment,
4464 TBAAInfo);
4465}
4466
4467
4468SDValue
4469SelectionDAG::getIndexedLoad(SDValue OrigLoad, DebugLoc dl, SDValue Base,
4470 SDValue Offset, ISD::MemIndexedMode AM) {
4471 LoadSDNode *LD = cast<LoadSDNode>(OrigLoad);
4472 assert(LD->getOffset().getOpcode() == ISD::UNDEF &&
4473 "Load is already an indexed load!");
4474 return getLoad(AM, LD->getExtensionType(), OrigLoad.getValueType(), dl,
4475 LD->getChain(), Base, Offset, LD->getPointerInfo(),
4476 LD->getMemoryVT(), LD->isVolatile(), LD->isNonTemporal(),
4477 false, LD->getAlignment());
4478}
4479
4480SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
4481 SDValue Ptr, MachinePointerInfo PtrInfo,
4482 bool isVolatile, bool isNonTemporal,
4483 unsigned Alignment, const MDNode *TBAAInfo) {
4484 assert(Chain.getValueType() == MVT::Other &&
4485 "Invalid chain type");
4486 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4487 Alignment = getEVTAlignment(Val.getValueType());
4488
4489 unsigned Flags = MachineMemOperand::MOStore;
4490 if (isVolatile)
4491 Flags |= MachineMemOperand::MOVolatile;
4492 if (isNonTemporal)
4493 Flags |= MachineMemOperand::MONonTemporal;
4494
4495 if (PtrInfo.V == 0)
4496 PtrInfo = InferPointerInfo(Ptr);
4497
4498 MachineFunction &MF = getMachineFunction();
4499 MachineMemOperand *MMO =
4500 MF.getMachineMemOperand(PtrInfo, Flags,
4501 Val.getValueType().getStoreSize(), Alignment,
4502 TBAAInfo);
4503
4504 return getStore(Chain, dl, Val, Ptr, MMO);
4505}
4506
4507SDValue SelectionDAG::getStore(SDValue Chain, DebugLoc dl, SDValue Val,
4508 SDValue Ptr, MachineMemOperand *MMO) {
4509 assert(Chain.getValueType() == MVT::Other &&
4510 "Invalid chain type");
4511 EVT VT = Val.getValueType();
4512 SDVTList VTs = getVTList(MVT::Other);
4513 SDValue Undef = getUNDEF(Ptr.getValueType());
4514 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4515 FoldingSetNodeID ID;
4516 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4517 ID.AddInteger(VT.getRawBits());
4518 ID.AddInteger(encodeMemSDNodeFlags(false, ISD::UNINDEXED, MMO->isVolatile(),
4519 MMO->isNonTemporal(), MMO->isInvariant()));
4520 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4521 void *IP = 0;
4522 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4523 cast<StoreSDNode>(E)->refineAlignment(MMO);
4524 return SDValue(E, 0);
4525 }
4526 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED,
4527 false, VT, MMO);
4528 CSEMap.InsertNode(N, IP);
4529 AllNodes.push_back(N);
4530 return SDValue(N, 0);
4531}
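// Usage sketch for the two getStore forms above (hypothetical names; assumes
// Ch, Val, Ptr and FI exist):
//
//   SDValue St = DAG.getStore(Ch, dl, Val, Ptr,
//                             MachinePointerInfo::getFixedStack(FI),
//                             false /*isVolatile*/, false /*isNonTemporal*/,
//                             0 /*Alignment: use natural alignment*/);
//
// The single result is the new chain token produced by the store.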
4532
4533SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
4534 SDValue Ptr, MachinePointerInfo PtrInfo,
4535                                     EVT SVT, bool isVolatile, bool isNonTemporal,
4536 unsigned Alignment,
4537 const MDNode *TBAAInfo) {
4538 assert(Chain.getValueType() == MVT::Other &&
4539 "Invalid chain type");
4540 if (Alignment == 0) // Ensure that codegen never sees alignment 0
4541 Alignment = getEVTAlignment(SVT);
4542
4543 unsigned Flags = MachineMemOperand::MOStore;
4544 if (isVolatile)
4545 Flags |= MachineMemOperand::MOVolatile;
4546 if (isNonTemporal)
4547 Flags |= MachineMemOperand::MONonTemporal;
4548
4549 if (PtrInfo.V == 0)
4550 PtrInfo = InferPointerInfo(Ptr);
4551
4552 MachineFunction &MF = getMachineFunction();
4553 MachineMemOperand *MMO =
4554 MF.getMachineMemOperand(PtrInfo, Flags, SVT.getStoreSize(), Alignment,
4555 TBAAInfo);
4556
4557 return getTruncStore(Chain, dl, Val, Ptr, SVT, MMO);
4558}
4559
4560SDValue SelectionDAG::getTruncStore(SDValue Chain, DebugLoc dl, SDValue Val,
4561 SDValue Ptr, EVT SVT,
4562 MachineMemOperand *MMO) {
4563 EVT VT = Val.getValueType();
4564
4565 assert(Chain.getValueType() == MVT::Other &&
4566 "Invalid chain type");
4567 if (VT == SVT)
4568 return getStore(Chain, dl, Val, Ptr, MMO);
4569
4570 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) &&
4571 "Should only be a truncating store, not extending!");
4572 assert(VT.isInteger() == SVT.isInteger() &&
4573 "Can't do FP-INT conversion!");
4574 assert(VT.isVector() == SVT.isVector() &&
4575 "Cannot use trunc store to convert to or from a vector!");
4576 assert((!VT.isVector() ||
4577 VT.getVectorNumElements() == SVT.getVectorNumElements()) &&
4578 "Cannot use trunc store to change the number of vector elements!");
4579
4580 SDVTList VTs = getVTList(MVT::Other);
4581 SDValue Undef = getUNDEF(Ptr.getValueType());
4582 SDValue Ops[] = { Chain, Val, Ptr, Undef };
4583 FoldingSetNodeID ID;
4584 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4585 ID.AddInteger(SVT.getRawBits());
4586 ID.AddInteger(encodeMemSDNodeFlags(true, ISD::UNINDEXED, MMO->isVolatile(),
4587 MMO->isNonTemporal(), MMO->isInvariant()));
4588 ID.AddInteger(MMO->getPointerInfo().getAddrSpace());
4589 void *IP = 0;
4590 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
4591 cast<StoreSDNode>(E)->refineAlignment(MMO);
4592 return SDValue(E, 0);
4593 }
4594 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl, VTs, ISD::UNINDEXED,
4595 true, SVT, MMO);
4596 CSEMap.InsertNode(N, IP);
4597 AllNodes.push_back(N);
4598 return SDValue(N, 0);
4599}
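// Sketch of the truncating form (hypothetical operands): keep an i32 value in
// a register but store only its low 8 bits. SVT names the narrower memory
// type, and the assertions above enforce that it really is narrower.
//
//   SDValue St = DAG.getTruncStore(Ch, dl, Val32, Ptr,
//                                  MachinePointerInfo::getFixedStack(FI),
//                                  MVT::i8, false, false, 0);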
4600
4601SDValue
4602SelectionDAG::getIndexedStore(SDValue OrigStore, DebugLoc dl, SDValue Base,
4603 SDValue Offset, ISD::MemIndexedMode AM) {
4604 StoreSDNode *ST = cast<StoreSDNode>(OrigStore);
4605 assert(ST->getOffset().getOpcode() == ISD::UNDEF &&
4606         "Store is already an indexed store!");
4607 SDVTList VTs = getVTList(Base.getValueType(), MVT::Other);
4608 SDValue Ops[] = { ST->getChain(), ST->getValue(), Base, Offset };
4609 FoldingSetNodeID ID;
4610 AddNodeIDNode(ID, ISD::STORE, VTs, Ops, 4);
4611 ID.AddInteger(ST->getMemoryVT().getRawBits());
4612 ID.AddInteger(ST->getRawSubclassData());
4613 ID.AddInteger(ST->getPointerInfo().getAddrSpace());
4614 void *IP = 0;
4615 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4616 return SDValue(E, 0);
4617
4618 SDNode *N = new (NodeAllocator) StoreSDNode(Ops, dl, VTs, AM,
4619 ST->isTruncatingStore(),
4620 ST->getMemoryVT(),
4621 ST->getMemOperand());
4622 CSEMap.InsertNode(N, IP);
4623 AllNodes.push_back(N);
4624 return SDValue(N, 0);
4625}
4626
4627SDValue SelectionDAG::getVAArg(EVT VT, DebugLoc dl,
4628 SDValue Chain, SDValue Ptr,
4629 SDValue SV,
4630 unsigned Align) {
4631 SDValue Ops[] = { Chain, Ptr, SV, getTargetConstant(Align, MVT::i32) };
4632 return getNode(ISD::VAARG, dl, getVTList(VT, MVT::Other), Ops, 4);
4633}
4634
4635SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
4636 const SDUse *Ops, unsigned NumOps) {
4637 switch (NumOps) {
4638 case 0: return getNode(Opcode, DL, VT);
4639 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4640 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4641 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4642 default: break;
4643 }
4644
4645 // Copy from an SDUse array into an SDValue array for use with
4646 // the regular getNode logic.
4647 SmallVector<SDValue, 8> NewOps(Ops, Ops + NumOps);
4648 return getNode(Opcode, DL, VT, &NewOps[0], NumOps);
4649}
4650
4651SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, EVT VT,
4652 const SDValue *Ops, unsigned NumOps) {
4653 switch (NumOps) {
4654 case 0: return getNode(Opcode, DL, VT);
4655 case 1: return getNode(Opcode, DL, VT, Ops[0]);
4656 case 2: return getNode(Opcode, DL, VT, Ops[0], Ops[1]);
4657 case 3: return getNode(Opcode, DL, VT, Ops[0], Ops[1], Ops[2]);
4658 default: break;
4659 }
4660
4661 switch (Opcode) {
4662 default: break;
4663 case ISD::SELECT_CC: {
4664 assert(NumOps == 5 && "SELECT_CC takes 5 operands!");
4665 assert(Ops[0].getValueType() == Ops[1].getValueType() &&
4666 "LHS and RHS of condition must have same type!");
4667 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4668 "True and False arms of SelectCC must have same type!");
4669 assert(Ops[2].getValueType() == VT &&
4670 "select_cc node must be of same type as true and false value!");
4671 break;
4672 }
4673 case ISD::BR_CC: {
4674 assert(NumOps == 5 && "BR_CC takes 5 operands!");
4675 assert(Ops[2].getValueType() == Ops[3].getValueType() &&
4676 "LHS/RHS of comparison should match types!");
4677 break;
4678 }
4679 }
4680
4681 // Memoize nodes.
4682 SDNode *N;
4683 SDVTList VTs = getVTList(VT);
4684
4685 if (VT != MVT::Glue) {
4686 FoldingSetNodeID ID;
4687 AddNodeIDNode(ID, Opcode, VTs, Ops, NumOps);
4688 void *IP = 0;
4689
4690 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4691 return SDValue(E, 0);
4692
4693 N = new (NodeAllocator) SDNode(Opcode, DL, VTs, Ops, NumOps);
4694 CSEMap.InsertNode(N, IP);
4695 } else {
4696 N = new (NodeAllocator) SDNode(Opcode, DL, VTs, Ops, NumOps);
4697 }
4698
4699 AllNodes.push_back(N);
4700#ifndef NDEBUG
4701 VerifySDNode(N);
4702#endif
4703 return SDValue(N, 0);
4704}
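// Sketch: the fixed-arity getNode overloads funnel into this array form, and
// the SELECT_CC assertions above describe the expected operand shape.
// Hypothetical operands of matching types:
//
//   SDValue Ops[] = { LHS, RHS, TrueV, FalseV, DAG.getCondCode(ISD::SETLT) };
//   SDValue R = DAG.getNode(ISD::SELECT_CC, dl, VT, Ops, 5);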
4705
4706SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
4707                              ArrayRef<EVT> ResultTys,
4708 const SDValue *Ops, unsigned NumOps) {
4709 return getNode(Opcode, DL, getVTList(&ResultTys[0], ResultTys.size()),
4710 Ops, NumOps);
4711}
4712
4713SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL,
4714 const EVT *VTs, unsigned NumVTs,
4715 const SDValue *Ops, unsigned NumOps) {
4716 if (NumVTs == 1)
4717 return getNode(Opcode, DL, VTs[0], Ops, NumOps);
4718 return getNode(Opcode, DL, makeVTList(VTs, NumVTs), Ops, NumOps);
4719}
4720
4721SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4722 const SDValue *Ops, unsigned NumOps) {
4723 if (VTList.NumVTs == 1)
4724 return getNode(Opcode, DL, VTList.VTs[0], Ops, NumOps);
4725
4726#if 0
4727 switch (Opcode) {
4728 // FIXME: figure out how to safely handle things like
4729 // int foo(int x) { return 1 << (x & 255); }
4730 // int bar() { return foo(256); }
4731 case ISD::SRA_PARTS:
4732 case ISD::SRL_PARTS:
4733 case ISD::SHL_PARTS:
4734 if (N3.getOpcode() == ISD::SIGN_EXTEND_INREG &&
4735 cast<VTSDNode>(N3.getOperand(1))->getVT() != MVT::i1)
4736 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4737 else if (N3.getOpcode() == ISD::AND)
4738 if (ConstantSDNode *AndRHS = dyn_cast<ConstantSDNode>(N3.getOperand(1))) {
4739         // If the 'and' is only masking out bits that cannot affect the shift,
4740 // eliminate the and.
4741 unsigned NumBits = VT.getScalarType().getSizeInBits()*2;
4742 if ((AndRHS->getValue() & (NumBits-1)) == NumBits-1)
4743 return getNode(Opcode, DL, VT, N1, N2, N3.getOperand(0));
4744 }
4745 break;
4746 }
4747#endif
4748
4749 // Memoize the node unless it returns a flag.
4750 SDNode *N;
4751 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
4752 FoldingSetNodeID ID;
4753 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
4754 void *IP = 0;
4755 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
4756 return SDValue(E, 0);
4757
4758 if (NumOps == 1) {
4759 N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTList, Ops[0]);
4760 } else if (NumOps == 2) {
4761 N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
4762 } else if (NumOps == 3) {
4763 N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1],
4764 Ops[2]);
4765 } else {
4766 N = new (NodeAllocator) SDNode(Opcode, DL, VTList, Ops, NumOps);
4767 }
4768 CSEMap.InsertNode(N, IP);
4769 } else {
4770 if (NumOps == 1) {
4771 N = new (NodeAllocator) UnarySDNode(Opcode, DL, VTList, Ops[0]);
4772 } else if (NumOps == 2) {
4773 N = new (NodeAllocator) BinarySDNode(Opcode, DL, VTList, Ops[0], Ops[1]);
4774 } else if (NumOps == 3) {
4775 N = new (NodeAllocator) TernarySDNode(Opcode, DL, VTList, Ops[0], Ops[1],
4776 Ops[2]);
4777 } else {
4778 N = new (NodeAllocator) SDNode(Opcode, DL, VTList, Ops, NumOps);
4779 }
4780 }
4781 AllNodes.push_back(N);
4782#ifndef NDEBUG
4783 VerifySDNode(N);
4784#endif
4785 return SDValue(N, 0);
4786}
4787
4788SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList) {
4789 return getNode(Opcode, DL, VTList, 0, 0);
4790}
4791
4792SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4793 SDValue N1) {
4794 SDValue Ops[] = { N1 };
4795 return getNode(Opcode, DL, VTList, Ops, 1);
4796}
4797
4798SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4799 SDValue N1, SDValue N2) {
4800 SDValue Ops[] = { N1, N2 };
4801 return getNode(Opcode, DL, VTList, Ops, 2);
4802}
4803
4804SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4805 SDValue N1, SDValue N2, SDValue N3) {
4806 SDValue Ops[] = { N1, N2, N3 };
4807 return getNode(Opcode, DL, VTList, Ops, 3);
4808}
4809
4810SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4811 SDValue N1, SDValue N2, SDValue N3,
4812 SDValue N4) {
4813 SDValue Ops[] = { N1, N2, N3, N4 };
4814 return getNode(Opcode, DL, VTList, Ops, 4);
4815}
4816
4817SDValue SelectionDAG::getNode(unsigned Opcode, DebugLoc DL, SDVTList VTList,
4818 SDValue N1, SDValue N2, SDValue N3,
4819 SDValue N4, SDValue N5) {
4820 SDValue Ops[] = { N1, N2, N3, N4, N5 };
4821 return getNode(Opcode, DL, VTList, Ops, 5);
4822}
4823
4824SDVTList SelectionDAG::getVTList(EVT VT) {
4825 return makeVTList(SDNode::getValueTypeList(VT), 1);
4826}
4827
4828SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2) {
4829 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4830 E = VTList.rend(); I != E; ++I)
4831 if (I->NumVTs == 2 && I->VTs[0] == VT1 && I->VTs[1] == VT2)
4832 return *I;
4833
4834 EVT *Array = Allocator.Allocate<EVT>(2);
4835 Array[0] = VT1;
4836 Array[1] = VT2;
4837 SDVTList Result = makeVTList(Array, 2);
4838 VTList.push_back(Result);
4839 return Result;
4840}
4841
4842SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3) {
4843 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4844 E = VTList.rend(); I != E; ++I)
4845 if (I->NumVTs == 3 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
4846 I->VTs[2] == VT3)
4847 return *I;
4848
4849 EVT *Array = Allocator.Allocate<EVT>(3);
4850 Array[0] = VT1;
4851 Array[1] = VT2;
4852 Array[2] = VT3;
4853 SDVTList Result = makeVTList(Array, 3);
4854 VTList.push_back(Result);
4855 return Result;
4856}
4857
4858SDVTList SelectionDAG::getVTList(EVT VT1, EVT VT2, EVT VT3, EVT VT4) {
4859 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4860 E = VTList.rend(); I != E; ++I)
4861 if (I->NumVTs == 4 && I->VTs[0] == VT1 && I->VTs[1] == VT2 &&
4862 I->VTs[2] == VT3 && I->VTs[3] == VT4)
4863 return *I;
4864
4865 EVT *Array = Allocator.Allocate<EVT>(4);
4866 Array[0] = VT1;
4867 Array[1] = VT2;
4868 Array[2] = VT3;
4869 Array[3] = VT4;
4870 SDVTList Result = makeVTList(Array, 4);
4871 VTList.push_back(Result);
4872 return Result;
4873}
4874
4875SDVTList SelectionDAG::getVTList(const EVT *VTs, unsigned NumVTs) {
4876 switch (NumVTs) {
4877 case 0: llvm_unreachable("Cannot have nodes without results!");
4878 case 1: return getVTList(VTs[0]);
4879 case 2: return getVTList(VTs[0], VTs[1]);
4880 case 3: return getVTList(VTs[0], VTs[1], VTs[2]);
4881 case 4: return getVTList(VTs[0], VTs[1], VTs[2], VTs[3]);
4882 default: break;
4883 }
4884
4885 for (std::vector<SDVTList>::reverse_iterator I = VTList.rbegin(),
4886 E = VTList.rend(); I != E; ++I) {
4887 if (I->NumVTs != NumVTs || VTs[0] != I->VTs[0] || VTs[1] != I->VTs[1])
4888 continue;
4889
4890 if (std::equal(&VTs[2], &VTs[NumVTs], &I->VTs[2]))
4891 return *I;
4892 }
4893
4894 EVT *Array = Allocator.Allocate<EVT>(NumVTs);
4895 std::copy(VTs, VTs+NumVTs, Array);
4896 SDVTList Result = makeVTList(Array, NumVTs);
4897 VTList.push_back(Result);
4898 return Result;
4899}
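// Note: each getVTList overload above caches the list it builds in VTList, so
// repeated requests for the same combination return pointer-identical arrays.
// A hedged sketch of the consequence:
//
//   SDVTList A = DAG.getVTList(MVT::i32, MVT::Other);
//   SDVTList B = DAG.getVTList(MVT::i32, MVT::Other);
//   assert(A.VTs == B.VTs && "cached VT lists share storage");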
4900
4901
4902/// UpdateNodeOperands - *Mutate* the specified node in-place to have the
4903/// specified operands. If the resultant node already exists in the DAG,
4904/// this does not modify the specified node, instead it returns the node that
4905/// already exists. If the resultant node does not exist in the DAG, the
4906/// input node is returned. As a degenerate case, if you specify the same
4907/// input operands as the node already has, the input node is returned.
4908SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op) {
4909 assert(N->getNumOperands() == 1 && "Update with wrong number of operands");
4910
4911 // Check to see if there is no change.
4912 if (Op == N->getOperand(0)) return N;
4913
4914 // See if the modified node already exists.
4915 void *InsertPos = 0;
4916 if (SDNode *Existing = FindModifiedNodeSlot(N, Op, InsertPos))
4917 return Existing;
4918
4919   // Nope, it doesn't. Remove the node from its current place in the maps.
4920 if (InsertPos)
4921 if (!RemoveNodeFromCSEMaps(N))
4922 InsertPos = 0;
4923
4924 // Now we update the operands.
4925 N->OperandList[0].set(Op);
4926
4927 // If this gets put into a CSE map, add it.
4928 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
4929 return N;
4930}
4931
4932SDNode *SelectionDAG::UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2) {
4933 assert(N->getNumOperands() == 2 && "Update with wrong number of operands");
4934
4935 // Check to see if there is no change.
4936 if (Op1 == N->getOperand(0) && Op2 == N->getOperand(1))
4937 return N; // No operands changed, just return the input node.
4938
4939 // See if the modified node already exists.
4940 void *InsertPos = 0;
4941 if (SDNode *Existing = FindModifiedNodeSlot(N, Op1, Op2, InsertPos))
4942 return Existing;
4943
4944   // Nope, it doesn't. Remove the node from its current place in the maps.
4945 if (InsertPos)
4946 if (!RemoveNodeFromCSEMaps(N))
4947 InsertPos = 0;
4948
4949 // Now we update the operands.
4950 if (N->OperandList[0] != Op1)
4951 N->OperandList[0].set(Op1);
4952 if (N->OperandList[1] != Op2)
4953 N->OperandList[1].set(Op2);
4954
4955 // If this gets put into a CSE map, add it.
4956 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
4957 return N;
4958}
4959
4960SDNode *SelectionDAG::
4961UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2, SDValue Op3) {
4962 SDValue Ops[] = { Op1, Op2, Op3 };
4963 return UpdateNodeOperands(N, Ops, 3);
4964}
4965
4966SDNode *SelectionDAG::
4967UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
4968 SDValue Op3, SDValue Op4) {
4969 SDValue Ops[] = { Op1, Op2, Op3, Op4 };
4970 return UpdateNodeOperands(N, Ops, 4);
4971}
4972
4973SDNode *SelectionDAG::
4974UpdateNodeOperands(SDNode *N, SDValue Op1, SDValue Op2,
4975 SDValue Op3, SDValue Op4, SDValue Op5) {
4976 SDValue Ops[] = { Op1, Op2, Op3, Op4, Op5 };
4977 return UpdateNodeOperands(N, Ops, 5);
4978}
4979
4980SDNode *SelectionDAG::
4981UpdateNodeOperands(SDNode *N, const SDValue *Ops, unsigned NumOps) {
4982 assert(N->getNumOperands() == NumOps &&
4983 "Update with wrong number of operands");
4984
4985 // Check to see if there is no change.
4986 bool AnyChange = false;
4987 for (unsigned i = 0; i != NumOps; ++i) {
4988 if (Ops[i] != N->getOperand(i)) {
4989 AnyChange = true;
4990 break;
4991 }
4992 }
4993
4994 // No operands changed, just return the input node.
4995 if (!AnyChange) return N;
4996
4997 // See if the modified node already exists.
4998 void *InsertPos = 0;
4999 if (SDNode *Existing = FindModifiedNodeSlot(N, Ops, NumOps, InsertPos))
5000 return Existing;
5001
5002   // Nope, it doesn't. Remove the node from its current place in the maps.
5003 if (InsertPos)
5004 if (!RemoveNodeFromCSEMaps(N))
5005 InsertPos = 0;
5006
5007 // Now we update the operands.
5008 for (unsigned i = 0; i != NumOps; ++i)
5009 if (N->OperandList[i] != Ops[i])
5010 N->OperandList[i].set(Ops[i]);
5011
5012 // If this gets put into a CSE map, add it.
5013 if (InsertPos) CSEMap.InsertNode(N, InsertPos);
5014 return N;
5015}
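// Usage sketch (hypothetical operands): callers must use the returned node,
// which may differ from the input when CSE finds an existing equivalent.
//
//   SDNode *Res = DAG.UpdateNodeOperands(N, NewOp0, NewOp1);
//   if (Res != N) {
//     // N was left untouched; an identical node already existed in the DAG.
//   }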
5016
5017/// DropOperands - Release the operands and set this node to have
5018/// zero operands.
5019void SDNode::DropOperands() {
5020 // Unlike the code in MorphNodeTo that does this, we don't need to
5021 // watch for dead nodes here.
5022 for (op_iterator I = op_begin(), E = op_end(); I != E; ) {
5023 SDUse &Use = *I++;
5024 Use.set(SDValue());
5025 }
5026}
5027
5028/// SelectNodeTo - These are wrappers around MorphNodeTo that accept a
5029/// machine opcode.
5030///
5031SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5032 EVT VT) {
5033 SDVTList VTs = getVTList(VT);
5034 return SelectNodeTo(N, MachineOpc, VTs, 0, 0);
5035}
5036
5037SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5038 EVT VT, SDValue Op1) {
5039 SDVTList VTs = getVTList(VT);
5040 SDValue Ops[] = { Op1 };
5041 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5042}
5043
5044SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5045 EVT VT, SDValue Op1,
5046 SDValue Op2) {
5047 SDVTList VTs = getVTList(VT);
5048 SDValue Ops[] = { Op1, Op2 };
5049 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5050}
5051
5052SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5053 EVT VT, SDValue Op1,
5054 SDValue Op2, SDValue Op3) {
5055 SDVTList VTs = getVTList(VT);
5056 SDValue Ops[] = { Op1, Op2, Op3 };
5057 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5058}
5059
5060SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5061 EVT VT, const SDValue *Ops,
5062 unsigned NumOps) {
5063 SDVTList VTs = getVTList(VT);
5064 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5065}
5066
5067SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5068 EVT VT1, EVT VT2, const SDValue *Ops,
5069 unsigned NumOps) {
5070 SDVTList VTs = getVTList(VT1, VT2);
5071 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5072}
5073
5074SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5075 EVT VT1, EVT VT2) {
5076 SDVTList VTs = getVTList(VT1, VT2);
5077 return SelectNodeTo(N, MachineOpc, VTs, (SDValue *)0, 0);
5078}
5079
5080SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5081 EVT VT1, EVT VT2, EVT VT3,
5082 const SDValue *Ops, unsigned NumOps) {
5083 SDVTList VTs = getVTList(VT1, VT2, VT3);
5084 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5085}
5086
5087SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5088 EVT VT1, EVT VT2, EVT VT3, EVT VT4,
5089 const SDValue *Ops, unsigned NumOps) {
5090 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5091 return SelectNodeTo(N, MachineOpc, VTs, Ops, NumOps);
5092}
5093
5094SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5095 EVT VT1, EVT VT2,
5096 SDValue Op1) {
5097 SDVTList VTs = getVTList(VT1, VT2);
5098 SDValue Ops[] = { Op1 };
5099 return SelectNodeTo(N, MachineOpc, VTs, Ops, 1);
5100}
5101
5102SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5103 EVT VT1, EVT VT2,
5104 SDValue Op1, SDValue Op2) {
5105 SDVTList VTs = getVTList(VT1, VT2);
5106 SDValue Ops[] = { Op1, Op2 };
5107 return SelectNodeTo(N, MachineOpc, VTs, Ops, 2);
5108}
5109
5110SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5111 EVT VT1, EVT VT2,
5112 SDValue Op1, SDValue Op2,
5113 SDValue Op3) {
5114 SDVTList VTs = getVTList(VT1, VT2);
5115 SDValue Ops[] = { Op1, Op2, Op3 };
5116 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5117}
5118
5119SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5120 EVT VT1, EVT VT2, EVT VT3,
5121 SDValue Op1, SDValue Op2,
5122 SDValue Op3) {
5123 SDVTList VTs = getVTList(VT1, VT2, VT3);
5124 SDValue Ops[] = { Op1, Op2, Op3 };
5125 return SelectNodeTo(N, MachineOpc, VTs, Ops, 3);
5126}
5127
5128SDNode *SelectionDAG::SelectNodeTo(SDNode *N, unsigned MachineOpc,
5129 SDVTList VTs, const SDValue *Ops,
5130 unsigned NumOps) {
5131 N = MorphNodeTo(N, ~MachineOpc, VTs, Ops, NumOps);
5132 // Reset the NodeID to -1.
5133 N->setNodeId(-1);
5134 return N;
5135}
5136
5137/// UpdadeDebugLocOnMergedSDNode - If the opt level is -O0 then it throws away
5138/// the line number information on the merged node since it is not possible to
5139/// preserve the information that an operation is associated with multiple
5140/// lines. This makes the debugger work better at -O0, where there is a higher
5141/// probability of other instructions being associated with that line.
5142///
5143SDNode *SelectionDAG::UpdadeDebugLocOnMergedSDNode(SDNode *N, DebugLoc OLoc) {
5144 DebugLoc NLoc = N->getDebugLoc();
5145 if (!(NLoc.isUnknown()) && (OptLevel == CodeGenOpt::None) && (OLoc != NLoc)) {
5146 N->setDebugLoc(DebugLoc());
5147 }
5148 return N;
5149}
5150
5151/// MorphNodeTo - This *mutates* the specified node to have the specified
5152/// return type, opcode, and operands.
5153///
5154/// Note that MorphNodeTo returns the resultant node. If there is already a
5155/// node of the specified opcode and operands, it returns that node instead of
5156/// the current one. Note that the DebugLoc need not be the same.
5157///
5158/// Using MorphNodeTo is faster than creating a new node and swapping it in
5159/// with ReplaceAllUsesWith both because it often avoids allocating a new
5160/// node, and because it doesn't require CSE recalculation for any of
5161/// the node's users.
5162///
5163SDNode *SelectionDAG::MorphNodeTo(SDNode *N, unsigned Opc,
5164 SDVTList VTs, const SDValue *Ops,
5165 unsigned NumOps) {
5166 // If an identical node already exists, use it.
5167 void *IP = 0;
5168 if (VTs.VTs[VTs.NumVTs-1] != MVT::Glue) {
5169 FoldingSetNodeID ID;
5170 AddNodeIDNode(ID, Opc, VTs, Ops, NumOps);
5171 if (SDNode *ON = CSEMap.FindNodeOrInsertPos(ID, IP))
5172 return UpdadeDebugLocOnMergedSDNode(ON, N->getDebugLoc());
5173 }
5174
5175 if (!RemoveNodeFromCSEMaps(N))
5176 IP = 0;
5177
5178 // Start the morphing.
5179 N->NodeType = Opc;
5180 N->ValueList = VTs.VTs;
5181 N->NumValues = VTs.NumVTs;
5182
5183 // Clear the operands list, updating used nodes to remove this from their
5184 // use list. Keep track of any operands that become dead as a result.
5185 SmallPtrSet<SDNode*, 16> DeadNodeSet;
5186 for (SDNode::op_iterator I = N->op_begin(), E = N->op_end(); I != E; ) {
5187 SDUse &Use = *I++;
5188 SDNode *Used = Use.getNode();
5189 Use.set(SDValue());
5190 if (Used->use_empty())
5191 DeadNodeSet.insert(Used);
5192 }
5193
5194 if (MachineSDNode *MN = dyn_cast<MachineSDNode>(N)) {
5195 // Initialize the memory references information.
5196 MN->setMemRefs(0, 0);
5197 // If NumOps is larger than the # of operands we can have in a
5198 // MachineSDNode, reallocate the operand list.
5199 if (NumOps > MN->NumOperands || !MN->OperandsNeedDelete) {
5200 if (MN->OperandsNeedDelete)
5201 delete[] MN->OperandList;
5202 if (NumOps > array_lengthof(MN->LocalOperands))
5203 // We're creating a final node that will live unmorphed for the
5204 // remainder of the current SelectionDAG iteration, so we can allocate
5205 // the operands directly out of a pool with no recycling metadata.
5206 MN->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5207 Ops, NumOps);
5208 else
5209 MN->InitOperands(MN->LocalOperands, Ops, NumOps);
5210 MN->OperandsNeedDelete = false;
5211 } else
5212 MN->InitOperands(MN->OperandList, Ops, NumOps);
5213 } else {
5214 // If NumOps is larger than the # of operands we currently have, reallocate
5215 // the operand list.
5216 if (NumOps > N->NumOperands) {
5217 if (N->OperandsNeedDelete)
5218 delete[] N->OperandList;
5219 N->InitOperands(new SDUse[NumOps], Ops, NumOps);
5220 N->OperandsNeedDelete = true;
5221 } else
5222 N->InitOperands(N->OperandList, Ops, NumOps);
5223 }
5224
5225 // Delete any nodes that are still dead after adding the uses for the
5226 // new operands.
5227 if (!DeadNodeSet.empty()) {
5228 SmallVector<SDNode *, 16> DeadNodes;
5229 for (SmallPtrSet<SDNode *, 16>::iterator I = DeadNodeSet.begin(),
5230 E = DeadNodeSet.end(); I != E; ++I)
5231 if ((*I)->use_empty())
5232 DeadNodes.push_back(*I);
5233 RemoveDeadNodes(DeadNodes);
5234 }
5235
5236 if (IP)
5237 CSEMap.InsertNode(N, IP); // Memoize the new node.
5238 return N;
5239}
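// Hedged sketch: instruction selection uses morphing to rewrite a node into a
// machine node in place. The ~Opc complement encoding matches SelectNodeTo
// above; TargetOpc and the operand choice here are hypothetical.
//
//   SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
//   SDNode *Res = DAG.MorphNodeTo(N, ~TargetOpc, DAG.getVTList(VT), Ops, 2);
//   if (Res != N) {
//     // An identical node already existed; redirect N's users to Res,
//     // e.g. with ReplaceAllUsesWith.
//   }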
5240
5241
5242/// getMachineNode - These are used for target selectors to create a new node
5243/// with specified return type(s), MachineInstr opcode, and operands.
5244///
5245/// Note that getMachineNode returns the resultant node. If there is already a
5246/// node of the specified opcode and operands, it returns that node instead of
5247/// the current one.
5248MachineSDNode *
5249SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT) {
5250 SDVTList VTs = getVTList(VT);
5251 return getMachineNode(Opcode, dl, VTs, 0, 0);
5252}
5253
5254MachineSDNode *
5255SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT, SDValue Op1) {
5256 SDVTList VTs = getVTList(VT);
5257 SDValue Ops[] = { Op1 };
5258 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5259}
5260
5261MachineSDNode *
5262SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
5263 SDValue Op1, SDValue Op2) {
5264 SDVTList VTs = getVTList(VT);
5265 SDValue Ops[] = { Op1, Op2 };
5266 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5267}
5268
5269MachineSDNode *
5270SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
5271 SDValue Op1, SDValue Op2, SDValue Op3) {
5272 SDVTList VTs = getVTList(VT);
5273 SDValue Ops[] = { Op1, Op2, Op3 };
5274 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5275}
5276
5277MachineSDNode *
5278SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT,
5279 const SDValue *Ops, unsigned NumOps) {
5280 SDVTList VTs = getVTList(VT);
5281 return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
5282}
5283
5284MachineSDNode *
5285SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1, EVT VT2) {
5286 SDVTList VTs = getVTList(VT1, VT2);
5287 return getMachineNode(Opcode, dl, VTs, 0, 0);
5288}
5289
5290MachineSDNode *
5291SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5292 EVT VT1, EVT VT2, SDValue Op1) {
5293 SDVTList VTs = getVTList(VT1, VT2);
5294 SDValue Ops[] = { Op1 };
5295 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5296}
5297
5298MachineSDNode *
5299SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5300 EVT VT1, EVT VT2, SDValue Op1, SDValue Op2) {
5301 SDVTList VTs = getVTList(VT1, VT2);
5302 SDValue Ops[] = { Op1, Op2 };
5303 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5304}
5305
5306MachineSDNode *
5307SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5308 EVT VT1, EVT VT2, SDValue Op1,
5309 SDValue Op2, SDValue Op3) {
5310 SDVTList VTs = getVTList(VT1, VT2);
5311 SDValue Ops[] = { Op1, Op2, Op3 };
5312 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5313}
5314
5315MachineSDNode *
5316SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5317 EVT VT1, EVT VT2,
5318 const SDValue *Ops, unsigned NumOps) {
5319 SDVTList VTs = getVTList(VT1, VT2);
5320 return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
5321}
5322
5323MachineSDNode *
5324SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5325 EVT VT1, EVT VT2, EVT VT3,
5326 SDValue Op1, SDValue Op2) {
5327 SDVTList VTs = getVTList(VT1, VT2, VT3);
5328 SDValue Ops[] = { Op1, Op2 };
5329 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5330}
5331
5332MachineSDNode *
5333SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5334 EVT VT1, EVT VT2, EVT VT3,
5335 SDValue Op1, SDValue Op2, SDValue Op3) {
5336 SDVTList VTs = getVTList(VT1, VT2, VT3);
5337 SDValue Ops[] = { Op1, Op2, Op3 };
5338 return getMachineNode(Opcode, dl, VTs, Ops, array_lengthof(Ops));
5339}
5340
5341MachineSDNode *
5342SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5343 EVT VT1, EVT VT2, EVT VT3,
5344 const SDValue *Ops, unsigned NumOps) {
5345 SDVTList VTs = getVTList(VT1, VT2, VT3);
5346 return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
5347}
5348
5349MachineSDNode *
5350SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl, EVT VT1,
5351 EVT VT2, EVT VT3, EVT VT4,
5352 const SDValue *Ops, unsigned NumOps) {
5353 SDVTList VTs = getVTList(VT1, VT2, VT3, VT4);
5354 return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
5355}
5356
5357MachineSDNode *
5358SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc dl,
5359                              ArrayRef<EVT> ResultTys,
5360 const SDValue *Ops, unsigned NumOps) {
5361 SDVTList VTs = getVTList(&ResultTys[0], ResultTys.size());
5362 return getMachineNode(Opcode, dl, VTs, Ops, NumOps);
5363}
5364
5365MachineSDNode *
5366SelectionDAG::getMachineNode(unsigned Opcode, DebugLoc DL, SDVTList VTs,
5367 const SDValue *Ops, unsigned NumOps) {
5368 bool DoCSE = VTs.VTs[VTs.NumVTs-1] != MVT::Glue;
5369 MachineSDNode *N;
5370 void *IP = 0;
5371
5372 if (DoCSE) {
5373 FoldingSetNodeID ID;
5374 AddNodeIDNode(ID, ~Opcode, VTs, Ops, NumOps);
5375 IP = 0;
5376 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP)) {
5377 return cast<MachineSDNode>(UpdadeDebugLocOnMergedSDNode(E, DL));
5378 }
5379 }
5380
5381 // Allocate a new MachineSDNode.
5382 N = new (NodeAllocator) MachineSDNode(~Opcode, DL, VTs);
5383
5384 // Initialize the operands list.
5385 if (NumOps > array_lengthof(N->LocalOperands))
5386 // We're creating a final node that will live unmorphed for the
5387 // remainder of the current SelectionDAG iteration, so we can allocate
5388 // the operands directly out of a pool with no recycling metadata.
5389 N->InitOperands(OperandAllocator.Allocate<SDUse>(NumOps),
5390 Ops, NumOps);
5391 else
5392 N->InitOperands(N->LocalOperands, Ops, NumOps);
5393 N->OperandsNeedDelete = false;
5394
5395 if (DoCSE)
5396 CSEMap.InsertNode(N, IP);
5397
5398 AllNodes.push_back(N);
5399#ifndef NDEBUG
5400 VerifyMachineNode(N);
5401#endif
5402 return N;
5403}
5404
5405/// getTargetExtractSubreg - A convenience function for creating
5406/// TargetOpcode::EXTRACT_SUBREG nodes.
5407SDValue
5408SelectionDAG::getTargetExtractSubreg(int SRIdx, DebugLoc DL, EVT VT,
5409 SDValue Operand) {
5410 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5411 SDNode *Subreg = getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL,
5412 VT, Operand, SRIdxVal);
5413 return SDValue(Subreg, 0);
5414}
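// Sketch (hypothetical index): extracting the low 32 bits of a 64-bit value
// on a target that defines a suitable subregister index, e.g. X86's
// sub_32bit.
//
//   SDValue Lo = DAG.getTargetExtractSubreg(X86::sub_32bit, dl, MVT::i32,
//                                           Val64);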
5415
5416/// getTargetInsertSubreg - A convenience function for creating
5417/// TargetOpcode::INSERT_SUBREG nodes.
5418SDValue
5419SelectionDAG::getTargetInsertSubreg(int SRIdx, DebugLoc DL, EVT VT,
5420 SDValue Operand, SDValue Subreg) {
5421 SDValue SRIdxVal = getTargetConstant(SRIdx, MVT::i32);
5422 SDNode *Result = getMachineNode(TargetOpcode::INSERT_SUBREG, DL,
5423 VT, Operand, Subreg, SRIdxVal);
5424 return SDValue(Result, 0);
5425}
5426
5427/// getNodeIfExists - Get the specified node if it's already available, or
5428/// else return NULL.
5429SDNode *SelectionDAG::getNodeIfExists(unsigned Opcode, SDVTList VTList,
5430 const SDValue *Ops, unsigned NumOps) {
5431 if (VTList.VTs[VTList.NumVTs-1] != MVT::Glue) {
5432 FoldingSetNodeID ID;
5433 AddNodeIDNode(ID, Opcode, VTList, Ops, NumOps);
5434 void *IP = 0;
5435 if (SDNode *E = CSEMap.FindNodeOrInsertPos(ID, IP))
5436 return E;
5437 }
5438 return NULL;
5439}
5440
5441/// getDbgValue - Creates a SDDbgValue node.
5442///
5443SDDbgValue *
5444SelectionDAG::getDbgValue(MDNode *MDPtr, SDNode *N, unsigned R, uint64_t Off,
5445 DebugLoc DL, unsigned O) {
5446 return new (Allocator) SDDbgValue(MDPtr, N, R, Off, DL, O);
5447}
5448
5449SDDbgValue *
5450SelectionDAG::getDbgValue(MDNode *MDPtr, const Value *C, uint64_t Off,
5451 DebugLoc DL, unsigned O) {
5452 return new (Allocator) SDDbgValue(MDPtr, C, Off, DL, O);
5453}
5454
5455SDDbgValue *
5456SelectionDAG::getDbgValue(MDNode *MDPtr, unsigned FI, uint64_t Off,
5457 DebugLoc DL, unsigned O) {
5458 return new (Allocator) SDDbgValue(MDPtr, FI, Off, DL, O);
5459}
5460
5461namespace {
5462
5463/// RAUWUpdateListener - Helper for ReplaceAllUsesWith - When the node
5464/// pointed to by a use iterator is deleted, increment the use iterator
5465/// so that it doesn't dangle.
5466///
5467class RAUWUpdateListener : public SelectionDAG::DAGUpdateListener {
5468 SDNode::use_iterator &UI;
5469 SDNode::use_iterator &UE;
5470
5471 virtual void NodeDeleted(SDNode *N, SDNode *E) {
5472 // Increment the iterator as needed.
5473 while (UI != UE && N == *UI)
5474 ++UI;
5475 }
5476
5477public:
5478 RAUWUpdateListener(SelectionDAG &d,
5479 SDNode::use_iterator &ui,
5480 SDNode::use_iterator &ue)
5481 : SelectionDAG::DAGUpdateListener(d), UI(ui), UE(ue) {}
5482};
5483
5484}
5485
5486/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5487/// This can cause recursive merging of nodes in the DAG.
5488///
5489/// This version assumes From has a single result value.
5490///
5491void SelectionDAG::ReplaceAllUsesWith(SDValue FromN, SDValue To) {
5492 SDNode *From = FromN.getNode();
5493 assert(From->getNumValues() == 1 && FromN.getResNo() == 0 &&
5494 "Cannot replace with this method!");
5495   assert(From != To.getNode() && "Cannot replace uses of a value with itself");
5496
5497 // Iterate over all the existing uses of From. New uses will be added
5498 // to the beginning of the use list, which we avoid visiting.
5499 // This specifically avoids visiting uses of From that arise while the
5500 // replacement is happening, because any such uses would be the result
5501 // of CSE: If an existing node looks like From after one of its operands
5502   // is replaced by To, we don't want to replace all of its users with To
5503   // as well. See PR3018 for more info.
5504 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5505 RAUWUpdateListener Listener(*this, UI, UE);
5506 while (UI != UE) {
5507 SDNode *User = *UI;
5508
5509 // This node is about to morph, remove its old self from the CSE maps.
5510 RemoveNodeFromCSEMaps(User);
5511
5512 // A user can appear in a use list multiple times, and when this
5513 // happens the uses are usually next to each other in the list.
5514 // To help reduce the number of CSE recomputations, process all
5515 // the uses of this user that we can find this way.
5516 do {
5517 SDUse &Use = UI.getUse();
5518 ++UI;
5519 Use.set(To);
5520 } while (UI != UE && *UI == User);
5521
5522 // Now that we have modified User, add it back to the CSE maps. If it
5523 // already exists there, recursively merge the results together.
5524 AddModifiedNodeToCSEMaps(User);
5525 }
5526
5527 // If we just RAUW'd the root, take note.
5528 if (FromN == getRoot())
5529 setRoot(To);
5530}
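// Usage sketch: a typical fold replaces a computed value and lets the DAG
// merge any users that become redundant. AddNode here is hypothetical:
//
//   // Fold (add X, 0) -> X by redirecting every user of the add.
//   DAG.ReplaceAllUsesWith(SDValue(AddNode, 0), AddNode->getOperand(0));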
5531
5532/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5533/// This can cause recursive merging of nodes in the DAG.
5534///
5535/// This version assumes that for each value of From, there is a
5536/// corresponding value in To in the same position with the same type.
5537///
5538void SelectionDAG::ReplaceAllUsesWith(SDNode *From, SDNode *To) {
5539#ifndef NDEBUG
5540 for (unsigned i = 0, e = From->getNumValues(); i != e; ++i)
5541 assert((!From->hasAnyUseOfValue(i) ||
5542 From->getValueType(i) == To->getValueType(i)) &&
5543 "Cannot use this version of ReplaceAllUsesWith!");
5544#endif
5545
5546 // Handle the trivial case.
5547 if (From == To)
5548 return;
5549
5550 // Iterate over just the existing users of From. See the comments in
5551 // the ReplaceAllUsesWith above.
5552 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5553 RAUWUpdateListener Listener(*this, UI, UE);
5554 while (UI != UE) {
5555 SDNode *User = *UI;
5556
5557 // This node is about to morph, remove its old self from the CSE maps.
5558 RemoveNodeFromCSEMaps(User);
5559
5560 // A user can appear in a use list multiple times, and when this
5561 // happens the uses are usually next to each other in the list.
5562 // To help reduce the number of CSE recomputations, process all
5563 // the uses of this user that we can find this way.
5564 do {
5565 SDUse &Use = UI.getUse();
5566 ++UI;
5567 Use.setNode(To);
5568 } while (UI != UE && *UI == User);
5569
5570 // Now that we have modified User, add it back to the CSE maps. If it
5571 // already exists there, recursively merge the results together.
5572 AddModifiedNodeToCSEMaps(User);
5573 }
5574
5575 // If we just RAUW'd the root, take note.
5576 if (From == getRoot().getNode())
5577 setRoot(SDValue(To, getRoot().getResNo()));
5578}
5579
5580/// ReplaceAllUsesWith - Modify anything using 'From' to use 'To' instead.
5581/// This can cause recursive merging of nodes in the DAG.
5582///
5583/// This version can replace From with any result values. To must match the
5584/// number and types of values returned by From.
5585void SelectionDAG::ReplaceAllUsesWith(SDNode *From, const SDValue *To) {
5586 if (From->getNumValues() == 1) // Handle the simple case efficiently.
5587 return ReplaceAllUsesWith(SDValue(From, 0), To[0]);
5588
5589 // Iterate over just the existing users of From. See the comments in
5590 // the ReplaceAllUsesWith above.
5591 SDNode::use_iterator UI = From->use_begin(), UE = From->use_end();
5592 RAUWUpdateListener Listener(*this, UI, UE);
5593 while (UI != UE) {
5594 SDNode *User = *UI;
5595
5596 // This node is about to morph, remove its old self from the CSE maps.
5597 RemoveNodeFromCSEMaps(User);
5598
5599 // A user can appear in a use list multiple times, and when this
5600 // happens the uses are usually next to each other in the list.
5601 // To help reduce the number of CSE recomputations, process all
5602 // the uses of this user that we can find this way.
5603 do {
5604 SDUse &Use = UI.getUse();
5605 const SDValue &ToOp = To[Use.getResNo()];
5606 ++UI;
5607 Use.set(ToOp);
5608 } while (UI != UE && *UI == User);
5609
5610 // Now that we have modified User, add it back to the CSE maps. If it
5611 // already exists there, recursively merge the results together.
5612 AddModifiedNodeToCSEMaps(User);
5613 }
5614
5615 // If we just RAUW'd the root, take note.
5616 if (From == getRoot().getNode())
5617 setRoot(SDValue(To[getRoot().getResNo()]));
5618}
5619
5620/// ReplaceAllUsesOfValueWith - Replace any uses of From with To, leaving
5621/// uses of other values produced by From.getNode() alone.
5623void SelectionDAG::ReplaceAllUsesOfValueWith(SDValue From, SDValue To) {
5624 // Handle the really simple, really trivial case efficiently.
5625 if (From == To) return;
5626
5627 // Handle the simple, trivial, case efficiently.
5628 if (From.getNode()->getNumValues() == 1) {
5629 ReplaceAllUsesWith(From, To);
5630 return;
5631 }
5632
5633 // Iterate over just the existing users of From. See the comments in
5634 // the ReplaceAllUsesWith above.
5635 SDNode::use_iterator UI = From.getNode()->use_begin(),
5636 UE = From.getNode()->use_end();
5637 RAUWUpdateListener Listener(*this, UI, UE);
5638 while (UI != UE) {
5639 SDNode *User = *UI;
5640 bool UserRemovedFromCSEMaps = false;
5641
5642 // A user can appear in a use list multiple times, and when this
5643 // happens the uses are usually next to each other in the list.
5644 // To help reduce the number of CSE recomputations, process all
5645 // the uses of this user that we can find this way.
5646 do {
5647 SDUse &Use = UI.getUse();
5648
5649 // Skip uses of different values from the same node.
5650 if (Use.getResNo() != From.getResNo()) {
5651 ++UI;
5652 continue;
5653 }
5654
5655 // If this node hasn't been modified yet, it's still in the CSE maps,
5656 // so remove its old self from the CSE maps.
5657 if (!UserRemovedFromCSEMaps) {
5658 RemoveNodeFromCSEMaps(User);
5659 UserRemovedFromCSEMaps = true;
5660 }
5661
5662 ++UI;
5663 Use.set(To);
5664 } while (UI != UE && *UI == User);
5665
5666 // We are iterating over all uses of the From node, so if a use
5667 // doesn't use the specific value, no changes are made.
5668 if (!UserRemovedFromCSEMaps)
5669 continue;
5670
5671 // Now that we have modified User, add it back to the CSE maps. If it
5672 // already exists there, recursively merge the results together.
5673 AddModifiedNodeToCSEMaps(User);
5674 }
5675
5676 // If we just RAUW'd the root, take note.
5677 if (From == getRoot())
5678 setRoot(To);
5679}
5680
5681namespace {
5682 /// UseMemo - This class is used by SelectionDAG::ReplaceAllUsesOfValuesWith
5683 /// to record information about a use.
5684 struct UseMemo {
5685 SDNode *User;
5686 unsigned Index;
5687 SDUse *Use;
5688 };
5689
5690 /// operator< - Sort Memos by User.
5691 bool operator<(const UseMemo &L, const UseMemo &R) {
5692 return (intptr_t)L.User < (intptr_t)R.User;
5693 }
5694}
5695
5696/// ReplaceAllUsesOfValuesWith - Replace any uses of From with To, leaving
5697/// uses of other values produced by From.getNode() alone. The same value
5698/// may appear in both the From and To lists.
5700void SelectionDAG::ReplaceAllUsesOfValuesWith(const SDValue *From,
5701 const SDValue *To,
5702                                               unsigned Num) {
5703 // Handle the simple, trivial case efficiently.
5704 if (Num == 1)
5705 return ReplaceAllUsesOfValueWith(*From, *To);
5706
5707   // Collect all the uses up front and record them. This insulates the
5708   // walk from new uses that are introduced during the
5709   // replacement process.
5710 SmallVector<UseMemo, 4> Uses;
5711 for (unsigned i = 0; i != Num; ++i) {
5712 unsigned FromResNo = From[i].getResNo();
5713 SDNode *FromNode = From[i].getNode();
5714 for (SDNode::use_iterator UI = FromNode->use_begin(),
5715 E = FromNode->use_end(); UI != E; ++UI) {
5716 SDUse &Use = UI.getUse();
5717 if (Use.getResNo() == FromResNo) {
5718 UseMemo Memo = { *UI, i, &Use };
5719 Uses.push_back(Memo);
5720 }
5721 }
5722 }
5723
5724 // Sort the uses, so that all the uses from a given User are together.
5725 std::sort(Uses.begin(), Uses.end());
5726
5727 for (unsigned UseIndex = 0, UseIndexEnd = Uses.size();
5728 UseIndex != UseIndexEnd; ) {
5729 // We know that this user uses some value of From. If it is the right
5730 // value, update it.
5731 SDNode *User = Uses[UseIndex].User;
5732
5733 // This node is about to morph, remove its old self from the CSE maps.
5734 RemoveNodeFromCSEMaps(User);
5735
5736 // The Uses array is sorted, so all the uses for a given User
5737 // are next to each other in the list.
5738 // To help reduce the number of CSE recomputations, process all
5739 // the uses of this user that we can find this way.
5740 do {
5741 unsigned i = Uses[UseIndex].Index;
5742 SDUse &Use = *Uses[UseIndex].Use;
5743 ++UseIndex;
5744
5745 Use.set(To[i]);
5746 } while (UseIndex != UseIndexEnd && Uses[UseIndex].User == User);
5747
5748 // Now that we have modified User, add it back to the CSE maps. If it
5749 // already exists there, recursively merge the results together.
5750 AddModifiedNodeToCSEMaps(User);
5751 }
5752}
5753
5754/// AssignTopologicalOrder - Assign a unique node id for each node in the DAG
5755/// based on their topological order. It re-links the node list in place into
5756/// that order and returns the maximum id assigned.
5757unsigned SelectionDAG::AssignTopologicalOrder() {
5758
5759 unsigned DAGSize = 0;
5760
5761 // SortedPos tracks the progress of the algorithm. Nodes before it are
5762 // sorted, nodes after it are unsorted. When the algorithm completes
5763 // it is at the end of the list.
5764 allnodes_iterator SortedPos = allnodes_begin();
5765
5766 // Visit all the nodes. Move nodes with no operands to the front of
5767 // the list immediately. Annotate nodes that do have operands with their
5768 // operand count. Before we do this, the Node Id fields of the nodes
5769 // may contain arbitrary values. After, the Node Id fields for nodes
5770 // before SortedPos will contain the topological sort index, and the
5771   // Node Id fields for nodes at SortedPos and after will contain the
5772 // count of outstanding operands.
5773 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ) {
5774 SDNode *N = I++;
5775 checkForCycles(N);
5776 unsigned Degree = N->getNumOperands();
5777 if (Degree == 0) {
5778       // A node with no operands: move it into sorted position immediately.
5779 N->setNodeId(DAGSize++);
5780 allnodes_iterator Q = N;
5781 if (Q != SortedPos)
5782 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(Q));
5783 assert(SortedPos != AllNodes.end() && "Overran node list");
5784 ++SortedPos;
5785 } else {
5786 // Temporarily use the Node Id as scratch space for the degree count.
5787 N->setNodeId(Degree);
5788 }
5789 }
5790
5791 // Visit all the nodes. As we iterate, move nodes into sorted order,
5792 // such that by the time the end is reached all nodes will be sorted.
5793 for (allnodes_iterator I = allnodes_begin(),E = allnodes_end(); I != E; ++I) {
5794 SDNode *N = I;
5795 checkForCycles(N);
5796     // N is in sorted position, so each of its users now has one less
5797     // unsorted operand.
5798 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
5799 UI != UE; ++UI) {
5800 SDNode *P = *UI;
5801 unsigned Degree = P->getNodeId();
5802 assert(Degree != 0 && "Invalid node degree");
5803 --Degree;
5804 if (Degree == 0) {
5805         // All of P's operands are sorted, so P may be sorted now.
5806 P->setNodeId(DAGSize++);
5807 if (P != SortedPos)
5808 SortedPos = AllNodes.insert(SortedPos, AllNodes.remove(P));
5809 assert(SortedPos != AllNodes.end() && "Overran node list");
5810 ++SortedPos;
5811 } else {
5812 // Update P's outstanding operand count.
5813 P->setNodeId(Degree);
5814 }
5815 }
5816 if (I == SortedPos) {
5817#ifndef NDEBUG
5818 SDNode *S = ++I;
5819 dbgs() << "Overran sorted position:\n";
5820 S->dumprFull();
5821#endif
5822 llvm_unreachable(0);
5823 }
5824 }
5825
5826 assert(SortedPos == AllNodes.end() &&
5827 "Topological sort incomplete!");
5828 assert(AllNodes.front().getOpcode() == ISD::EntryToken &&
5829 "First node in topological sort is not the entry token!");
5830 assert(AllNodes.front().getNodeId() == 0 &&
5831 "First node in topological sort has non-zero id!");
5832 assert(AllNodes.front().getNumOperands() == 0 &&
5833 "First node in topological sort has operands!");
5834 assert(AllNodes.back().getNodeId() == (int)DAGSize-1 &&
5835          "Last node in topological sort has unexpected id!");
5836 assert(AllNodes.back().use_empty() &&
5837          "Last node in topological sort has users!");
5838 assert(DAGSize == allnodes_size() && "Node count mismatch!");
5839 return DAGSize;
5840}
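// Sketch of consuming the order (the node list itself is re-linked in place):
//
//   DAG.AssignTopologicalOrder();
//   for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
//        E = DAG.allnodes_end(); I != E; ++I) {
//     // Every operand of *I now has a smaller NodeId than *I itself.
//   }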
5841
5842/// AssignOrdering - Assign an order to the SDNode.
5843void SelectionDAG::AssignOrdering(const SDNode *SD, unsigned Order) {
5844 assert(SD && "Trying to assign an order to a null node!");
5845 Ordering->add(SD, Order);
5846}
5847
5848/// GetOrdering - Get the order for the SDNode.
5849unsigned SelectionDAG::GetOrdering(const SDNode *SD) const {
5850 assert(SD && "Trying to get the order of a null node!");
5851 return Ordering->getOrder(SD);
5852}
5853
5854/// AddDbgValue - Add a dbg_value SDNode. If SD is non-null that means the
5855/// value is produced by SD.
5856void SelectionDAG::AddDbgValue(SDDbgValue *DB, SDNode *SD, bool isParameter) {
5857 DbgInfo->add(DB, SD, isParameter);
5858 if (SD)
5859 SD->setHasDebugValue(true);
5860}
5861
5862/// TransferDbgValues - Transfer SDDbgValues.
5863void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
5864 if (From == To || !From.getNode()->getHasDebugValue())
5865 return;
5866 SDNode *FromNode = From.getNode();
5867 SDNode *ToNode = To.getNode();
5868 ArrayRef<SDDbgValue *> DVs = GetDbgValues(FromNode);
5869 SmallVector<SDDbgValue *, 2> ClonedDVs;
5870 for (ArrayRef<SDDbgValue *>::iterator I = DVs.begin(), E = DVs.end();
5871 I != E; ++I) {
5872 SDDbgValue *Dbg = *I;
5873 if (Dbg->getKind() == SDDbgValue::SDNODE) {
5874 SDDbgValue *Clone = getDbgValue(Dbg->getMDPtr(), ToNode, To.getResNo(),
5875 Dbg->getOffset(), Dbg->getDebugLoc(),
5876 Dbg->getOrder());
5877 ClonedDVs.push_back(Clone);
5878 }
5879 }
5880 for (SmallVector<SDDbgValue *, 2>::iterator I = ClonedDVs.begin(),
5881 E = ClonedDVs.end(); I != E; ++I)
5882 AddDbgValue(*I, ToNode, false);
5883}
5884
5885//===----------------------------------------------------------------------===//
5886// SDNode Class
5887//===----------------------------------------------------------------------===//
5888
5889HandleSDNode::~HandleSDNode() {
5890 DropOperands();
5891}
5892
5893GlobalAddressSDNode::GlobalAddressSDNode(unsigned Opc, DebugLoc DL,
5894 const GlobalValue *GA,
5895 EVT VT, int64_t o, unsigned char TF)
5896 : SDNode(Opc, DL, getSDVTList(VT)), Offset(o), TargetFlags(TF) {
5897 TheGlobal = GA;
5898}
5899
5900MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs, EVT memvt,
5901 MachineMemOperand *mmo)
5902 : SDNode(Opc, dl, VTs), MemoryVT(memvt), MMO(mmo) {
5903 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
5904 MMO->isNonTemporal(), MMO->isInvariant());
5905 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
5906 assert(isNonTemporal() == MMO->isNonTemporal() &&
5907 "Non-temporal encoding error!");
5908 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
5909}
5910
5911MemSDNode::MemSDNode(unsigned Opc, DebugLoc dl, SDVTList VTs,
5912 const SDValue *Ops, unsigned NumOps, EVT memvt,
5913 MachineMemOperand *mmo)
5914 : SDNode(Opc, dl, VTs, Ops, NumOps),
5915 MemoryVT(memvt), MMO(mmo) {
5916 SubclassData = encodeMemSDNodeFlags(0, ISD::UNINDEXED, MMO->isVolatile(),
5917 MMO->isNonTemporal(), MMO->isInvariant());
5918 assert(isVolatile() == MMO->isVolatile() && "Volatile encoding error!");
5919 assert(memvt.getStoreSize() == MMO->getSize() && "Size mismatch!");
5920}
5921
5922/// Profile - Gather unique data for the node.
5923///
5924void SDNode::Profile(FoldingSetNodeID &ID) const {
5925 AddNodeIDNode(ID, this);
5926}
5927
5928namespace {
5929 struct EVTArray {
5930 std::vector<EVT> VTs;
5931
5932 EVTArray() {
5933 VTs.reserve(MVT::LAST_VALUETYPE);
5934 for (unsigned i = 0; i < MVT::LAST_VALUETYPE; ++i)
5935 VTs.push_back(MVT((MVT::SimpleValueType)i));
5936 }
5937 };
5938}
5939
5940static ManagedStatic<std::set<EVT, EVT::compareRawBits> > EVTs;
5941static ManagedStatic<EVTArray> SimpleVTArray;
5942static ManagedStatic<sys::SmartMutex<true> > VTMutex;
5943
5944/// getValueTypeList - Return a pointer to the specified value type.
5945///
5946const EVT *SDNode::getValueTypeList(EVT VT) {
5947 if (VT.isExtended()) {
5948 sys::SmartScopedLock<true> Lock(*VTMutex);
5949 return &(*EVTs->insert(VT).first);
5950 } else {
5951 assert(VT.getSimpleVT() < MVT::LAST_VALUETYPE &&
5952 "Value type out of range!");
5953 return &SimpleVTArray->VTs[VT.getSimpleVT().SimpleTy];
5954 }
5955}
5956
5957/// hasNUsesOfValue - Return true if there are exactly NUSES uses of the
5958/// indicated value. This method ignores uses of other values defined by this
5959/// operation.
5960bool SDNode::hasNUsesOfValue(unsigned NUses, unsigned Value) const {
5961 assert(Value < getNumValues() && "Bad value!");
5962
5963 // TODO: Only iterate over uses of a given value of the node
5964 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI) {
5965 if (UI.getUse().getResNo() == Value) {
5966 if (NUses == 0)
5967 return false;
5968 --NUses;
5969 }
5970 }
5971
5972 // Found exactly the right number of uses?
5973 return NUses == 0;
5974}
5975
5976
5977/// hasAnyUseOfValue - Return true if there is any use of the indicated
5978/// value. This method ignores uses of other values defined by this operation.
5979bool SDNode::hasAnyUseOfValue(unsigned Value) const {
5980 assert(Value < getNumValues() && "Bad value!");
5981
5982 for (SDNode::use_iterator UI = use_begin(), E = use_end(); UI != E; ++UI)
5983 if (UI.getUse().getResNo() == Value)
5984 return true;
5985
5986 return false;
5987}
5988
5989
5990/// isOnlyUserOf - Return true if this node is the only user of N.
5991///
5992bool SDNode::isOnlyUserOf(SDNode *N) const {
5993 bool Seen = false;
5994 for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
5995 SDNode *User = *I;
5996 if (User == this)
5997 Seen = true;
5998 else
5999 return false;
6000 }
6001
6002 return Seen;
6003}
6004
6005/// isOperandOf - Return true if this value is an operand of N.
6006///
6007bool SDValue::isOperandOf(SDNode *N) const {
6008 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6009 if (*this == N->getOperand(i))
6010 return true;
6011 return false;
6012}
6013
6014bool SDNode::isOperandOf(SDNode *N) const {
6015 for (unsigned i = 0, e = N->NumOperands; i != e; ++i)
6016 if (this == N->OperandList[i].getNode())
6017 return true;
6018 return false;
6019}
6020
6021/// reachesChainWithoutSideEffects - Return true if this operand (which must
6022/// be a chain) reaches the specified operand without crossing any
6023/// side-effecting instructions on any chain path. In practice, this looks
6024/// through token factors and non-volatile loads. In order to remain efficient,
6025/// this only looks a couple of nodes deep; it does not do an exhaustive search.
6026bool SDValue::reachesChainWithoutSideEffects(SDValue Dest,
6027 unsigned Depth) const {
6028 if (*this == Dest) return true;
6029
6030 // Don't search too deeply, we just want to be able to see through
6031 // TokenFactor's etc.
6032 if (Depth == 0) return false;
6033
6034 // If this is a token factor, all inputs to the TF happen in parallel. If any
6035 // of the operands of the TF does not reach dest, then we cannot do the xform.
6036 if (getOpcode() == ISD::TokenFactor) {
6037 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6038 if (!getOperand(i).reachesChainWithoutSideEffects(Dest, Depth-1))
6039 return false;
6040 return true;
6041 }
6042
6043 // Loads don't have side effects, look through them.
6044 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(*this)) {
6045 if (!Ld->isVolatile())
6046 return Ld->getChain().reachesChainWithoutSideEffects(Dest, Depth-1);
6047 }
6048 return false;
6049}
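// Hedged sketch of a typical query during a memory combine (Load and Dest are
// hypothetical): ask whether the load's chain result (value 1 of a non-indexed
// load) reaches Dest with only TokenFactors and non-volatile loads in between.
//
//   if (SDValue(Load, 1).reachesChainWithoutSideEffects(Dest)) {
//     // Safe to reorder/fold with respect to Dest.
//   }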
6050
6051/// hasPredecessor - Return true if N is a predecessor of this node.
6052/// N is either an operand of this node, or can be reached by recursively
6053/// traversing up the operands.
6054/// NOTE: This is an expensive method. Use it carefully.
6055bool SDNode::hasPredecessor(const SDNode *N) const {
6056 SmallPtrSet<const SDNode *, 32> Visited;
6057 SmallVector<const SDNode *, 16> Worklist;
6058 return hasPredecessorHelper(N, Visited, Worklist);
6059}
6060
6061bool SDNode::hasPredecessorHelper(const SDNode *N,
6062 SmallPtrSet<const SDNode *, 32> &Visited,
6063 SmallVector<const SDNode *, 16> &Worklist) const {
6064 if (Visited.empty()) {
6065 Worklist.push_back(this);
6066 } else {
6067 // Take a look in the visited set. If we've already encountered this node
6068 // we needn't search further.
6069 if (Visited.count(N))
6070 return true;
6071 }
6072
6073 // Haven't visited N yet. Continue the search.
6074 while (!Worklist.empty()) {
6075 const SDNode *M = Worklist.pop_back_val();
6076 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
6077 SDNode *Op = M->getOperand(i).getNode();
6078 if (Visited.insert(Op))
6079 Worklist.push_back(Op);
6080 if (Op == N)
6081 return true;
6082 }
6083 }
6084
6085 return false;
6086}
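// Illustrative sketch, not part of upstream LLVM: callers making many
// predecessor queries against the same subgraph can share Visited/Worklist so
// each node is expanded at most once across all queries. Candidates is a
// hypothetical container of nodes to test:
//
//   SmallPtrSet<const SDNode *, 32> Visited;
//   SmallVector<const SDNode *, 16> Worklist;
//   for (unsigned i = 0, e = Candidates.size(); i != e; ++i)
//     if (Root->hasPredecessorHelper(Candidates[i], Visited, Worklist))
//       ...  // Candidates[i] is reachable through Root's operands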
6087
6088uint64_t SDNode::getConstantOperandVal(unsigned Num) const {
6089 assert(Num < NumOperands && "Invalid child # of SDNode!");
6090 return cast<ConstantSDNode>(OperandList[Num])->getZExtValue();
6091}
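// Illustrative sketch, not part of upstream LLVM: for a node built as
//
//   SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Vec,
//                             DAG.getConstant(3, TLI.getPointerTy()));
//
// Elt.getNode()->getConstantOperandVal(1) returns 3. The cast<> inside
// asserts if the requested operand is not actually a ConstantSDNode.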
6092
6093SDValue SelectionDAG::UnrollVectorOp(SDNode *N, unsigned ResNE) {
6094 assert(N->getNumValues() == 1 &&
6095 "Can't unroll a vector with multiple results!");
6096
6097 EVT VT = N->getValueType(0);
6098 unsigned NE = VT.getVectorNumElements();
6099 EVT EltVT = VT.getVectorElementType();
6100 DebugLoc dl = N->getDebugLoc();
6101
6102 SmallVector<SDValue, 8> Scalars;
6103 SmallVector<SDValue, 4> Operands(N->getNumOperands());
6104
6105 // If ResNE is 0, fully unroll the vector op.
6106 if (ResNE == 0)
6107 ResNE = NE;
6108 else if (NE > ResNE)
6109 NE = ResNE;
6110
6111 unsigned i;
6112 for (i = 0; i != NE; ++i) {
6113 for (unsigned j = 0, e = N->getNumOperands(); j != e; ++j) {
6114 SDValue Operand = N->getOperand(j);
6115 EVT OperandVT = Operand.getValueType();
6116 if (OperandVT.isVector()) {
6117 // A vector operand; extract a single element.
6118 EVT OperandEltVT = OperandVT.getVectorElementType();
6119 Operands[j] = getNode(ISD::EXTRACT_VECTOR_ELT, dl,
6120 OperandEltVT,
6121 Operand,
6122 getConstant(i, TLI.getPointerTy()));
6123 } else {
6124 // A scalar operand; just use it as is.
6125 Operands[j] = Operand;
6126 }
6127 }
6128
6129 switch (N->getOpcode()) {
6130 default:
6131 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6132 &Operands[0], Operands.size()));
6133 break;
6134 case ISD::VSELECT:
6135 Scalars.push_back(getNode(ISD::SELECT, dl, EltVT,
6136 &Operands[0], Operands.size()));
6137 break;
6138 case ISD::SHL:
6139 case ISD::SRA:
6140 case ISD::SRL:
6141 case ISD::ROTL:
6142 case ISD::ROTR:
6143 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT, Operands[0],
6144 getShiftAmountOperand(Operands[0].getValueType(),
6145 Operands[1])));
6146 break;
6147 case ISD::SIGN_EXTEND_INREG:
6148 case ISD::FP_ROUND_INREG: {
6149 EVT ExtVT = cast<VTSDNode>(Operands[1])->getVT().getVectorElementType();
6150 Scalars.push_back(getNode(N->getOpcode(), dl, EltVT,
6151 Operands[0],
6152 getValueType(ExtVT)));
6153 }
6154 }
6155 }
6156
6157 for (; i < ResNE; ++i)
6158 Scalars.push_back(getUNDEF(EltVT));
6159
6160 return getNode(ISD::BUILD_VECTOR, dl,
6161 EVT::getVectorVT(*getContext(), EltVT, ResNE),
6162 &Scalars[0], Scalars.size());
6163}
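// Illustrative sketch, not part of upstream LLVM: unrolling a <4 x i32> ADD
// with ResNE == 0 produces four scalar adds over extracted lanes, i.e.
//
//   BUILD_VECTOR (add (extract_vector_elt X, 0), (extract_vector_elt Y, 0)),
//                (add (extract_vector_elt X, 1), (extract_vector_elt Y, 1)),
//                ... lanes 2 and 3 likewise ...
//
// whereas ResNE == 8 would compute the first four lanes and pad the remaining
// four with UNDEF.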
6164
6165
6166/// isConsecutiveLoad - Return true if LD is loading 'Bytes' bytes from a
6167/// location that is 'Dist' units away from the location that the 'Base' load
6168/// is loading from.
6169bool SelectionDAG::isConsecutiveLoad(LoadSDNode *LD, LoadSDNode *Base,
6170 unsigned Bytes, int Dist) const {
6171 if (LD->getChain() != Base->getChain())
6172 return false;
6173 EVT VT = LD->getValueType(0);
6174 if (VT.getSizeInBits() / 8 != Bytes)
6175 return false;
6176
6177 SDValue Loc = LD->getOperand(1);
6178 SDValue BaseLoc = Base->getOperand(1);
6179 if (Loc.getOpcode() == ISD::FrameIndex) {
6180 if (BaseLoc.getOpcode() != ISD::FrameIndex)
6181 return false;
6182 const MachineFrameInfo *MFI = getMachineFunction().getFrameInfo();
6183 int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
6184 int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
6185 int FS = MFI->getObjectSize(FI);
6186 int BFS = MFI->getObjectSize(BFI);
6187 if (FS != BFS || FS != (int)Bytes) return false;
6188 return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Bytes);
6189 }
6190
6191 // Handle X+C
6192 if (isBaseWithConstantOffset(Loc) && Loc.getOperand(0) == BaseLoc &&
6193 cast<ConstantSDNode>(Loc.getOperand(1))->getSExtValue() == Dist*Bytes)
6194 return true;
6195
6196 const GlobalValue *GV1 = NULL;
6197 const GlobalValue *GV2 = NULL;
6198 int64_t Offset1 = 0;
6199 int64_t Offset2 = 0;
6200 bool isGA1 = TLI.isGAPlusOffset(Loc.getNode(), GV1, Offset1);
6201 bool isGA2 = TLI.isGAPlusOffset(BaseLoc.getNode(), GV2, Offset2);
6202 if (isGA1 && isGA2 && GV1 == GV2)
6203 return Offset1 == (Offset2 + Dist*Bytes);
6204 return false;
6205}
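// Illustrative sketch, not part of upstream LLVM: if Base loads 4 bytes from
// GV+8 on the same chain, then a 4-byte load LD from GV+12 satisfies
//
//   isConsecutiveLoad(LD, Base, /*Bytes=*/4, /*Dist=*/1)
//
// because Offset1 (12) == Offset2 (8) + Dist*Bytes. A negative Dist matches a
// load the same distance *before* the base in the same way.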
6206
6207
6208/// InferPtrAlignment - Infer alignment of a load / store address. Return 0 if
6209/// it cannot be inferred.
6210unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
6211 // If this is a GlobalAddress + cst, return the alignment.
6212 const GlobalValue *GV;
6213 int64_t GVOffset = 0;
6214 if (TLI.isGAPlusOffset(Ptr.getNode(), GV, GVOffset)) {
6215 unsigned PtrWidth = TLI.getPointerTy().getSizeInBits();
6216 APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
6217 llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
6218 TLI.getDataLayout());
6219 unsigned AlignBits = KnownZero.countTrailingOnes();
6220 unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
6221 if (Align)
6222 return MinAlign(Align, GVOffset);
6223 }
6224
6225 // If this is a direct reference to a stack slot, use information about the
6226 // stack slot's alignment.
6227 int FrameIdx = 1 << 31;
6228 int64_t FrameOffset = 0;
6229 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(Ptr)) {
6230 FrameIdx = FI->getIndex();
6231 } else if (isBaseWithConstantOffset(Ptr) &&
6232 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
6233 // Handle FI+Cst
6234 FrameIdx = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
6235 FrameOffset = Ptr.getConstantOperandVal(1);
6236 }
6237
6238 if (FrameIdx != (1 << 31)) {
6239 const MachineFrameInfo &MFI = *getMachineFunction().getFrameInfo();
6240 unsigned FIInfoAlign = MinAlign(MFI.getObjectAlignment(FrameIdx),
6241 FrameOffset);
6242 return FIInfoAlign;
6243 }
6244
6245 return 0;
6246}
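// Illustrative sketch, not part of upstream LLVM: if the known-zero low bits
// of a global's address establish 16-byte alignment and the folded constant
// offset is 8, MinAlign(16, 8) reports 8. A caller might use the result as:
//
//   unsigned Align = DAG.InferPtrAlignment(Ptr);
//   if (Align > LD->getAlignment())
//     ...  // re-annotate the load with the stronger alignment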
6247
6248// getAddressSpace - Return the address space this GlobalAddress belongs to.
6249unsigned GlobalAddressSDNode::getAddressSpace() const {
6250 return getGlobal()->getType()->getAddressSpace();
6251}
6252
6253
6254Type *ConstantPoolSDNode::getType() const {
6255 if (isMachineConstantPoolEntry())
6256 return Val.MachineCPVal->getType();
6257 return Val.ConstVal->getType();
6258}
6259
6260bool BuildVectorSDNode::isConstantSplat(APInt &SplatValue,
6261 APInt &SplatUndef,
6262 unsigned &SplatBitSize,
6263 bool &HasAnyUndefs,
6264 unsigned MinSplatBits,
6265 bool isBigEndian) {
6266 EVT VT = getValueType(0);
6267 assert(VT.isVector() && "Expected a vector type");
6268 unsigned sz = VT.getSizeInBits();
6269 if (MinSplatBits > sz)
6270 return false;
6271
6272 SplatValue = APInt(sz, 0);
6273 SplatUndef = APInt(sz, 0);
6274
6275 // Get the bits. Bits with undefined values (when the corresponding element
6276 // of the vector is an ISD::UNDEF value) are set in SplatUndef and cleared
6277 // in SplatValue. If any of the values are not constant, give up and return
6278 // false.
6279 unsigned nOps = getNumOperands();
6280 assert(nOps > 0 && "isConstantSplat has 0-size build vector");
6281 unsigned EltBitSize = VT.getVectorElementType().getSizeInBits();
6282
6283 for (unsigned j = 0; j < nOps; ++j) {
6284 unsigned i = isBigEndian ? nOps-1-j : j;
6285 SDValue OpVal = getOperand(i);
6286 unsigned BitPos = j * EltBitSize;
6287
6288 if (OpVal.getOpcode() == ISD::UNDEF)
6289 SplatUndef |= APInt::getBitsSet(sz, BitPos, BitPos + EltBitSize);
6290 else if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(OpVal))
6291 SplatValue |= CN->getAPIntValue().zextOrTrunc(EltBitSize).
6292 zextOrTrunc(sz) << BitPos;
6293 else if (ConstantFPSDNode *CN = dyn_cast<ConstantFPSDNode>(OpVal))
6294 SplatValue |= CN->getValueAPF().bitcastToAPInt().zextOrTrunc(sz) << BitPos;
6295 else
6296 return false;
6297 }
6298
6299 // The build_vector is all constants or undefs. Find the smallest element
6300 // size that splats the vector.
6301
6302 HasAnyUndefs = (SplatUndef != 0);
6303 while (sz > 8) {
6304
6305 unsigned HalfSize = sz / 2;
6306 APInt HighValue = SplatValue.lshr(HalfSize).trunc(HalfSize);
6307 APInt LowValue = SplatValue.trunc(HalfSize);
6308 APInt HighUndef = SplatUndef.lshr(HalfSize).trunc(HalfSize);
6309 APInt LowUndef = SplatUndef.trunc(HalfSize);
6310
6311 // If the two halves do not match (ignoring undef bits), stop here.
6312 if ((HighValue & ~LowUndef) != (LowValue & ~HighUndef) ||
6313 MinSplatBits > HalfSize)
6314 break;
6315
6316 SplatValue = HighValue | LowValue;
6317 SplatUndef = HighUndef & LowUndef;
6318
6319 sz = HalfSize;
6320 }
6321
6322 SplatBitSize = sz;
6323 return true;
6324}
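// Illustrative sketch, not part of upstream LLVM: for the build_vector
// <i32 0x01010101, i32 0x01010101, i32 undef, i32 0x01010101>, the halving
// loop shrinks the 128-bit pattern to SplatBitSize == 8 with SplatValue ==
// 0x01, and HasAnyUndefs is true because of the undef lane. Requesting
// MinSplatBits == 32 would instead stop the halving at 32 bits.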
6325
6326bool ShuffleVectorSDNode::isSplatMask(const int *Mask, EVT VT) {
6327 // Find the first non-undef value in the shuffle mask.
6328 unsigned i, e;
6329 for (i = 0, e = VT.getVectorNumElements(); i != e && Mask[i] < 0; ++i)
6330 /* search */;
6331
6332 assert(i != e && "VECTOR_SHUFFLE node with all undef indices!");
6333
6334 // Make sure all remaining elements are either undef or the same as the first
6335 // non-undef value.
6336 for (int Idx = Mask[i]; i != e; ++i)
6337 if (Mask[i] >= 0 && Mask[i] != Idx)
6338 return false;
6339 return true;
6340}
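// Illustrative sketch, not part of upstream LLVM: the mask <2, -1, 2, 2> is a
// splat of element 2 (the -1 entry is undef and ignored), whereas
// <2, -1, 0, 2> is not, because index 0 disagrees with the first non-undef
// entry.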
6341
6342#ifdef XDEBUG
6343static void checkForCyclesHelper(const SDNode *N,
6344 SmallPtrSet<const SDNode*, 32> &Visited,
6345 SmallPtrSet<const SDNode*, 32> &Checked) {
6346 // If this node has already been checked, don't check it again.
6347 if (Checked.count(N))
6348 return;
6349
6350 // If a node has already been visited on this depth-first walk, reject it as
6351 // a cycle.
6352 if (!Visited.insert(N)) {
6353 dbgs() << "Offending node:\n";
6354 N->dumprFull();
6355 errs() << "Detected cycle in SelectionDAG\n";
6356 abort();
6357 }
6358
6359 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
6360 checkForCyclesHelper(N->getOperand(i).getNode(), Visited, Checked);
6361
6362 Checked.insert(N);
6363 Visited.erase(N);
6364}
6365#endif
6366
6367void llvm::checkForCycles(const llvm::SDNode *N) {
6368#ifdef XDEBUG
6369 assert(N && "Checking nonexistent SDNode");
6370 SmallPtrSet<const SDNode*, 32> visited;
6371 SmallPtrSet<const SDNode*, 32> checked;
6372 checkForCyclesHelper(N, visited, checked);
6373#endif
6374}
6375
6376void llvm::checkForCycles(const llvm::SelectionDAG *DAG) {
6377 checkForCycles(DAG->getRoot().getNode());
6378}
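// Illustrative sketch, not part of upstream LLVM: in an XDEBUG build, a pass
// can sanity-check a rewrite it just performed. NewRoot is a hypothetical
// rewritten root value:
//
//   DAG.setRoot(NewRoot);
//   checkForCycles(&DAG);  // aborts with a node dump if a cycle was created
//
// In non-XDEBUG builds the check is effectively a no-op.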