//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code.  This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
#include <map>
#include <vector>

namespace llvm {
  class CallInst;
  class CCState;
  class FastISel;
  class FunctionLoweringInfo;
  class ImmutableCallSite;
  class IntrinsicInst;
  class MachineBasicBlock;
  class MachineFunction;
  class MachineInstr;
  class MachineJumpTableInfo;
  class MachineLoop;
  class Mangler;
  class MCContext;
  class MCExpr;
  class MCSymbol;
  template<typename T> class SmallVectorImpl;
  class DataLayout;
  class TargetRegisterClass;
  class TargetLibraryInfo;
  class TargetLoweringObjectFile;
  class Value;

  namespace Sched {
    enum Preference {
      None,        // No preference
      Source,      // Follow source order.
      RegPressure, // Scheduling for lowest register pressure.
      Hybrid,      // Scheduling for both latency and register pressure.
      ILP,         // Scheduling for ILP in low register pressure mode.
      VLIW         // Scheduling for VLIW targets.
    };
  }

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
  TargetLoweringBase(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
  void operator=(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;

public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction {
    Legal,   // The target natively supports this operation.
    Promote, // This operation should be executed in a larger type.
    Expand,  // Try to expand this to other ops, otherwise use a libcall.
    Custom   // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector      // This vector should be widened into a larger vector.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }
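
  // Illustrative sketch: how the boolean content of a target determines the
  // extension used when widening a boolean value. Assumes a target configured
  // with ZeroOrNegativeOneBooleanContent (e.g. for vectors on X86).
  // \code
  //   ISD::NodeType ExtOp =
  //       getExtendForContent(ZeroOrNegativeOneBooleanContent);
  //   // ExtOp == ISD::SIGN_EXTEND: every bit of the widened value
  //   // replicates bit 0, so true becomes all-ones and false all-zeros.
  // \endcode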

  /// NOTE: The TargetMachine owns TLOF.
  explicit TargetLoweringBase(const TargetMachine &TM);
  virtual ~TargetLoweringBase() {}

protected:
  /// \brief Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }
  const DataLayout *getDataLayout() const { return DL; }
  const TargetLoweringObjectFile &getObjFileLowering() const {
    return *TM.getObjFileLowering();
  }

  bool isBigEndian() const { return !IsLittleEndian; }
  bool isLittleEndian() const { return IsLittleEndian; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(uint32_t /*AS*/ = 0) const;
  unsigned getPointerSizeInBits(uint32_t AS = 0) const;
  unsigned getPointerTypeSizeInBits(Type *Ty) const;
  virtual MVT getScalarShiftAmountTy(EVT LHSTy) const;

  EVT getShiftAmountTy(EVT LHSTy) const;

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR
  virtual MVT getVectorIdxTy() const {
    return getPointerTy();
  }

  /// Return true if the select operation is expensive for this target.
  bool isSelectExpensive() const { return SelectIsExpensive; }

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const {
    // The default action for one element vectors is to scalarize.
    if (VT.getVectorNumElements() == 1)
      return TypeScalarizeVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }
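
  // Illustrative override sketch: a hypothetical target whose shuffles are
  // cheap might raise the threshold so BUILD_VECTORs with up to four defined
  // scalars still use method (1). The threshold value is an assumed,
  // target-specific tuning choice, not taken from any real backend.
  // \code
  //   bool shouldExpandBuildVectorWithShuffles(
  //       EVT VT, unsigned DefinedValues) const override {
  //     return DefinedValues < 5;
  //   }
  // \endcode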

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  bool isIntDivCheap() const { return IntDivIsCheap; }

  /// Return true if sqrt(x) is as cheap or cheaper than 1 / rsqrt(x).
  bool isFsqrtCheap() const {
    return FsqrtIsCheap;
  }

  /// Returns true if target has indicated at least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true if pow2 sdiv is cheaper than a chain of sra/srl/add/sra.
  bool isPow2SDivCheap() const { return Pow2SDivIsCheap; }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  /// Return true if the following transform is beneficial:
  ///   fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading that way is more efficient; however, this can be undone by
  /// optimizations in the DAG combiner.
  virtual bool isLoadBitCastBeneficial(EVT /* Load */, EVT /* Bitcast */) const {
    return true;
  }

  /// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz() const {
    return false;
  }

  /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz() const {
    return false;
  }

  /// \brief Return if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #imm-with-one-bit-set;
  ///   %icmpResult = icmp %andResult, 0
  ///   br i1 %icmpResult, label %dest1, label %dest2
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   brOnBitSet %register, #bitNumber, dest
  /// \endcode
  bool isMaskAndBranchFoldingLegal() const {
    return MaskAndBranchFoldingIsLegal;
  }

  /// \brief Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if target supports floating point exceptions.
  bool hasFloatingPointExceptions() const {
    return HasFloatingPointExceptions;
  }

  /// Return true if the target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const {
    return false;
  }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual
  MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1. Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds. For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some cpus also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }
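
  // Worked example: with an X86-like configuration (scalar booleans
  // zero-extended, vector booleans sign-extended; see setBooleanContents
  // below), these queries would return:
  // \code
  //   getBooleanContents(MVT::i32);   // ZeroOrOneBooleanContent
  //   getBooleanContents(MVT::v4i32); // ZeroOrNegativeOneBooleanContent
  // \endcode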

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference (or
  /// none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type. For example, on
  /// i386 the rep register class for i8, i16, and i32 are GR32; while the rep
  /// register class is GR64 on x86_64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return true if the target has native support for the specified value
  /// type. This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions), 0);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      unsigned I = VT.SimpleTy;
      ValueTypeActions[I] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to. For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }
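
  // Worked example: on a hypothetical 32-bit target where i32 is the widest
  // legal integer type, the loop above walks the expansion chain one
  // TypeExpandInteger step at a time:
  // \code
  //   getTypeToExpandTo(Ctx, MVT::i64);  // i64 -> i32; i32 is TypeLegal.
  //   getTypeToExpandTo(Ctx, MVT::i128); // i128 -> i64 -> i32.
  // \endcode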

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;
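
  // Worked example for the SSE1-style case described above, where v4f32 is
  // legal: breaking down v8f32 yields two v4f32 intermediates, each carried
  // in one register.
  // \code
  //   EVT IntermediateVT;
  //   MVT RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs = getVectorTypeBreakdown(
  //       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
  //   // NumRegs == 2, NumIntermediates == 2,
  //   // IntermediateVT == RegisterVT == MVT::v4f32.
  // \endcode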

  struct IntrinsicInfo {
    unsigned     opc;      // target opcode
    EVT          memVT;    // memory VT
    const Value* ptrVal;   // value representing memory location
    int          offset;   // offset off of ptrVal
    unsigned     size;     // the size of the memory location
                           // (taken from memVT if zero)
    unsigned     align;    // alignment
    bool         vol;      // is volatile?
    bool         readMem;  // reads memory?
    bool         writeMem; // writes memory?

    IntrinsicInfo() : opc(0), ptrVal(nullptr), offset(0), size(0), align(1),
                      vol(false), readMem(false), writeMem(false) {}
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to
  /// map to a MemIntrinsicNode (touches memory). If this is the case, it
  /// returns true and stores the intrinsic information into the IntrinsicInfo
  /// that was passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                  EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it. (>= rather than > so that an
    // opcode equal to the table size, the first target opcode, is in range.)
    if (Op >= array_lengthof(OpActions[0])) return Custom;
    unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
    return (LegalizeAction)OpActions[I][Op];
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
           MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
    return (LegalizeAction)LoadExtActions[ValI][MemI][ExtType];
  }

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return ValVT.isSimple() && MemVT.isSimple() &&
      getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)TruncStoreActions[ValI][MemI];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && MemVT.isSimple() &&
      getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
  }

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 4) < array_lengthof(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 4];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0x3);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return
      getCondCodeAction(CC, VT) == Legal ||
      getCondCodeAction(CC, VT) == Custom;
  }

  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
             getOperationAction(Op, NVT) == Promote);
    return NVT;
  }

  /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
  /// operations except for the pointer size. If AllowUnknown is true, this
  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
  /// otherwise it will assert.
  EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(PTy->getAddressSpace());

    if (Ty->isVectorTy()) {
      VectorType *VTy = cast<VectorType>(Ty);
      Type *Elm = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
        EVT PointerTy(getPointerTy(PT->getAddressSpace()));
        Elm = PointerTy.getTypeForEVT(Ty->getContext());
      }

      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
                              VTy->getNumElements());
    }
    return EVT::getEVT(Ty, AllowUnknown);
  }

  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(Type *Ty, bool AllowUnknown = false) const {
    return getValueType(Ty, AllowUnknown).getSimpleVT();
  }

  /// Return the desired alignment for ByVal or InAlloca aggregate function
  /// arguments in the caller parameter area. This is the actual alignment, not
  /// its logarithm.
  virtual unsigned getByValTypeAlignment(Type *Ty) const;

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
             array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// Return the number of registers that this ValueType will eventually
  /// require.
  ///
  /// This is one for any types promoted to live in larger registers, but may be
  /// more than one for types (like i64) that are split into pieces. For types
  /// like i140, which are first promoted then expanded, it is the number of
  /// registers needed to hold all the bits of the original type. For an i140
  /// on a 32 bit machine this means 5 registers.
  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
             array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }
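
  // Worked arithmetic for the i140 case mentioned above, on a 32-bit machine:
  // i140 is neither simple nor a vector, so the integer path applies, and
  // (BitWidth + RegWidth - 1) / RegWidth = (140 + 31) / 32 = 5 registers.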

  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }

  // Return true if it is profitable to reduce the given load node to a smaller
  // type.
  //
  // e.g. (i16 (trunc (i32 (load x)))) -> i16 load x should be performed
  virtual bool shouldReduceLoadWidth(SDNode *Load,
                                     ISD::LoadExtType ExtTy,
                                     EVT NewVT) const {
    return true;
  }

  /// When splitting a value of the specified type into parts, does the Lo
  /// or Hi part come first? This usually follows the endianness, except
  /// for ppcf128, where the Hi part always comes first.
  bool hasBigEndianPartOrdering(EVT VT) const {
    return isBigEndian() || VT == MVT::ppcf128;
  }

  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }

  /// \brief Get maximum # of store operations permitted for llvm.memset
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }

  /// \brief Get maximum # of store operations permitted for llvm.memcpy
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
  }

  /// \brief Get maximum # of store operations permitted for llvm.memmove
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
  }

  /// \brief Determine if the target supports unaligned memory accesses.
  ///
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type in the given address space. If true, it also returns
  /// whether the unaligned memory access is "fast" in the last argument by
  /// reference. This is used, for example, in situations where an array
  /// copy/move/set is converted to a sequence of store operations. Its use
  /// helps to ensure that such replacements don't generate code that causes an
  /// alignment error (trap) on the target machine.
  virtual bool allowsMisalignedMemoryAccesses(EVT,
                                              unsigned AddrSpace = 0,
                                              unsigned Align = 1,
                                              bool * /*Fast*/ = nullptr) const {
    return false;
  }

  /// Returns the target specific optimal type for load and store operations as
  /// a result of memset, memcpy, and memmove lowering.
  ///
  /// If DstAlign is zero, the destination alignment can satisfy any
  /// constraint. Similarly, if SrcAlign is zero there is no need to check it
  /// against an alignment requirement, probably because the source does not
  /// need to be loaded. If 'IsMemset' is true, this is expanding a memset. If
  /// 'ZeroMemset' is true, it is a memset of zero. 'MemcpyStrSrc' indicates
  /// whether the memcpy source is constant so it does not need to be loaded.
  /// It returns EVT::Other if the type should be determined using generic
  /// target-independent logic.
  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
                                  bool /*IsMemset*/,
                                  bool /*ZeroMemset*/,
                                  bool /*MemcpyStrSrc*/,
                                  MachineFunction &/*MF*/) const {
    return MVT::Other;
  }

  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
  /// fstpl which also does type conversion. Note the specified type doesn't
  /// have to be legal as the hook is used before type legalization.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }

  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// Return the integer threshold on the number of blocks at which a jump
  /// table is used rather than an if sequence.
  int getMinimumJumpTableEntries() const {
    return MinimumJumpTableEntries;
  }

  /// If a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to a landing pad.
  unsigned getExceptionPointerRegister() const {
    return ExceptionPointerRegister;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned getExceptionSelectorRegister() const {
    return ExceptionSelectorRegister;
  }

  /// Returns the target's jmp_buf size in bytes (if never set, the default is
  /// 200).
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// Returns the target's jmp_buf alignment in bytes (if never set, the default
  /// is 0).
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }

  /// Return the minimum stack alignment of an argument.
  unsigned getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// Return the minimum function alignment.
  unsigned getMinFunctionAlignment() const {
    return MinFunctionAlignment;
  }

  /// Return the preferred function alignment.
  unsigned getPrefFunctionAlignment() const {
    return PrefFunctionAlignment;
  }

  /// Return the preferred loop alignment.
  virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
    return PrefLoopAlignment;
  }

  /// Return whether the DAG builder should automatically insert fences and
  /// reduce ordering for atomics.
  bool getInsertFencesForAtomic() const {
    return InsertFencesForAtomic;
  }

  /// Return true if the target stores stack protector cookies at a fixed offset
  /// in some non-standard address space, and populates the address space and
  /// offset as appropriate.
  virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
                                      unsigned &/*Offset*/) const {
    return false;
  }

  /// Returns the maximal possible offset which can be used for loads / stores
  /// from the global.
  virtual unsigned getMaximalGlobalOffset() const {
    return 0;
  }

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// \name Helpers for TargetTransformInfo implementations
  /// @{

  /// Get the ISD node that corresponds to the Instruction class opcode.
  int InstructionOpcodeToISD(unsigned Opcode) const;

  /// Estimate the cost of type-legalization and the legalized type.
  std::pair<unsigned, MVT> getTypeLegalizationCost(Type *Ty) const;

  /// @}

  //===--------------------------------------------------------------------===//
  /// \name Helpers for atomic expansion.
  /// @{

  /// True if AtomicExpandPass should use emitLoadLinked/emitStoreConditional
  /// and expand AtomicCmpXchgInst.
  virtual bool hasLoadLinkedStoreConditional() const { return false; }

  /// Perform a load-linked operation on Addr, returning a "Value *" with the
  /// corresponding pointee type. This may entail some non-trivial operations to
  /// truncate or reconstruct types that will be illegal in the backend. See
  /// ARMISelLowering for an example implementation.
  virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                AtomicOrdering Ord) const {
    llvm_unreachable("Load linked unimplemented on this target");
  }

  /// Perform a store-conditional operation to Addr. Return the status of the
  /// store. This should be 0 if the store succeeded, non-zero otherwise.
  virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                      Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Store conditional unimplemented on this target");
  }

  /// Inserts in the IR a target-specific intrinsic specifying a fence.
  /// It is called by AtomicExpandPass before expanding an
  /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad.
  /// RMW and CmpXchg set both IsStore and IsLoad to true.
  /// This function should either return a nullptr, or a pointer to an IR-level
  /// Instruction*. Even complex fence sequences can be represented by a
  /// single Instruction* through an intrinsic to be lowered later.
  /// Backends with !getInsertFencesForAtomic() should keep a no-op here.
  /// Backends should override this method to produce target-specific intrinsic
  /// for their fences.
  /// FIXME: Please note that the default implementation here in terms of
  /// IR-level fences exists for historical/compatibility reasons and is
  /// *unsound*! Fences cannot, in general, be used to restore sequential
  /// consistency. For example, consider the following:
  ///   atomic<int> x = y = 0;
  ///   int r1, r2, r3, r4;
  ///   Thread 0:
  ///     x.store(1);
  ///   Thread 1:
  ///     y.store(1);
  ///   Thread 2:
  ///     r1 = x.load();
  ///     r2 = y.load();
  ///   Thread 3:
  ///     r3 = y.load();
  ///     r4 = x.load();
  /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
  /// seq_cst. But if they are lowered to monotonic accesses, no amount of
  /// IR-level fences can prevent it.
  /// @{
  virtual Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                        bool IsStore, bool IsLoad) const {
    if (!getInsertFencesForAtomic())
      return nullptr;

    if (isAtLeastRelease(Ord) && IsStore)
      return Builder.CreateFence(Ord);
    else
      return nullptr;
  }

  virtual Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                         bool IsStore, bool IsLoad) const {
    if (!getInsertFencesForAtomic())
      return nullptr;

    if (isAtLeastAcquire(Ord))
      return Builder.CreateFence(Ord);
    else
      return nullptr;
  }
  /// @}

  /// Returns true if the given (atomic) store should be expanded by the
  /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
  virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
    return false;
  }

  /// Returns true if the given (atomic) load should be expanded by the
  /// IR-level AtomicExpand pass into a load-linked instruction
  /// (through emitLoadLinked()).
  virtual bool shouldExpandAtomicLoadInIR(LoadInst *LI) const { return false; }

  /// Returns true if the given AtomicRMW should be expanded by the
  /// IR-level AtomicExpand pass into a loop using LoadLinked/StoreConditional.
  virtual bool shouldExpandAtomicRMWInIR(AtomicRMWInst *RMWI) const {
    return false;
  }

  /// On some platforms, an AtomicRMW that never actually modifies the value
  /// (such as fetch_add of 0) can be turned into a fence followed by an
  /// atomic load. This may sound useless, but it makes it possible for the
  /// processor to keep the cacheline shared, dramatically improving
  /// performance. And such idempotent RMWs are useful for implementing some
  /// kinds of locks, see for example (justification + benchmarks):
  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
  /// This method tries doing that transformation, returning the atomic load if
  /// it succeeds, and nullptr otherwise.
  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
  /// another round of expansion.
  virtual LoadInst *lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
    return nullptr;
  }

  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //

  /// \brief Reset the operation actions based on target options.
  virtual void resetOperationActions() {}

protected:
  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type. See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) {
    BooleanContents = Ty;
    BooleanFloatContents = Ty;
  }

  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type. See getBooleanContents.
  void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
    BooleanContents = IntTy;
    BooleanFloatContents = FloatTy;
  }

  /// Specify how the target extends the result of a vector boolean value from a
  /// vector of i1 to a wider type. See getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }

  /// Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// Indicate whether this target prefers to use _setjmp to implement
  /// llvm.setjmp or the version without _. Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }

  /// Indicate whether this target prefers to use _longjmp to implement
  /// llvm.longjmp or the version without _. Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }

  /// Indicate the minimum number of blocks for which a jump table should be
  /// generated rather than an if sequence.
  void setMinimumJumpTableEntries(int Val) {
    MinimumJumpTableEntries = Val;
  }

  /// If set to a physical register, this specifies the register that
  /// llvm.stacksave/llvm.stackrestore should save and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// If set to a physical register, this sets the register that receives the
  /// exception address on entry to a landing pad.
  void setExceptionPointerRegister(unsigned R) {
    ExceptionPointerRegister = R;
  }

  /// If set to a physical register, this sets the register that receives the
  /// exception typeid on entry to a landing pad.
  void setExceptionSelectorRegister(unsigned R) {
    ExceptionSelectorRegister = R;
  }

  /// Tells the code generator not to expand operations into sequences that use
  /// the select operations if possible.
  void setSelectIsExpensive(bool isExpensive = true) {
    SelectIsExpensive = isExpensive;
  }

  /// Tells the code generator that the target has multiple (allocatable)
  /// condition registers that can be used to store the results of comparisons
  /// for use by selects and conditional branches. With multiple condition
  /// registers, the code generator will not aggressively sink comparisons into
  /// the blocks of their users.
  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
    HasMultipleConditionRegisters = hasManyRegs;
  }

  /// Tells the code generator that the target has BitExtract instructions.
  /// The code generator will aggressively sink "shift"s into the blocks of
  /// their users if the users will generate "and" instructions which can be
  /// combined with "shift" to BitExtract instructions.
  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
    HasExtractBitsInsn = hasExtractInsn;
  }

  /// Tells the code generator not to expand a sequence of operations into
  /// separate sequences that increase the amount of flow control.
  void setJumpIsExpensive(bool isExpensive = true) {
    JumpIsExpensive = isExpensive;
  }

  /// Tells the code generator whether integer divide is cheap; if it is not,
  /// and where possible, it will be replaced by an alternate sequence of
  /// instructions not containing an integer divide.
  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }

  /// Tells the code generator that fsqrt is cheap, and should not be replaced
  /// with an alternative sequence of instructions.
  void setFsqrtIsCheap(bool isCheap = true) { FsqrtIsCheap = isCheap; }

  /// Tells the code generator that this target supports floating point
  /// exceptions and cares about preserving floating point exception behavior.
  void setHasFloatingPointExceptions(bool FPExceptions = true) {
    HasFloatingPointExceptions = FPExceptions;
  }

  /// Tells the code generator which bitwidths to bypass.
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }
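
  // Usage sketch from a hypothetical target constructor: if the 64-bit
  // hardware divider is slow but the 32-bit one is fast, declare that 64-bit
  // divides whose operands fit in 32 bits may be dispatched to the 32-bit
  // instruction at run time. The widths here are illustrative tuning values.
  // \code
  //   addBypassSlowDiv(64, 32); // bypass slow i64 div with fast i32 div
  // \endcode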

  /// Tells the code generator that it shouldn't generate sra/srl/add/sra for a
  /// signed divide by power of two; let the target handle it.
  void setPow2SDivIsCheap(bool isCheap = true) { Pow2SDivIsCheap = isCheap; }

  /// Add the specified register class as an available regclass for the
  /// specified value type. This indicates the selector can handle values of
  /// that class natively.
  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
    AvailableRegClasses.push_back(std::make_pair(VT, RC));
    RegClassForVT[VT.SimpleTy] = RC;
  }

  /// Remove all register classes.
  void clearRegisterClasses() {
    memset(RegClassForVT, 0,
           MVT::LAST_VALUETYPE * sizeof(TargetRegisterClass*));

    AvailableRegClasses.clear();
  }

  /// \brief Remove all operation actions.
  void clearOperationActions() {
  }

  /// Return the largest legal super-reg register class of the register class
  /// for the specified type and its associated "cost".
  virtual std::pair<const TargetRegisterClass*, uint8_t>
  findRepresentativeClass(MVT VT) const;

  /// Once all of the register classes are added, this allows us to compute
  /// derived properties we expose.
  void computeRegisterProperties();

  /// Indicate that the specified operation does not work with the specified
  /// type and indicate what to do about it.
  void setOperationAction(unsigned Op, MVT VT,
                          LegalizeAction Action) {
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
  }
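
  // Usage sketch: target constructors typically issue a batch of these calls
  // to describe their instruction set. The opcode/type pairs below are
  // illustrative, not taken from any particular backend.
  // \code
  //   setOperationAction(ISD::FSIN,  MVT::f64, Expand);  // expand/libcall
  //   setOperationAction(ISD::SDIV,  MVT::i8,  Promote); // use a larger type
  //   setOperationAction(ISD::BR_CC, MVT::i32, Custom);  // LowerOperation()
  // \endcode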

  /// Indicate that the specified load with extension does not work with the
  /// specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
           MemVT.isValid() && "Table isn't big enough!");
    LoadExtActions[ValVT.SimpleTy][MemVT.SimpleTy][ExtType] = (uint8_t)Action;
  }

  /// Indicate that the specified truncating store does not work with the
  /// specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT,
                           LegalizeAction Action) {
    assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
    TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
  }

  /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
                            LegalizeAction Action) {
    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Load actions are kept in the upper half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) << 4;
  }

  /// Indicate that the specified indexed store does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
                             LegalizeAction Action) {
    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Store actions are kept in the lower half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
  }

  /// Indicate that the specified condition code is or isn't supported on the
  /// target and indicate what to do about it.
  void setCondCodeAction(ISD::CondCode CC, MVT VT,
                         LegalizeAction Action) {
    assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
           "Table isn't big enough!");
    // The lower 4 bits of SimpleTy select the Nth 2-bit chunk within a 32-bit
    // value, and the remaining upper bits index into the second dimension of
    // the array to select which 32-bit value to use.
    uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
    CondCodeActions[CC][VT.SimpleTy >> 4] &= ~((uint32_t)0x3 << Shift);
    CondCodeActions[CC][VT.SimpleTy >> 4] |= (uint32_t)Action << Shift;
  }

  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
  /// to trying a larger integer/fp until it can find one that works. If that
  /// default is insufficient, this method can be used by the target to override
  /// the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }
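
  // Usage sketch: pair this with a Promote action when the default
  // next-larger-type search is not what the target wants. The types here are
  // illustrative only.
  // \code
  //   setOperationAction(ISD::AND, MVT::i1, Promote);
  //   AddPromotedToType(ISD::AND, MVT::i1, MVT::i32); // skip i8/i16
  // \endcode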

  /// Targets should invoke this method for each target independent node that
  /// they want to provide a custom DAG combiner for by implementing the
  /// PerformDAGCombine virtual method.
  void setTargetDAGCombine(ISD::NodeType NT) {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
  }
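
  // Usage sketch: after a call like the one below, the DAG combiner will
  // invoke the target's PerformDAGCombine hook for every ISD::MUL node it
  // visits (the opcode chosen here is illustrative).
  // \code
  //   setTargetDAGCombine(ISD::MUL);
  // \endcode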
1322
1323 /// Set the target's required jmp_buf buffer size (in bytes); default is 200
1324 void setJumpBufSize(unsigned Size) {
1325 JumpBufSize = Size;
1326 }
1327
1328 /// Set the target's required jmp_buf buffer alignment (in bytes); default is
1329 /// 0
1330 void setJumpBufAlignment(unsigned Align) {
1331 JumpBufAlignment = Align;
1332 }
1333
1334 /// Set the target's minimum function alignment (in log2(bytes))
1335 void setMinFunctionAlignment(unsigned Align) {
1336 MinFunctionAlignment = Align;
1337 }
1338
1339 /// Set the target's preferred function alignment. This should be set if
1340 /// there is a performance benefit to higher-than-minimum alignment (in
1341 /// log2(bytes))
1342 void setPrefFunctionAlignment(unsigned Align) {
1343 PrefFunctionAlignment = Align;
1344 }
1345
1346 /// Set the target's preferred loop alignment. The default of zero means the
1347 /// target does not care about loop alignment. The alignment is
1348 /// specified in log2(bytes). The target may also override
1349 /// getPrefLoopAlignment to provide per-loop values.
1350 void setPrefLoopAlignment(unsigned Align) {
1351 PrefLoopAlignment = Align;
1352 }
1353
1354 /// Set the minimum stack alignment of an argument (in log2(bytes)).
1355 void setMinStackArgumentAlignment(unsigned Align) {
1356 MinStackArgumentAlignment = Align;
1357 }
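
  // Note that the function, loop, and stack-argument alignments above use the
  // log2(bytes) convention; the values below are purely illustrative:
  //   setMinFunctionAlignment(2);  // functions at least 4-byte aligned
  //   setPrefFunctionAlignment(4); // prefer 16-byte function alignment
  //   setPrefLoopAlignment(4);     // prefer 16-byte aligned loop headers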
1358
1359 /// Set if the DAG builder should automatically insert fences and reduce the
1360 /// order of atomic memory operations to Monotonic.
1361 void setInsertFencesForAtomic(bool fence) {
1362 InsertFencesForAtomic = fence;
1363 }
1364
1365 public:
1366 //===--------------------------------------------------------------------===//
1367 // Addressing mode description hooks (used by LSR etc).
1368 //
1369
1370 /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
1371 /// instructions reading the address. This allows as much computation as
1372 /// possible to be done in the address mode for that operand. This hook lets
1373 /// targets also indicate when this should be done for intrinsics that
1374 /// load/store.
1375 virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
1376 SmallVectorImpl<Value*> &/*Ops*/,
1377 Type *&/*AccessTy*/) const {
1378 return false;
1379 }
1380
1381 /// This represents an addressing mode of:
1382 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
1383 /// If BaseGV is null, there is no BaseGV.
1384 /// If BaseOffs is zero, there is no base offset.
1385 /// If HasBaseReg is false, there is no base register.
1386 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
1387 /// no scale.
1388 struct AddrMode {
1389 GlobalValue *BaseGV;
1390 int64_t BaseOffs;
1391 bool HasBaseReg;
1392 int64_t Scale;
1393 AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
1394 };
1395
1396 /// Return true if the addressing mode represented by AM is legal for this
1397 /// target, for a load/store of the specified type.
1398 ///
1399 /// The type may be VoidTy, in which case only return true if the addressing
1400 /// mode is legal for a load/store of any legal type. TODO: Handle
1401 /// pre/postinc as well.
1402 virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;
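
  // Sketch of a query an optimizer such as LSR might make: is
  // "GV + 16 + BaseReg + 4*ScaleReg" a legal i32 address? (TLI, SomeGlobal,
  // and Ctx are illustrative names.)
  //   TargetLoweringBase::AddrMode AM;
  //   AM.BaseGV = SomeGlobal;
  //   AM.BaseOffs = 16;
  //   AM.HasBaseReg = true;
  //   AM.Scale = 4;
  //   bool Legal = TLI.isLegalAddressingMode(AM, Type::getInt32Ty(Ctx));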
1403
1404 /// \brief Return the cost of the scaling factor used in the addressing mode
1405 /// represented by AM for this target, for a load/store of the specified type.
1406 ///
1407 /// If the AM is supported, the return value must be >= 0.
1408 /// If the AM is not supported, it returns a negative value.
1409 /// TODO: Handle pre/postinc as well.
1410 virtual int getScalingFactorCost(const AddrMode &AM, Type *Ty) const {
1411 // Default: assume that any scaling factor used in a legal AM is free.
1412 if (isLegalAddressingMode(AM, Ty)) return 0;
1413 return -1;
1414 }
1415
1416 /// Return true if the specified immediate is a legal icmp immediate, that is,
1417 /// the target has icmp instructions which can compare a register against the
1418 /// immediate without having to materialize the immediate into a register.
1419 virtual bool isLegalICmpImmediate(int64_t) const {
1420 return true;
1421 }
1422
1423 /// Return true if the specified immediate is a legal add immediate, that is, the
1424 /// target has add instructions which can add a register with the immediate
1425 /// without having to materialize the immediate into a register.
1426 virtual bool isLegalAddImmediate(int64_t) const {
1427 return true;
1428 }
1429
1430 /// Return true if it's significantly cheaper to shift a vector by a uniform
1431 /// scalar than by an amount which will vary across each lane. On x86, for
1432 /// example, there is a "psllw" instruction for the former case, but no simple
1433 /// instruction for a general "a << b" operation on vectors.
1434 virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
1435 return false;
1436 }
1437
1438 /// Return true if it's free to truncate a value of type Ty1 to type
1439 /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
1440 /// by referencing its sub-register AX.
1441 virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1442 return false;
1443 }
1444
1445 /// Return true if a truncation from Ty1 to Ty2 is permitted when deciding
1446 /// whether a call is in tail position. Typically this means that both results
1447 /// would be assigned to the same register or stack slot, but it could mean
1448 /// the target performs adequate checks of its own before proceeding with the
1449 /// tail call.
1450 virtual bool allowTruncateForTailCall(Type * /*Ty1*/, Type * /*Ty2*/) const {
1451 return false;
1452 }
1453
1454 virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
1455 return false;
1456 }
1457
1458 /// Return true if any actual instruction that defines a value of type Ty1
1459 /// implicitly zero-extends the value to Ty2 in the result register.
1460 ///
1461 /// This does not necessarily include registers defined in unknown ways, such
1462 /// as incoming arguments, or copies from unknown virtual registers. Also, if
1463 /// isTruncateFree(Ty2, Ty1) is true, this does not necessarily apply to
1464 /// truncate instructions. e.g. on x86-64, all instructions that define 32-bit
1465 /// values implicitly zero-extend the result out to 64 bits.
1466 virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
1467 return false;
1468 }
1469
1470 virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
1471 return false;
1472 }
1473
1474 /// Return true if the target can combine, into a paired load, two loaded
1475 /// values of type LoadedType lying next to each other in memory.
1476 /// RequiredAlignment gives the minimal alignment constraints that must be met
1477 /// to be able to select this paired load.
1478 ///
1479 /// This information is *not* used to generate actual paired loads, but it is
1480 /// used to generate a sequence of loads that is easier to combine into a
1481 /// paired load.
1482 /// For instance, something like this:
1483 /// a = load i64* addr
1484 /// b = trunc i64 a to i32
1485 /// c = lshr i64 a, 32
1486 /// d = trunc i64 c to i32
1487 /// will be optimized into:
1488 /// b = load i32* addr1
1489 /// d = load i32* addr2
1490 /// Where addr1 = addr2 +/- sizeof(i32).
1491 ///
1492 /// In other words, unless the target performs a post-isel load combining,
1493 /// this information should not be provided because it will generate more
1494 /// loads.
1495 virtual bool hasPairedLoad(Type * /*LoadedType*/,
1496 unsigned & /*RequiredAlignment*/) const {
1497 return false;
1498 }
1499
1500 virtual bool hasPairedLoad(EVT /*LoadedType*/,
1501 unsigned & /*RequiredAlignment*/) const {
1502 return false;
1503 }
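
  // Sketch of an override for a target with 64-bit paired loads, in the
  // spirit of AArch64's ldp; the types and alignment are illustrative only:
  //   bool hasPairedLoad(Type *LoadedType,
  //                      unsigned &RequiredAlignment) const override {
  //     RequiredAlignment = 4;
  //     return LoadedType->isIntegerTy(32) || LoadedType->isFloatTy();
  //   }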
1504
1505 /// Return true if zero-extending the specific node Val to type VT2 is free
1506 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
1507 /// because it's folded such as X86 zero-extending loads).
1508 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
1509 return isZExtFree(Val.getValueType(), VT2);
1510 }
1511
1512 /// Return true if an fpext operation is free (for instance, because
1513 /// single-precision floating-point numbers are implicitly extended to
1514 /// double-precision).
1515 virtual bool isFPExtFree(EVT VT) const {
1516 assert(VT.isFloatingPoint());
1517 return false;
1518 }
1519
1520 /// Return true if an fneg operation is free to the point where it is never
1521 /// worthwhile to replace it with a bitwise operation.
1522 virtual bool isFNegFree(EVT VT) const {
1523 assert(VT.isFloatingPoint());
1524 return false;
1525 }
1526
1527 /// Return true if an fabs operation is free to the point where it is never
1528 /// worthwhile to replace it with a bitwise operation.
1529 virtual bool isFAbsFree(EVT VT) const {
1530 assert(VT.isFloatingPoint());
1531 return false;
1532 }
1533
1534 /// Return true if an FMA operation is faster than a pair of fmul and fadd
1535 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
1536 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
1537 ///
1538 /// NOTE: This may be called before legalization on types for which FMAs are
1539 /// not legal, but should return true if those types will eventually legalize
1540 /// to types that support FMAs. After legalization, it will only be called on
1541 /// types that support FMAs (via Legal or Custom actions)
1542 virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
1543 return false;
1544 }
1545
1546 /// Return true if it's profitable to narrow operations of type VT1 to
1547 /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
1548 /// i32 to i16.
1549 virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
1550 return false;
1551 }
1552
1553 /// \brief Return true if it is beneficial to convert a load of a constant to
1554 /// just the constant itself.
1555 /// On some targets it might be more efficient to use a combination of
1556 /// arithmetic instructions to materialize the constant instead of loading it
1557 /// from a constant pool.
1558 virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
1559 Type *Ty) const {
1560 return false;
1561 }
1562
1563 /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
1564 /// with this index. This is needed because EXTRACT_SUBVECTOR usually
1565 /// has custom lowering that depends on the index of the first element,
1566 /// and only the target knows which lowering is cheap.
1567 virtual bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const {
1568 return false;
1569 }
1570
1571 //===--------------------------------------------------------------------===//
1572 // Runtime Library hooks
1573 //
1574
1575 /// Rename the default libcall routine name for the specified libcall.
1576 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
1577 LibcallRoutineNames[Call] = Name;
1578 }
1579
1580 /// Get the libcall routine name for the specified libcall.
1581 const char *getLibcallName(RTLIB::Libcall Call) const {
1582 return LibcallRoutineNames[Call];
1583 }
1584
1585 /// Override the default CondCode to be used to test the result of the
1586 /// comparison libcall against zero.
1587 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
1588 CmpLibcallCCs[Call] = CC;
1589 }
1590
1591 /// Get the CondCode that's to be used to test the result of the comparison
1592 /// libcall against zero.
1593 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
1594 return CmpLibcallCCs[Call];
1595 }
1596
1597 /// Set the CallingConv that should be used for the specified libcall.
1598 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
1599 LibcallCallingConvs[Call] = CC;
1600 }
1601
1602 /// Get the CallingConv that should be used for the specified libcall.
1603 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
1604 return LibcallCallingConvs[Call];
1605 }
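
  // Sketch: an ARM EABI style target retargets its division libcall along
  // these lines (check the runtime you actually target):
  //   setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
  //   setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);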
1606
1607 private:
1608 const TargetMachine &TM;
1609 const DataLayout *DL;
1610
1611 /// True if this is a little endian target.
1612 bool IsLittleEndian;
1613
1614 /// Tells the code generator not to expand operations into sequences that use
1615 /// the select operations if possible.
1616 bool SelectIsExpensive;
1617
1618 /// Tells the code generator that the target has multiple (allocatable)
1619 /// condition registers that can be used to store the results of comparisons
1620 /// for use by selects and conditional branches. With multiple condition
1621 /// registers, the code generator will not aggressively sink comparisons into
1622 /// the blocks of their users.
1623 bool HasMultipleConditionRegisters;
1624
1625 /// Tells the code generator that the target has BitExtract instructions.
1626 /// The code generator will aggressively sink "shift"s into the blocks of
1627 /// their users if the users will generate "and" instructions which can be
1628 /// combined with "shift" to BitExtract instructions.
1629 bool HasExtractBitsInsn;
1630
1631 /// Tells the code generator not to expand integer divides by constants into a
1632 /// sequence of muls, adds, and shifts. This is a hack until a real cost
1633 /// model is in place. If we ever optimize for size, this will be set to true
1634 /// unconditionally.
1635 bool IntDivIsCheap;
1636
1637 /// Don't expand fsqrt with an approximation based on the inverse sqrt.
1638 bool FsqrtIsCheap;
1639
1640 /// Tells the code generator to bypass slow divide or remainder
1641 /// instructions. For example, BypassSlowDivWidths[32] = 8 tells the code
1642 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
1643 /// div/rem when the operands are positive and less than 256.
1644 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
1645
1646 /// Tells the code generator that it shouldn't generate sra/srl/add/sra for a
1647 /// signed divide by power of two; let the target handle it.
1648 bool Pow2SDivIsCheap;
1649
1650 /// Tells the code generator that it shouldn't generate extra flow control
1651 /// instructions and should attempt to combine flow control instructions via
1652 /// predication.
1653 bool JumpIsExpensive;
1654
1655 /// Whether the target supports or cares about preserving floating point
1656 /// exception behavior.
1657 bool HasFloatingPointExceptions;
1658
1659 /// This target prefers to use _setjmp to implement llvm.setjmp.
1660 ///
1661 /// Defaults to false.
1662 bool UseUnderscoreSetJmp;
1663
1664 /// This target prefers to use _longjmp to implement llvm.longjmp.
1665 ///
1666 /// Defaults to false.
1667 bool UseUnderscoreLongJmp;
1668
1669 /// Minimum number of switch destination blocks required before using a jump table.
1670 int MinimumJumpTableEntries;
1671
1672 /// Information about the contents of the high-bits in boolean values held in
1673 /// a type wider than i1. See getBooleanContents.
1674 BooleanContent BooleanContents;
1675
1676 /// Information about the contents of the high-bits in floating point boolean
1677 /// values held in a type wider than i1. See getBooleanContents.
1678 BooleanContent BooleanFloatContents;
1679
1680 /// Information about the contents of the high-bits in boolean vector values
1681 /// when the element type is wider than i1. See getBooleanContents.
1682 BooleanContent BooleanVectorContents;
1683
1684 /// The target scheduling preference: shortest possible total cycles or lowest
1685 /// register usage.
1686 Sched::Preference SchedPreferenceInfo;
1687
1688 /// The size, in bytes, of the target's jmp_buf buffers
1689 unsigned JumpBufSize;
1690
1691 /// The alignment, in bytes, of the target's jmp_buf buffers
1692 unsigned JumpBufAlignment;
1693
1694 /// The minimum alignment that any argument on the stack needs to have.
1695 unsigned MinStackArgumentAlignment;
1696
1697 /// The minimum function alignment (used when optimizing for size, and to
1698 /// prevent explicitly provided alignment from leading to incorrect code).
1699 unsigned MinFunctionAlignment;
1700
1701 /// The preferred function alignment (used when alignment unspecified and
1702 /// optimizing for speed).
1703 unsigned PrefFunctionAlignment;
1704
1705 /// The preferred loop alignment.
1706 unsigned PrefLoopAlignment;
1707
1708 /// Whether the DAG builder should automatically insert fences and reduce
1709 /// ordering for atomics. (This will be set for most architectures with
1710 /// weak memory ordering.)
1711 bool InsertFencesForAtomic;
1712
1713 /// If set to a physical register, this specifies the register that
1714 /// llvm.savestack/llvm.restorestack should save and restore.
1715 unsigned StackPointerRegisterToSaveRestore;
1716
1717 /// If set to a physical register, this specifies the register that receives
1718 /// the exception address on entry to a landing pad.
1719 unsigned ExceptionPointerRegister;
1720
1721 /// If set to a physical register, this specifies the register that receives
1722 /// the exception typeid on entry to a landing pad.
1723 unsigned ExceptionSelectorRegister;
1724
1725 /// This indicates the default register class to use for each ValueType the
1726 /// target supports natively.
1727 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
1728 unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
1729 MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
1730
1731 /// This indicates the "representative" register class to use for each
1732 /// ValueType the target supports natively. This information is used by the
1733 /// scheduler to track register pressure. By default, the representative
1734 /// register class is the largest legal super-reg register class of the
1735 /// register class of the specified type. e.g. On x86, i8, i16, and i32's
1736 /// representative class would be GR32.
1737 const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
1738
1739 /// This indicates the "cost" of the "representative" register class for each
1740 /// ValueType. The cost is used by the scheduler to approximate register
1741 /// pressure.
1742 uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
1743
1744 /// For any value types we are promoting or expanding, this contains the value
1745 /// type that we are changing to. For Expanded types, this contains one step
1746 /// of the expansion (e.g. i64 -> i32), even if there are multiple steps required
1747 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
1748 /// the same type (e.g. i32 -> i32).
1749 MVT TransformToType[MVT::LAST_VALUETYPE];
1750
1751 /// For each operation and each value type, keep a LegalizeAction that
1752 /// indicates how instruction selection should deal with the operation. Most
1753 /// operations are Legal (aka, supported natively by the target), but
1754 /// operations that are not should be described. Note that operations on
1755 /// non-legal value types are not described here.
1756 uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
1757
1758 /// For each load extension type and each value type, keep a LegalizeAction
1759 /// that indicates how instruction selection should deal with a load of a
1760 /// specific value type and extension type.
1761 uint8_t LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE]
1762 [ISD::LAST_LOADEXT_TYPE];
1763
1764 /// For each value type pair keep a LegalizeAction that indicates whether a
1765 /// truncating store of a specific value type and truncating type is legal.
1766 uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
1767
1768 /// For each indexed mode and each value type, keep a pair of LegalizeAction
1769 /// that indicates how instruction selection should deal with the load /
1770 /// store.
1771 ///
1772 /// The first dimension is the value_type for the reference. The second
1773 /// dimension represents the various modes for load store.
1774 uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
1775
1776 /// For each condition code (ISD::CondCode) keep a LegalizeAction that
1777 /// indicates how instruction selection should deal with the condition code.
1778 ///
1779 /// Because each CC action takes up 2 bits, we need to have the array size be
1780 /// large enough to fit all of the value types. This can be done by rounding
1781 /// up the MVT::LAST_VALUETYPE value to the next multiple of 16.
1782 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 15) / 16];
1783
1784 ValueTypeActionImpl ValueTypeActions;
1785
1786 public:
1787 LegalizeKind
1788 getTypeConversion(LLVMContext &Context, EVT VT) const {
1789 // If this is a simple type, use the ComputeRegisterProp mechanism.
1790 if (VT.isSimple()) {
1791 MVT SVT = VT.getSimpleVT();
1792 assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
1793 MVT NVT = TransformToType[SVT.SimpleTy];
1794 LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
1795
1796 assert(
1797 (LA == TypeLegal || LA == TypeSoftenFloat ||
1798 ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)
1799 && "Promote may not follow Expand or Promote");
1800
1801 if (LA == TypeSplitVector)
1802 return LegalizeKind(LA, EVT::getVectorVT(Context,
1803 SVT.getVectorElementType(),
1804 SVT.getVectorNumElements()/2));
1805 if (LA == TypeScalarizeVector)
1806 return LegalizeKind(LA, SVT.getVectorElementType());
1807 return LegalizeKind(LA, NVT);
1808 }
1809
1810 // Handle Extended Scalar Types.
1811 if (!VT.isVector()) {
1812 assert(VT.isInteger() && "Float types must be simple");
1813 unsigned BitSize = VT.getSizeInBits();
1814 // First promote to a power-of-two size, then expand if necessary.
1815 if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
1816 EVT NVT = VT.getRoundIntegerType(Context);
1817 assert(NVT != VT && "Unable to round integer VT");
1818 LegalizeKind NextStep = getTypeConversion(Context, NVT);
1819 // Avoid multi-step promotion.
1820 if (NextStep.first == TypePromoteInteger) return NextStep;
1821 // Return rounded integer type.
1822 return LegalizeKind(TypePromoteInteger, NVT);
1823 }
1824
1825 return LegalizeKind(TypeExpandInteger,
1826 EVT::getIntegerVT(Context, VT.getSizeInBits()/2));
1827 }
1828
1829 // Handle vector types.
1830 unsigned NumElts = VT.getVectorNumElements();
1831 EVT EltVT = VT.getVectorElementType();
1832
1833 // Vectors with only one element are always scalarized.
1834 if (NumElts == 1)
1835 return LegalizeKind(TypeScalarizeVector, EltVT);
1836
1837 // Try to widen vector elements until the element type is a power of two and
1838 // promote it to a legal type later on, for example:
1839 // <3 x i8> -> <4 x i8> -> <4 x i32>
1840 if (EltVT.isInteger()) {
1841 // Vectors with a number of elements that is not a power of two are always
1842 // widened, for example <3 x i8> -> <4 x i8>.
1843 if (!VT.isPow2VectorType()) {
1844 NumElts = (unsigned)NextPowerOf2(NumElts);
1845 EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
1846 return LegalizeKind(TypeWidenVector, NVT);
1847 }
1848
1849 // Examine the element type.
1850 LegalizeKind LK = getTypeConversion(Context, EltVT);
1851
1852 // If type is to be expanded, split the vector.
1853 // <4 x i140> -> <2 x i140>
1854 if (LK.first == TypeExpandInteger)
1855 return LegalizeKind(TypeSplitVector,
1856 EVT::getVectorVT(Context, EltVT, NumElts / 2));
1857
1858 // Promote the integer element types until a legal vector type is found
1859 // or until the element integer type is too big. If a legal type was not
1860 // found, fallback to the usual mechanism of widening/splitting the
1861 // vector.
1862 EVT OldEltVT = EltVT;
1863 while (1) {
1864 // Increase the bitwidth of the element to the next pow-of-two
1865 // (which is greater than 8 bits).
1866 EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits())
1867 .getRoundIntegerType(Context);
1868
1869 // Stop trying when getting a non-simple element type.
1870 // Note that vector elements may be greater than legal vector element
1871 // types. Example: X86 XMM registers hold 64bit element on 32bit
1872 // systems.
1873 if (!EltVT.isSimple()) break;
1874
1875 // Build a new vector type and check if it is legal.
1876 MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1877 // Found a legal promoted vector type.
1878 if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
1879 return LegalizeKind(TypePromoteInteger,
1880 EVT::getVectorVT(Context, EltVT, NumElts));
1881 }
1882
1883 // Reset the type to the unexpanded type if we did not find a legal vector
1884 // type with a promoted vector element type.
1885 EltVT = OldEltVT;
1886 }
1887
1888 // Try to widen the vector until a legal type is found.
1889 // If there is no wider legal type, split the vector.
1890 while (1) {
1891 // Round up to the next power of 2.
1892 NumElts = (unsigned)NextPowerOf2(NumElts);
1893
1894 // If there is no simple vector type with this many elements then there
1895 // cannot be a larger legal vector type. Note that this assumes that
1896 // there are no skipped intermediate vector types in the simple types.
1897 if (!EltVT.isSimple()) break;
1898 MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
1899 if (LargerVector == MVT()) break;
1900
1901 // If this type is legal then widen the vector.
1902 if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
1903 return LegalizeKind(TypeWidenVector, LargerVector);
1904 }
1905
1906 // Widen odd vectors to next power of two.
1907 if (!VT.isPow2VectorType()) {
1908 EVT NVT = VT.getPow2VectorType(Context);
1909 return LegalizeKind(TypeWidenVector, NVT);
1910 }
1911
1912 // Vectors with illegal element types are expanded.
1913 EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
1914 return LegalizeKind(TypeSplitVector, NVT);
1915 }
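
  // Worked examples of the rules above, assuming a typical 32-bit target
  // where i32 and v4i32 are legal (actual results depend on the target):
  //   i21       -> (TypePromoteInteger, i32)    round up to a power of two
  //   i64       -> (TypeExpandInteger, i32)     one expansion step at a time
  //   <1 x i32> -> (TypeScalarizeVector, i32)   single-element vectors
  //   <3 x i8>  -> (TypeWidenVector, <4 x i8>)  widen to a power-of-two count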
1916
1917 private:
1918 std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;
1919
1920 /// Targets can specify ISD nodes that they would like PerformDAGCombine
1921 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
1922 /// array.
1923 unsigned char
1924 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
1925
1926 /// For operations that must be promoted to a specific type, this holds the
1927 /// destination type. This map should be sparse, so don't hold it as an
1928 /// array.
1929 ///
1930 /// Targets add entries to this map with AddPromotedToType(..), clients access
1931 /// this with getTypeToPromoteTo(..).
1932 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
1933 PromoteToType;
1934
1935 /// Stores the name of each libcall.
1936 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
1937
1938 /// The ISD::CondCode that should be used to test the result of each of the
1939 /// comparison libcall against zero.
1940 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
1941
1942 /// Stores the CallingConv that should be used for each libcall.
1943 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
1944
1945 protected:
1946 /// \brief Specify maximum number of store instructions per memset call.
1947 ///
1948 /// When lowering \@llvm.memset this field specifies the maximum number of
1949 /// store operations that may be substituted for the call to memset. Targets
1950 /// must set this value based on the cost threshold for that target. Targets
1951 /// should assume that the memset will be done using as many of the largest
1952 /// store operations first, followed by smaller ones, if necessary, per
1953 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
1954 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
1955 /// store. This only applies to setting a constant array of a constant size.
1956 unsigned MaxStoresPerMemset;
1957
1958 /// Maximum number of store operations that may be substituted for the call
1959 /// to memset, used for functions with OptSize attribute.
1960 unsigned MaxStoresPerMemsetOptSize;
1961
1962 /// \brief Specify maximum number of store instructions per memcpy call.
1963 ///
1964 /// When lowering \@llvm.memcpy this field specifies the maximum number of
1965 /// store operations that may be substituted for a call to memcpy. Targets
1966 /// must set this value based on the cost threshold for that target. Targets
1967 /// should assume that the memcpy will be done using as many of the largest
1968 /// store operations first, followed by smaller ones, if necessary, per
1969 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
1970 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store,
1971 /// and one 1-byte store. This only applies to copying a constant array of
1972 /// constant size.
1973 unsigned MaxStoresPerMemcpy;
1974
1975 /// Maximum number of store operations that may be substituted for a call to
1976 /// memcpy, used for functions with OptSize attribute.
1977 unsigned MaxStoresPerMemcpyOptSize;
1978
1979 /// \brief Specify maximum number of store instructions per memmove call.
1980 ///
1981 /// When lowering \@llvm.memmove this field specifies the maximum number of
1982 /// store instructions that may be substituted for a call to memmove. Targets
1983 /// must set this value based on the cost threshold for that target. Targets
1984 /// should assume that the memmove will be done using as many of the largest
1985 /// store operations first, followed by smaller ones, if necessary, per
1986 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
1987 /// with 8-bit alignment would result in nine 1-byte stores. This only
1988 /// applies to copying a constant array of constant size.
1989 unsigned MaxStoresPerMemmove;
1990
1991 /// Maximum number of store instructions that may be substituted for a call to
1992 /// memmove, used for functions with OptSize attribute.
1993 unsigned MaxStoresPerMemmoveOptSize;
1994
1995 /// Tells the code generator that select is more expensive than a branch if
1996 /// the branch is usually predicted right.
1997 bool PredictableSelectIsExpensive;
1998
1999 /// Indicates whether the target supports folding a mask of a single bit, a
2000 /// compare, and a branch into a single instruction.
2001 bool MaskAndBranchFoldingIsLegal;
2002
2003 /// \see enableExtLdPromotion.
2004 bool EnableExtLdPromotion;
2005
2006 protected:
2007 /// Return true if the value types that can be represented by the specified
2008 /// register class are all legal.
2009 bool isLegalRC(const TargetRegisterClass *RC) const;
2010
2011 /// Replace/modify any TargetFrameIndex operands with a target-dependent
2012 /// sequence of memory operands that is recognized by PrologEpilogInserter.
2013 MachineBasicBlock *emitPatchPoint(MachineInstr *MI, MachineBasicBlock *MBB) const;
2014 };
2015
2016 /// This class defines information used to lower LLVM code to legal SelectionDAG
2017 /// operators that the target instruction selector can accept natively.
2018 ///
2019 /// This class also defines callbacks that targets must implement to lower
2020 /// target-specific constructs to SelectionDAG operators.
2021 class TargetLowering : public TargetLoweringBase {
2022 TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
2023 void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;
2024
2025 public:
2026 /// NOTE: The TargetMachine owns TLOF.
2027 explicit TargetLowering(const TargetMachine &TM);
2028
2029 /// Returns true, and sets the base pointer, the offset pointer, and the
2030 /// addressing mode by reference, if the node's address can be legally
2031 /// represented as a pre-indexed load / store address.
2032 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
2033 SDValue &/*Offset*/,
2034 ISD::MemIndexedMode &/*AM*/,
2035 SelectionDAG &/*DAG*/) const {
2036 return false;
2037 }
2038
2039 /// Returns true, and sets the base pointer, the offset pointer, and the
2040 /// addressing mode by reference, if this node can be combined with a
2041 /// load / store to form a post-indexed load / store.
2042 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
2043 SDValue &/*Base*/,
2044 SDValue &/*Offset*/,
2045 ISD::MemIndexedMode &/*AM*/,
2046 SelectionDAG &/*DAG*/) const {
2047 return false;
2048 }
2049
2050 /// Return the entry encoding for a jump table in the current function. The
2051 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
2052 virtual unsigned getJumpTableEncoding() const;
2053
2054 virtual const MCExpr *
2055 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
2056 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
2057 MCContext &/*Ctx*/) const {
2058 llvm_unreachable("Need to implement this hook if target has custom JTIs");
2059 }
2060
2061 /// Returns relocation base for the given PIC jumptable.
2062 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
2063 SelectionDAG &DAG) const;
2064
2065 /// This returns the relocation base for the given PIC jumptable, the same as
2066 /// getPICJumpTableRelocBase, but as an MCExpr.
2067 virtual const MCExpr *
2068 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2069 unsigned JTI, MCContext &Ctx) const;
2070
2071 /// Return true if folding a constant offset with the given GlobalAddress is
2072 /// legal. It is frequently not legal in PIC relocation models.
2073 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
2074
2075 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
2076 SDValue &Chain) const;
2077
2078 void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
2079 SDValue &NewLHS, SDValue &NewRHS,
2080 ISD::CondCode &CCCode, SDLoc DL) const;
2081
2082 /// Returns a pair of (return value, chain).
2083 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
2084 EVT RetVT, const SDValue *Ops,
2085 unsigned NumOps, bool isSigned,
2086 SDLoc dl, bool doesNotReturn = false,
2087 bool isReturnValueUsed = true) const;
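
  // A sketch of a typical use when expanding an unsupported FP operation into
  // a runtime call (the operands and libcall choice are illustrative):
  //   SDValue Ops[2] = { LHS, RHS };
  //   std::pair<SDValue, SDValue> CallInfo =
  //       makeLibCall(DAG, RTLIB::DIV_F128, MVT::f128, Ops, 2,
  //                   /*isSigned=*/false, dl);
  //   // CallInfo.first is the return value, CallInfo.second the new chain.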
2088
2089 //===--------------------------------------------------------------------===//
2090 // TargetLowering Optimization Methods
2091 //
2092
2093 /// A convenience struct that encapsulates a DAG, and two SDValues for
2094 /// returning information from TargetLowering to its clients that want to
2095 /// combine.
2096 struct TargetLoweringOpt {
2097 SelectionDAG &DAG;
2098 bool LegalTys;
2099 bool LegalOps;
2100 SDValue Old;
2101 SDValue New;
2102
2103 explicit TargetLoweringOpt(SelectionDAG &InDAG,
2104 bool LT, bool LO) :
2105 DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
2106
2107 bool LegalTypes() const { return LegalTys; }
2108 bool LegalOperations() const { return LegalOps; }
2109
2110 bool CombineTo(SDValue O, SDValue N) {
2111 Old = O;
2112 New = N;
2113 return true;
2114 }
2115
2116 /// Check to see if the specified operand of the specified instruction is a
2117 /// constant integer. If so, check to see if there are any bits set in the
2118 /// constant that are not demanded. If so, shrink the constant and return
2119 /// true.
2120 bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
2121
2122 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
2123 /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
2124 /// generalized for targets with other types of implicit widening casts.
2125 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
2126 SDLoc dl);
2127 };
2128
2129 /// Look at Op. At this point, we know that only the DemandedMask bits of the
2130 /// result of Op are ever used downstream. If we can use this information to
2131 /// simplify Op, create a new simplified DAG node and return true, returning
2132 /// the original and new nodes in Old and New. Otherwise, analyze the
2133 /// expression and return a mask of KnownOne and KnownZero bits for the
2134 /// expression (used to simplify the caller). The KnownZero/One bits may only
2135 /// be accurate for those bits in the DemandedMask.
2136 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
2137 APInt &KnownZero, APInt &KnownOne,
2138 TargetLoweringOpt &TLO, unsigned Depth = 0) const;
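
  // Sketch of the usual calling pattern from a target combine, where DCI is
  // the DAGCombinerInfo defined below:
  //   TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
  //                         !DCI.isBeforeLegalizeOps());
  //   APInt KnownZero, KnownOne;
  //   if (SimplifyDemandedBits(Op, DemandedMask, KnownZero, KnownOne, TLO)) {
  //     DCI.CommitTargetLoweringOpt(TLO);
  //     return TLO.New;
  //   }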
2139
2140 /// Determine which of the bits specified in Mask are known to be either zero
2141 /// or one and return them in the KnownZero/KnownOne bitsets.
2142 virtual void computeKnownBitsForTargetNode(const SDValue Op,
2143 APInt &KnownZero,
2144 APInt &KnownOne,
2145 const SelectionDAG &DAG,
2146 unsigned Depth = 0) const;
2147
2148 /// This method can be implemented by targets that want to expose additional
2149 /// information about sign bits to the DAG Combiner.
2150 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
2151 const SelectionDAG &DAG,
2152 unsigned Depth = 0) const;
2153
2154 struct DAGCombinerInfo {
2155 void *DC; // The DAG Combiner object.
2156 CombineLevel Level;
2157 bool CalledByLegalizer;
2158 public:
2159 SelectionDAG &DAG;
2160
2161 DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
2162 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
2163
2164 bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
2165 bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
2166 bool isAfterLegalizeVectorOps() const {
2167 return Level == AfterLegalizeDAG;
2168 }
2169 CombineLevel getDAGCombineLevel() { return Level; }
2170 bool isCalledByLegalizer() const { return CalledByLegalizer; }
2171
2172 void AddToWorklist(SDNode *N);
2173 void RemoveFromWorklist(SDNode *N);
2174 SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
2175 bool AddTo = true);
2176 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
2177 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
2178
2179 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
2180 };
2181
2182 /// Return true if N is a constant or constant vector equal to the true value
2183 /// from getBooleanContents().
2184 bool isConstTrueVal(const SDNode *N) const;
2185
2186 /// Return true if N is a constant or constant vector equal to the false value
2187 /// from getBooleanContents().
2188 bool isConstFalseVal(const SDNode *N) const;
2189
2190 /// Try to simplify a setcc built with the specified operands and cc. If it is
2191 /// unable to simplify it, return a null SDValue.
2192 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
2193 ISD::CondCode Cond, bool foldBooleans,
2194 DAGCombinerInfo &DCI, SDLoc dl) const;
2195
2196 /// Returns true (and the GlobalValue and the offset) if the node is a
2197 /// GlobalAddress + offset.
2198 virtual bool
2199 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
2200
2201 /// This method will be invoked for all target nodes and for any
2202 /// target-independent nodes that the target has registered interest in
2203 /// via setTargetDAGCombine.
2204 ///
2205 /// The semantics are as follows:
2206 /// Return Value:
2207 /// SDValue.getNode() == nullptr - No change was made.
2208 /// SDValue.getNode() == N - N was replaced, is dead, and is already handled.
2209 /// otherwise - N should be replaced by the returned Operand.
2210 ///
2211 /// In addition, methods provided by DAGCombinerInfo may be used to perform
2212 /// more complex transformations.
2213 ///
2214 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
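
  // Skeleton of a typical implementation (the subclass name is hypothetical):
  //   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
  //                                               DAGCombinerInfo &DCI) const {
  //     switch (N->getOpcode()) {
  //     case ISD::SHL:
  //       // ... target-specific fold, returning the replacement value ...
  //       break;
  //     }
  //     return SDValue(); // no change
  //   }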
2215
2216 /// Return true if it is profitable to move a following shift through this
2217 /// node, adjusting any immediate operands as necessary to preserve semantics.
2218 /// This transformation may not be desirable if it disrupts a particularly
2219 /// auspicious target-specific tree (e.g. bitfield extraction in AArch64).
2220 /// By default, it returns true.
2221 virtual bool isDesirableToCommuteWithShift(const SDNode *N /*Op*/) const {
2222 return true;
2223 }
2224
2225 /// Return true if the target has native support for the specified value type
2226 /// and it is 'desirable' to use the type for the given node type. e.g. On x86
2227 /// i16 is legal, but undesirable since i16 instruction encodings are longer
2228 /// and some i16 instructions are slow.
2229 virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
2230 // By default, assume all legal types are desirable.
2231 return isTypeLegal(VT);
2232 }
2233
2234 /// Return true if it is profitable for dag combiner to transform a floating
2235 /// point op of the specified opcode to an equivalent op of an integer
2236 /// type. e.g. f32 load -> i32 load can be profitable on ARM.
2237 virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
2238 EVT /*VT*/) const {
2239 return false;
2240 }
2241
2242 /// This method queries the target whether it is beneficial for the dag combiner to
2243 /// promote the specified node. If true, it should return the desired
2244 /// promotion type by reference.
2245 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
2246 return false;
2247 }
2248
2249 //===--------------------------------------------------------------------===//
2250 // Lowering methods - These methods must be implemented by targets so that
2251 // the SelectionDAGBuilder code knows how to lower these.
2252 //
2253
2254 /// This hook must be implemented to lower the incoming (formal) arguments,
2255 /// described by the Ins array, into the specified DAG. The implementation
2256 /// should fill in the InVals array with legal-type argument values, and
2257 /// return the resulting token chain value.
2258 ///
2259 virtual SDValue
2260 LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
2261 bool /*isVarArg*/,
2262 const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
2263 SDLoc /*dl*/, SelectionDAG &/*DAG*/,
2264 SmallVectorImpl<SDValue> &/*InVals*/) const {
2265 llvm_unreachable("Not Implemented");
2266 }
2267
2268 struct ArgListEntry {
2269 SDValue Node;
2270 Type* Ty;
2271 bool isSExt : 1;
2272 bool isZExt : 1;
2273 bool isInReg : 1;
2274 bool isSRet : 1;
2275 bool isNest : 1;
2276 bool isByVal : 1;
2277 bool isInAlloca : 1;
2278 bool isReturned : 1;
2279 uint16_t Alignment;
2280
2281 ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
2282 isSRet(false), isNest(false), isByVal(false), isInAlloca(false),
2283 isReturned(false), Alignment(0) { }
2284
2285 void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
2286 };
2287 typedef std::vector<ArgListEntry> ArgListTy;
2288
2289 /// This structure contains all information that is necessary for lowering
2290 /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
2291 /// needs to lower a call, and targets will see this struct in their LowerCall
2292 /// implementation.
2293 struct CallLoweringInfo {
2294 SDValue Chain;
2295 Type *RetTy;
2296 bool RetSExt : 1;
2297 bool RetZExt : 1;
2298 bool IsVarArg : 1;
2299 bool IsInReg : 1;
2300 bool DoesNotReturn : 1;
2301 bool IsReturnValueUsed : 1;
2302
2303 // IsTailCall should be modified by implementations of
2304 // TargetLowering::LowerCall that perform tail call conversions.
2305 bool IsTailCall;
2306
2307 unsigned NumFixedArgs;
2308 CallingConv::ID CallConv;
2309 SDValue Callee;
2310 ArgListTy Args;
2311 SelectionDAG &DAG;
2312 SDLoc DL;
2313 ImmutableCallSite *CS;
2314 bool IsPatchPoint;
2315 SmallVector<ISD::OutputArg, 32> Outs;
2316 SmallVector<SDValue, 32> OutVals;
2317 SmallVector<ISD::InputArg, 32> Ins;
2318
2319 CallLoweringInfo(SelectionDAG &DAG)
2320 : RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
2321 IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
2322 IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
2323 DAG(DAG), CS(nullptr), IsPatchPoint(false) {}
2324
2325 CallLoweringInfo &setDebugLoc(SDLoc dl) {
2326 DL = dl;
2327 return *this;
2328 }
2329
2330 CallLoweringInfo &setChain(SDValue InChain) {
2331 Chain = InChain;
2332 return *this;
2333 }
2334
2335 CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
2336 SDValue Target, ArgListTy &&ArgsList,
2337 unsigned FixedArgs = -1) {
2338 RetTy = ResultType;
2339 Callee = Target;
2340 CallConv = CC;
2341 NumFixedArgs =
2342 (FixedArgs == static_cast<unsigned>(-1) ? Args.size() : FixedArgs);
2343 Args = std::move(ArgsList);
2344 return *this;
2345 }
2346
2347 CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
2348 SDValue Target, ArgListTy &&ArgsList,
2349 ImmutableCallSite &Call) {
2350 RetTy = ResultType;
2351
2352 IsInReg = Call.paramHasAttr(0, Attribute::InReg);
2353 DoesNotReturn = Call.doesNotReturn();
2354 IsVarArg = FTy->isVarArg();
2355 IsReturnValueUsed = !Call.getInstruction()->use_empty();
2356 RetSExt = Call.paramHasAttr(0, Attribute::SExt);
2357 RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
2358
2359 Callee = Target;
2360
2361 CallConv = Call.getCallingConv();
2362 NumFixedArgs = FTy->getNumParams();
2363 Args = std::move(ArgsList);
2364
2365 CS = &Call;
2366
2367 return *this;
2368 }
2369
2370 CallLoweringInfo &setInRegister(bool Value = true) {
2371 IsInReg = Value;
2372 return *this;
2373 }
2374
2375 CallLoweringInfo &setNoReturn(bool Value = true) {
2376 DoesNotReturn = Value;
2377 return *this;
2378 }
2379
2380 CallLoweringInfo &setVarArg(bool Value = true) {
2381 IsVarArg = Value;
2382 return *this;
2383 }
2384
2385 CallLoweringInfo &setTailCall(bool Value = true) {
2386 IsTailCall = Value;
2387 return *this;
2388 }
2389
2390 CallLoweringInfo &setDiscardResult(bool Value = true) {
2391 IsReturnValueUsed = !Value;
2392 return *this;
2393 }
2394
2395 CallLoweringInfo &setSExtResult(bool Value = true) {
2396 RetSExt = Value;
2397 return *this;
2398 }
2399
2400 CallLoweringInfo &setZExtResult(bool Value = true) {
2401 RetZExt = Value;
2402 return *this;
2403 }
2404
2405 CallLoweringInfo &setIsPatchPoint(bool Value = true) {
2406 IsPatchPoint = Value;
2407 return *this;
2408 }
2409
2410 ArgListTy &getArgs() {
2411 return Args;
2412 }
2413 };
2414
2415 /// This function lowers an abstract call to a function into an actual call.
2416 /// This returns a pair of operands. The first element is the return value
2417 /// for the function (if RetTy is not VoidTy). The second element is the
2418 /// outgoing token chain. It calls LowerCall to do the actual lowering.
2419 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
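
  // Sketch of how a target typically builds up a call during lowering
  // (argument list construction elided; the setters are defined above):
  //   TargetLowering::CallLoweringInfo CLI(DAG);
  //   CLI.setDebugLoc(dl).setChain(Chain)
  //      .setCallee(CallingConv::C, RetTy, Callee, std::move(Args))
  //      .setDiscardResult();
  //   std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);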
2420
2421 /// This hook must be implemented to lower calls into the specified
2422 /// DAG. The outgoing arguments to the call are described by the Outs array,
2423 /// and the values to be returned by the call are described by the Ins
2424 /// array. The implementation should fill in the InVals array with legal-type
2425 /// return values from the call, and return the resulting token chain value.
2426 virtual SDValue
2427 LowerCall(CallLoweringInfo &/*CLI*/,
2428 SmallVectorImpl<SDValue> &/*InVals*/) const {
2429 llvm_unreachable("Not Implemented");
2430 }
2431
2432 /// Target-specific cleanup for formal ByVal parameters.
2433 virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
2434
2435 /// This hook should be implemented to check whether the return values
2436 /// described by the Outs array can fit into the return registers. If false
2437 /// is returned, an sret-demotion is performed.
2438 virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
2439 MachineFunction &/*MF*/, bool /*isVarArg*/,
2440 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2441 LLVMContext &/*Context*/) const
2442 {
2443 // Return true by default to get preexisting behavior.
2444 return true;
2445 }
2446
2447 /// This hook must be implemented to lower outgoing return values, described
2448 /// by the Outs array, into the specified DAG. The implementation should
2449 /// return the resulting token chain value.
2450 virtual SDValue
2451 LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
2452 bool /*isVarArg*/,
2453 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2454 const SmallVectorImpl<SDValue> &/*OutVals*/,
2455 SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
2456 llvm_unreachable("Not Implemented");
2457 }
2458
2459 /// Return true if the result of the specified node is used only by a return
2460 /// node. It also computes and returns the input chain for the tail call.
2461 ///
2462 /// This is used to determine whether it is possible to codegen a libcall as
2463 /// tail call at legalization time.
2464 virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
2465 return false;
2466 }
2467
2468 /// Return true if the target may be able to emit the call instruction as a tail
2469 /// call. This is used by optimization passes to determine if it's profitable
2470 /// to duplicate return instructions to enable tailcall optimization.
2471 virtual bool mayBeEmittedAsTailCall(CallInst *) const {
2472 return false;
2473 }
2474
2475 /// Return the builtin name for the __builtin___clear_cache intrinsic.
2476 /// The default is to invoke the clear cache library call.
2477 virtual const char * getClearCacheBuiltinName() const {
2478 return "__clear_cache";
2479 }
2480
2481 /// Return the register ID of the name passed in. Used by named register
2482 /// global variables extension. There is no target-independent behavior,
2483 /// so the default action is to bail.
2484 virtual unsigned getRegisterByName(const char* RegName, EVT VT) const {
2485 report_fatal_error("Named registers not implemented for this target");
2486 }
2487
2488 /// Return the type that should be used to zero or sign extend a
2489 /// zeroext/signext integer argument or return value. FIXME: Most C calling
2490 /// conventions require the return type to be promoted, but this is not true
2491 /// all the time, e.g. i1 on x86-64. It is also not necessary for non-C
2492 /// calling conventions. The frontend should handle this and include all of
2493 /// the necessary information.
2494 virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2495 ISD::NodeType /*ExtendKind*/) const {
2496 EVT MinVT = getRegisterType(Context, MVT::i32);
2497 return VT.bitsLT(MinVT) ? MinVT : VT;
2498 }
2499
2500 /// For some targets, an LLVM struct type must be broken down into multiple
2501 /// simple types, but the calling convention specifies that the entire struct
2502 /// must be passed in a block of consecutive registers.
2503 virtual bool
2504 functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
2505 bool isVarArg) const {
2506 return false;
2507 }
2508
2509 /// Returns a 0 terminated array of registers that can be safely used as
2510 /// scratch registers.
2511 virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
2512 return nullptr;
2513 }
2514
2515 /// This callback is used to prepare for a volatile or atomic load.
2516 /// It takes a chain node as input and returns the chain for the load itself.
2517 ///
2518 /// Having a callback like this is necessary for targets like SystemZ,
2519 /// which allows a CPU to reuse the result of a previous load indefinitely,
2520 /// even if a cache-coherent store is performed by another CPU. The default
2521 /// implementation does nothing.
2522 virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL,
2523 SelectionDAG &DAG) const {
2524 return Chain;
2525 }
2526
2527 /// This callback is invoked by the type legalizer to legalize nodes with an
2528 /// illegal operand type but legal result types. It replaces the
2529 /// LowerOperation callback in the type Legalizer. The reason we can not do
2530 /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
2531 /// use this callback.
2532 ///
2533 /// TODO: Consider merging with ReplaceNodeResults.
2534 ///
2535 /// The target places new result values for the node in Results (their number
2536 /// and types must exactly match those of the original return values of
2537 /// the node), or leaves Results empty, which indicates that the node is not
2538 /// to be custom lowered after all.
2539 /// The default implementation calls LowerOperation.
2540 virtual void LowerOperationWrapper(SDNode *N,
2541 SmallVectorImpl<SDValue> &Results,
2542 SelectionDAG &DAG) const;
2543
2544 /// This callback is invoked for operations that are unsupported by the
2545 /// target, which are registered to use 'custom' lowering, and whose defined
2546 /// values are all legal. If the target has no operations that require custom
2547 /// lowering, it need not implement this. The default implementation of this
2548 /// aborts.
2549 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
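
  // Typical shape of an implementation (the subclass and helper names are
  // hypothetical):
  //   SDValue MyTargetLowering::LowerOperation(SDValue Op,
  //                                            SelectionDAG &DAG) const {
  //     switch (Op.getOpcode()) {
  //     case ISD::GlobalAddress: return lowerGlobalAddress(Op, DAG);
  //     default: llvm_unreachable("unexpected custom legalization");
  //     }
  //   }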
2550
2551 /// This callback is invoked when a node result type is illegal for the
2552 /// target, and the operation was registered to use 'custom' lowering for that
2553 /// result type. The target places new result values for the node in Results
2554 /// (their number and types must exactly match those of the original return
2555 /// values of the node), or leaves Results empty, which indicates that the
2556 /// node is not to be custom lowered after all.
2557 ///
2558 /// If the target has no operations that require custom lowering, it need not
2559 /// implement this. The default implementation aborts.
2560 virtual void ReplaceNodeResults(SDNode * /*N*/,
2561 SmallVectorImpl<SDValue> &/*Results*/,
2562 SelectionDAG &/*DAG*/) const {
2563 llvm_unreachable("ReplaceNodeResults not implemented for this target!");
2564 }
2565
2566 /// This method returns the name of a target specific DAG node.
2567 virtual const char *getTargetNodeName(unsigned Opcode) const;
2568
2569 /// This method returns a target specific FastISel object, or null if the
2570 /// target does not support "fast" ISel.
2571 virtual FastISel *createFastISel(FunctionLoweringInfo &,
2572 const TargetLibraryInfo *) const {
2573 return nullptr;
2574 }
2575
2576
2577 bool verifyReturnAddressArgumentIsConstant(SDValue Op,
2578 SelectionDAG &DAG) const;
2579
2580 //===--------------------------------------------------------------------===//
2581 // Inline Asm Support hooks
2582 //
2583
2584 /// This hook allows the target to expand an inline asm call to be explicit
2585 /// llvm code if it wants to. This is useful for turning simple inline asms
2586 /// into LLVM intrinsics, which gives the compiler more information about the
2587 /// behavior of the code.
2588 virtual bool ExpandInlineAsm(CallInst *) const {
2589 return false;
2590 }
2591
2592 enum ConstraintType {
2593 C_Register, // Constraint represents specific register(s).
2594 C_RegisterClass, // Constraint represents any of register(s) in class.
2595 C_Memory, // Memory constraint.
2596 C_Other, // Something else.
2597 C_Unknown // Unsupported constraint.
2598 };
2599
2600 enum ConstraintWeight {
2601 // Generic weights.
2602 CW_Invalid = -1, // No match.
2603 CW_Okay = 0, // Acceptable.
2604 CW_Good = 1, // Good weight.
2605 CW_Better = 2, // Better weight.
2606 CW_Best = 3, // Best weight.
2607
2608 // Well-known weights.
2609 CW_SpecificReg = CW_Okay, // Specific register operands.
2610 CW_Register = CW_Good, // Register operands.
2611 CW_Memory = CW_Better, // Memory operands.
2612 CW_Constant = CW_Best, // Constant operand.
2613 CW_Default = CW_Okay // Default or don't know type.
2614 };
2615
2616 /// This contains information for each constraint that we are lowering.
2617 struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
2618 /// This contains the actual string for the code, like "m". TargetLowering
2619 /// picks the 'best' code from ConstraintInfo::Codes that most closely
2620 /// matches the operand.
2621 std::string ConstraintCode;
2622
2623 /// Information about the constraint code, e.g. Register, RegisterClass,
2624 /// Memory, Other, Unknown.
2625 TargetLowering::ConstraintType ConstraintType;
2626
2627 /// If this is the result output operand or a clobber, this is null,
2628 /// otherwise it is the incoming operand to the CallInst. This gets
2629 /// modified as the asm is processed.
2630 Value *CallOperandVal;
2631
2632 /// The ValueType for the operand value.
2633 MVT ConstraintVT;
2634
2635 /// Return true if this is an input operand that is a matching constraint
2636 /// like "4".
2637 bool isMatchingInputConstraint() const;
2638
2639 /// If this is an input matching constraint, this method returns the output
2640 /// operand it matches.
2641 unsigned getMatchedOperand() const;
2642
2643 /// Copy constructor for copying from a ConstraintInfo.
2644 AsmOperandInfo(InlineAsm::ConstraintInfo Info)
2645 : InlineAsm::ConstraintInfo(std::move(Info)),
2646 ConstraintType(TargetLowering::C_Unknown), CallOperandVal(nullptr),
2647 ConstraintVT(MVT::Other) {}
2648 };
2649
2650 typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
2651
2652 /// Split up the constraint string from the inline assembly value into the
2653 /// specific constraints and their prefixes, and also tie in the associated
2654 /// operand values. If this returns an empty vector, and if the constraint
2655 /// string itself isn't empty, there was an error parsing.
2656 virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;

  /// Examine constraint type and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getMultipleConstraintMatchWeight(
      AsmOperandInfo &info, int maIndex) const;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

  /// Determines the constraint code and constraint type to use for the
  /// specific AsmOperandInfo, setting OpInfo.ConstraintCode and
  /// OpInfo.ConstraintType. If the actual operand being passed in is
  /// available, it can be passed in as Op, otherwise an empty SDValue can be
  /// passed.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      SelectionDAG *DAG = nullptr) const;

  /// Given a constraint, return the type of constraint it is for this target.
  virtual ConstraintType getConstraintType(const std::string &Constraint) const;

  /// Given a physical register constraint (e.g. {edx}), return the register
  /// number and the register class for the register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register number of 0 and the register
  /// class pointer.
  ///
  /// This should only be used for C_Register constraints. On error, this
  /// returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass*>
  getRegForInlineAsmConstraint(const std::string &Constraint,
                               MVT VT) const;
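
  // A minimal sketch of a target override; the target name, register, and
  // register class below are hypothetical:
  //
  //   std::pair<unsigned, const TargetRegisterClass*>
  //   MyTargetLowering::getRegForInlineAsmConstraint(
  //       const std::string &Constraint, MVT VT) const {
  //     if (Constraint == "{edx}")               // Physical register.
  //       return std::make_pair(MyTarget::EDX, &MyTarget::GR32RegClass);
  //     if (Constraint == "r" && VT == MVT::i32) // Register class.
  //       return std::make_pair(0U, &MyTarget::GR32RegClass);
  //     // Fall back to the generic handling (e.g. named-register matching).
  //     return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
  //   }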

  /// Try to replace an X constraint, which matches anything, with another that
  /// has more specific requirements based on the type of the corresponding
  /// operand. This returns null if there is no replacement to make.
  virtual const char *LowerXConstraint(EVT ConstraintVT) const;

  /// Lower the specified operand into the Ops vector. If it is invalid, don't
  /// add anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;
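
  // Sketch of a typical override, assuming a hypothetical target whose 'I'
  // constraint accepts unsigned immediates below 32. A valid operand is
  // appended to Ops as a target constant; anything else is rejected simply
  // by leaving Ops empty:
  //
  //   void MyTargetLowering::LowerAsmOperandForConstraint(
  //       SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
  //       SelectionDAG &DAG) const {
  //     if (Constraint == "I") {
  //       if (auto *C = dyn_cast<ConstantSDNode>(Op))
  //         if (C->getZExtValue() < 32)
  //           Ops.push_back(DAG.getTargetConstant(C->getZExtValue(),
  //                                               Op.getValueType()));
  //       return;
  //     }
  //     TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
  //   }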

  //===--------------------------------------------------------------------===//
  // Div utility functions
  //

  /// Given an exact signed division by a constant, build a multiplication by
  /// the constant's multiplicative inverse instead.
  SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl,
                         SelectionDAG &DAG) const;

  /// Build an SDIV (UDIV) of node N by the constant Divisor using the
  /// magic-number multiplication technique, appending any intermediate nodes
  /// to *Created.
  SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                    bool IsAfterLegalization,
                    std::vector<SDNode *> *Created) const;
  SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                    bool IsAfterLegalization,
                    std::vector<SDNode *> *Created) const;

  /// Targets may override this hook to produce a custom sequence for a signed
  /// division by a power-of-2 constant; the default returns an empty SDValue,
  /// meaning no custom sequence is available.
  virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                SelectionDAG &DAG,
                                std::vector<SDNode *> *Created) const {
    return SDValue();
  }
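
  // For reference, BuildSDIV/BuildUDIV replace a division by a constant with
  // a multiplication by a fixed-point "magic" reciprocal plus shifts. A
  // scalar sketch of the idea for an unsigned 32-bit divide by 7 (the magic
  // constant and fixup come from the standard derivation; shown purely for
  // illustration):
  //
  //   uint32_t udiv7(uint32_t n) {
  //     uint32_t q = (uint64_t)n * 0x24924925u >> 32; // high half of n*magic
  //     return (((n - q) >> 1) + q) >> 2;             // fixup + final shift
  //   }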

  /// Indicate whether this target prefers to combine the given number of FDIVs
  /// with the same divisor.
  virtual bool combineRepeatedFPDivisors(unsigned NumUsers) const {
    return false;
  }
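
  // When a target returns true here (and the FP environment permits it), the
  // DAG combiner may rewrite several divisions by one divisor into a single
  // reciprocal and that many multiplies, i.e. in source terms:
  //
  //   a/d; b/d; c/d;   =>   t = 1.0/d; a*t; b*t; c*t;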

  /// Hooks for building estimates in place of slower divisions and square
  /// roots.

  /// Return a reciprocal square root estimate value for the input operand.
  /// The RefinementSteps output is the number of Newton-Raphson refinement
  /// iterations required to generate a sufficient (though not necessarily
  /// IEEE-754 compliant) estimate for the value type.
  /// The boolean UseOneConstNR output is used to select a Newton-Raphson
  /// algorithm implementation that uses either one constant or two constants.
  /// A target may choose to implement its own refinement within this function.
  /// If it does, it should return '0' as the number of RefinementSteps to
  /// avoid any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getRsqrtEstimate(SDValue Operand,
                                   DAGCombinerInfo &DCI,
                                   unsigned &RefinementSteps,
                                   bool &UseOneConstNR) const {
    return SDValue();
  }
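
  // The Newton-Raphson refinement applied to such an estimate follows the
  // classic recurrence for 1/sqrt(a), roughly doubling the number of correct
  // bits per step (UseOneConstNR selects an algebraically refactored variant
  // of the same iteration):
  //
  //   x_{n+1} = x_n * (1.5 - 0.5 * a * x_n * x_n)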

  /// Return a reciprocal estimate value for the input operand.
  /// The RefinementSteps output is the number of Newton-Raphson refinement
  /// iterations required to generate a sufficient (though not necessarily
  /// IEEE-754 compliant) estimate for the value type.
  /// A target may choose to implement its own refinement within this function.
  /// If it does, it should return '0' as the number of RefinementSteps to
  /// avoid any further refinement of the estimate.
  /// An empty SDValue return means no estimate sequence can be created.
  virtual SDValue getRecipEstimate(SDValue Operand,
                                   DAGCombinerInfo &DCI,
                                   unsigned &RefinementSteps) const {
    return SDValue();
  }
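
  // The corresponding Newton-Raphson recurrence for a reciprocal 1/a, again
  // roughly doubling the number of correct bits per step:
  //
  //   x_{n+1} = x_n * (2.0 - a * x_n)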

  //===--------------------------------------------------------------------===//
  // Legalization utility functions
  //

  /// Expand a MUL into two nodes, one that computes the high bits of the
  /// result and one that computes the low bits.
  /// \param HiLoVT The value type to use for the Lo and Hi nodes.
  /// \param LL Low bits of the LHS of the MUL. You can use this parameter
  ///           if you want to control how low bits are extracted from the LHS.
  /// \param LH High bits of the LHS of the MUL. See LL for meaning.
  /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
  /// \param RH High bits of the RHS of the MUL. See LL for meaning.
  /// \returns true if the node has been expanded, false if it has not.
  bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
                 SelectionDAG &DAG, SDValue LL = SDValue(),
                 SDValue LH = SDValue(), SDValue RL = SDValue(),
                 SDValue RH = SDValue()) const;
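
  // The expansion follows the schoolbook decomposition of a wide multiply
  // into half-width partial products. With L = LH:LL and R = RH:RL, and
  // hi()/lo() denoting the high and low halves of a double-width product:
  //
  //   Lo = lo(LL * RL)
  //   Hi = hi(LL * RL) + lo(LL * RH) + lo(LH * RL)
  //
  // The LH*RH term and the high halves of the cross terms only affect bits
  // above Hi and are therefore dropped.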

  /// Expand a float-to-signed-integer conversion (e.g. f32 to i64).
  /// \param N Node to expand
  /// \param Result output after conversion
  /// \returns true if the expansion was successful, false otherwise
  bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  /// This method should be implemented by targets that mark instructions with
  /// the 'usesCustomInserter' flag. These instructions are special in various
  /// ways, which require special support to insert. The specified MachineInstr
  /// is created but not inserted into any basic blocks, and this method is
  /// called to expand it into a sequence of instructions, potentially also
  /// creating new basic blocks and control flow.
  virtual MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
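
  // A typical override dispatches on the pseudo-instruction's opcode; the
  // shape below is a sketch only, and MyTarget::SELECT_PSEUDO as well as
  // emitSelect are hypothetical:
  //
  //   MachineBasicBlock *
  //   MyTargetLowering::EmitInstrWithCustomInserter(
  //       MachineInstr *MI, MachineBasicBlock *MBB) const {
  //     switch (MI->getOpcode()) {
  //     case MyTarget::SELECT_PSEUDO:
  //       // Split MBB, build the diamond control flow with a conditional
  //       // branch and a PHI in the join block, erase MI, return the join.
  //       return emitSelect(MI, MBB);
  //     default:
  //       llvm_unreachable("unexpected instr type to insert");
  //     }
  //   }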

  /// This method should be implemented by targets that mark instructions with
  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
  /// instruction selection by target hooks, e.g. to fill in optional defs for
  /// ARM 's'-setting instructions.
  virtual void
  AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;

  /// If this function returns true, SelectionDAGBuilder emits a
  /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
  virtual bool useLoadStackGuardNode() const {
    return false;
  }
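
  // A target whose stack guard lives at a fixed, ABI-defined location can opt
  // in with a one-line override (sketch; MyTargetLowering is hypothetical):
  //
  //   bool MyTargetLowering::useLoadStackGuardNode() const { return true; }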
};

/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
void GetReturnInfo(Type* ReturnType, AttributeSet attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI);

} // end llvm namespace

#endif