]>
Commit | Line | Data |
---|---|---|
1a4d82fc JJ |
1 | //===-- AMDILISelDAGToDAG.cpp - A dag to dag inst selector for AMDIL ------===// |
2 | // | |
3 | // The LLVM Compiler Infrastructure | |
4 | // | |
5 | // This file is distributed under the University of Illinois Open Source | |
6 | // License. See LICENSE.TXT for details. | |
7 | // | |
8 | //==-----------------------------------------------------------------------===// | |
9 | // | |
10 | /// \file | |
11 | /// \brief Defines an instruction selector for the AMDGPU target. | |
12 | // | |
13 | //===----------------------------------------------------------------------===// | |
14 | #include "AMDGPUInstrInfo.h" | |
15 | #include "AMDGPUISelLowering.h" // For AMDGPUISD | |
16 | #include "AMDGPURegisterInfo.h" | |
17 | #include "AMDGPUSubtarget.h" | |
18 | #include "R600InstrInfo.h" | |
19 | #include "SIDefines.h" | |
20 | #include "SIISelLowering.h" | |
21 | #include "SIMachineFunctionInfo.h" | |
22 | #include "llvm/CodeGen/FunctionLoweringInfo.h" | |
23 | #include "llvm/CodeGen/PseudoSourceValue.h" | |
24 | #include "llvm/CodeGen/MachineFrameInfo.h" | |
25 | #include "llvm/CodeGen/MachineRegisterInfo.h" | |
26 | #include "llvm/CodeGen/SelectionDAG.h" | |
27 | #include "llvm/CodeGen/SelectionDAGISel.h" | |
28 | #include "llvm/IR/Function.h" | |
29 | ||
30 | using namespace llvm; | |
31 | ||
32 | //===----------------------------------------------------------------------===// | |
33 | // Instruction Selector Implementation | |
34 | //===----------------------------------------------------------------------===// | |
35 | ||
namespace {
/// AMDGPU specific code to select AMDGPU machine instructions for
/// SelectionDAG operations.
class AMDGPUDAGToDAGISel : public SelectionDAGISel {
  // Subtarget - Keep a pointer to the AMDGPU Subtarget around so that we can
  // make the right decision when generating code for different targets.
  const AMDGPUSubtarget &Subtarget;
public:
  AMDGPUDAGToDAGISel(TargetMachine &TM);
  virtual ~AMDGPUDAGToDAGISel();

  /// Main entry point: turn one SelectionDAG node into machine node(s).
  SDNode *Select(SDNode *N) override;
  const char *getPassName() const override;
  void PostprocessISelDAG() override;

private:
  /// \returns true if \p N is a 64-bit immediate that can be encoded inline
  /// (analyzeImmediate() == 0).
  bool isInlineImmediate(SDNode *N) const;
  inline SDValue getSmallIPtrImm(unsigned Imm);
  // R600 operand-folding helpers (fold sources/modifiers into ALU ops).
  bool FoldOperand(SDValue &Src, SDValue &Sel, SDValue &Neg, SDValue &Abs,
                   const R600InstrInfo *TII);
  bool FoldOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);
  bool FoldDotOperands(unsigned, const R600InstrInfo *, std::vector<SDValue> &);

  // Complex pattern selectors
  bool SelectADDRParam(SDValue Addr, SDValue& R1, SDValue& R2);
  bool SelectADDR(SDValue N, SDValue &R1, SDValue &R2);
  bool SelectADDR64(SDValue N, SDValue &R1, SDValue &R2);

  // Address-space classification helpers used by the load/store predicates
  // below (which are referenced from the TableGen patterns).
  static bool checkType(const Value *ptr, unsigned int addrspace);
  static bool checkPrivateAddress(const MachineMemOperand *Op);

  static bool isGlobalStore(const StoreSDNode *N);
  static bool isFlatStore(const StoreSDNode *N);
  static bool isPrivateStore(const StoreSDNode *N);
  static bool isLocalStore(const StoreSDNode *N);
  static bool isRegionStore(const StoreSDNode *N);

  bool isCPLoad(const LoadSDNode *N) const;
  bool isConstantLoad(const LoadSDNode *N, int cbID) const;
  bool isGlobalLoad(const LoadSDNode *N) const;
  bool isFlatLoad(const LoadSDNode *N) const;
  bool isParamLoad(const LoadSDNode *N) const;
  bool isPrivateLoad(const LoadSDNode *N) const;
  bool isLocalLoad(const LoadSDNode *N) const;
  bool isRegionLoad(const LoadSDNode *N) const;

  /// \returns the register class expected for operand \p OpNo of machine
  /// node \p N, or nullptr if it cannot be determined.
  const TargetRegisterClass *getOperandRegClass(SDNode *N, unsigned OpNo) const;
  bool SelectGlobalValueConstantOffset(SDValue Addr, SDValue& IntPtr);
  bool SelectGlobalValueVariableOffset(SDValue Addr, SDValue &BaseReg,
                                       SDValue& Offset);
  bool SelectADDRVTX_READ(SDValue Addr, SDValue &Base, SDValue &Offset);
  bool SelectADDRIndirect(SDValue Addr, SDValue &Base, SDValue &Offset);
  // DS (local memory) addressing helpers.
  bool isDSOffsetLegal(const SDValue &Base, unsigned Offset,
                       unsigned OffsetBits) const;
  bool SelectDS1Addr1Offset(SDValue Ptr, SDValue &Base, SDValue &Offset) const;
  bool SelectDS64Bit4ByteAligned(SDValue Ptr, SDValue &Base, SDValue &Offset0,
                                 SDValue &Offset1) const;
  // MUBUF (buffer memory) addressing-mode selectors.
  void SelectMUBUF(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                   SDValue &SOffset, SDValue &Offset, SDValue &Offen,
                   SDValue &Idxen, SDValue &Addr64, SDValue &GLC, SDValue &SLC,
                   SDValue &TFE) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, SDValue &VAddr,
                         SDValue &Offset) const;
  bool SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc,
                         SDValue &VAddr, SDValue &Offset,
                         SDValue &SLC) const;
  bool SelectMUBUFScratch(SDValue Addr, SDValue &RSrc, SDValue &VAddr,
                          SDValue &SOffset, SDValue &ImmOffset) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &SOffset,
                         SDValue &Offset, SDValue &GLC, SDValue &SLC,
                         SDValue &TFE) const;
  bool SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, SDValue &Soffset,
                         SDValue &Offset, SDValue &GLC) const;
  SDNode *SelectAddrSpaceCast(SDNode *N);
  // VOP3 source-modifier selectors (abs/neg, clamp, output modifier).
  bool SelectVOP3Mods(SDValue In, SDValue &Src, SDValue &SrcMods) const;
  bool SelectVOP3Mods0(SDValue In, SDValue &Src, SDValue &SrcMods,
                       SDValue &Clamp, SDValue &Omod) const;

  bool SelectVOP3Mods0Clamp(SDValue In, SDValue &Src, SDValue &SrcMods,
                            SDValue &Omod) const;
  bool SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src, SDValue &SrcMods,
                                 SDValue &Clamp,
                                 SDValue &Omod) const;

  SDNode *SelectADD_SUB_I64(SDNode *N);
  SDNode *SelectDIV_SCALE(SDNode *N);

  // Include the pieces autogenerated from the target description.
#include "AMDGPUGenDAGISel.inc"
};
} // end anonymous namespace
127 | ||
128 | /// \brief This pass converts a legalized DAG into a AMDGPU-specific | |
129 | // DAG, ready for instruction scheduling. | |
130 | FunctionPass *llvm::createAMDGPUISelDag(TargetMachine &TM) { | |
131 | return new AMDGPUDAGToDAGISel(TM); | |
132 | } | |
133 | ||
// Cache the AMDGPU subtarget so selection code can query the target
// generation without re-fetching it from the TargetMachine.
AMDGPUDAGToDAGISel::AMDGPUDAGToDAGISel(TargetMachine &TM)
  : SelectionDAGISel(TM), Subtarget(TM.getSubtarget<AMDGPUSubtarget>()) {
}
137 | ||
// Out-of-line (empty) definition of the virtual destructor.
AMDGPUDAGToDAGISel::~AMDGPUDAGToDAGISel() {
}
140 | ||
141 | bool AMDGPUDAGToDAGISel::isInlineImmediate(SDNode *N) const { | |
142 | const SITargetLowering *TL | |
143 | = static_cast<const SITargetLowering *>(getTargetLowering()); | |
144 | return TL->analyzeImmediate(N) == 0; | |
145 | } | |
146 | ||
147 | /// \brief Determine the register class for \p OpNo | |
148 | /// \returns The register class of the virtual register that will be used for | |
149 | /// the given operand number \OpNo or NULL if the register class cannot be | |
150 | /// determined. | |
151 | const TargetRegisterClass *AMDGPUDAGToDAGISel::getOperandRegClass(SDNode *N, | |
152 | unsigned OpNo) const { | |
153 | if (!N->isMachineOpcode()) | |
154 | return nullptr; | |
155 | ||
156 | switch (N->getMachineOpcode()) { | |
157 | default: { | |
158 | const MCInstrDesc &Desc = | |
159 | TM.getSubtargetImpl()->getInstrInfo()->get(N->getMachineOpcode()); | |
160 | unsigned OpIdx = Desc.getNumDefs() + OpNo; | |
161 | if (OpIdx >= Desc.getNumOperands()) | |
162 | return nullptr; | |
163 | int RegClass = Desc.OpInfo[OpIdx].RegClass; | |
164 | if (RegClass == -1) | |
165 | return nullptr; | |
166 | ||
167 | return TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RegClass); | |
168 | } | |
169 | case AMDGPU::REG_SEQUENCE: { | |
170 | unsigned RCID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue(); | |
171 | const TargetRegisterClass *SuperRC = | |
172 | TM.getSubtargetImpl()->getRegisterInfo()->getRegClass(RCID); | |
173 | ||
174 | SDValue SubRegOp = N->getOperand(OpNo + 1); | |
175 | unsigned SubRegIdx = cast<ConstantSDNode>(SubRegOp)->getZExtValue(); | |
176 | return TM.getSubtargetImpl()->getRegisterInfo()->getSubClassWithSubReg( | |
177 | SuperRC, SubRegIdx); | |
178 | } | |
179 | } | |
180 | } | |
181 | ||
182 | SDValue AMDGPUDAGToDAGISel::getSmallIPtrImm(unsigned int Imm) { | |
183 | return CurDAG->getTargetConstant(Imm, MVT::i32); | |
184 | } | |
185 | ||
186 | bool AMDGPUDAGToDAGISel::SelectADDRParam( | |
187 | SDValue Addr, SDValue& R1, SDValue& R2) { | |
188 | ||
189 | if (Addr.getOpcode() == ISD::FrameIndex) { | |
190 | if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) { | |
191 | R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i32); | |
192 | R2 = CurDAG->getTargetConstant(0, MVT::i32); | |
193 | } else { | |
194 | R1 = Addr; | |
195 | R2 = CurDAG->getTargetConstant(0, MVT::i32); | |
196 | } | |
197 | } else if (Addr.getOpcode() == ISD::ADD) { | |
198 | R1 = Addr.getOperand(0); | |
199 | R2 = Addr.getOperand(1); | |
200 | } else { | |
201 | R1 = Addr; | |
202 | R2 = CurDAG->getTargetConstant(0, MVT::i32); | |
203 | } | |
204 | return true; | |
205 | } | |
206 | ||
207 | bool AMDGPUDAGToDAGISel::SelectADDR(SDValue Addr, SDValue& R1, SDValue& R2) { | |
208 | if (Addr.getOpcode() == ISD::TargetExternalSymbol || | |
209 | Addr.getOpcode() == ISD::TargetGlobalAddress) { | |
210 | return false; | |
211 | } | |
212 | return SelectADDRParam(Addr, R1, R2); | |
213 | } | |
214 | ||
215 | ||
216 | bool AMDGPUDAGToDAGISel::SelectADDR64(SDValue Addr, SDValue& R1, SDValue& R2) { | |
217 | if (Addr.getOpcode() == ISD::TargetExternalSymbol || | |
218 | Addr.getOpcode() == ISD::TargetGlobalAddress) { | |
219 | return false; | |
220 | } | |
221 | ||
222 | if (Addr.getOpcode() == ISD::FrameIndex) { | |
223 | if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) { | |
224 | R1 = CurDAG->getTargetFrameIndex(FIN->getIndex(), MVT::i64); | |
225 | R2 = CurDAG->getTargetConstant(0, MVT::i64); | |
226 | } else { | |
227 | R1 = Addr; | |
228 | R2 = CurDAG->getTargetConstant(0, MVT::i64); | |
229 | } | |
230 | } else if (Addr.getOpcode() == ISD::ADD) { | |
231 | R1 = Addr.getOperand(0); | |
232 | R2 = Addr.getOperand(1); | |
233 | } else { | |
234 | R1 = Addr; | |
235 | R2 = CurDAG->getTargetConstant(0, MVT::i64); | |
236 | } | |
237 | return true; | |
238 | } | |
239 | ||
// Main per-node selection hook. Returns the replacement machine node, or
// nullptr/fall-through to SelectCode() for the auto-generated matcher.
SDNode *AMDGPUDAGToDAGISel::Select(SDNode *N) {
  unsigned int Opc = N->getOpcode();
  if (N->isMachineOpcode()) {
    N->setNodeId(-1);
    return nullptr;   // Already selected.
  }

  const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
  switch (Opc) {
  default: break;
  // We are selecting i64 ADD here instead of custom lower it during
  // DAG legalization, so we can fold some i64 ADDs used for address
  // calculation into the LOAD and STORE instructions.
  case ISD::ADD:
  case ISD::SUB: {
    // Only i64 add/sub on SI+ get the manual 32-bit-halves expansion.
    if (N->getValueType(0) != MVT::i64 ||
        ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    return SelectADD_SUB_I64(N);
  }
  case ISD::SCALAR_TO_VECTOR:
  case AMDGPUISD::BUILD_VERTICAL_VECTOR:
  case ISD::BUILD_VECTOR: {
    unsigned RegClassID;
    const AMDGPURegisterInfo *TRI = static_cast<const AMDGPURegisterInfo *>(
        TM.getSubtargetImpl()->getRegisterInfo());
    const SIRegisterInfo *SIRI = static_cast<const SIRegisterInfo *>(
        TM.getSubtargetImpl()->getRegisterInfo());
    EVT VT = N->getValueType(0);
    unsigned NumVectorElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();
    assert(EltVT.bitsEq(MVT::i32));
    if (ST.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      // On SI+, build the vector in SGPRs if any machine-node user wants it
      // in an SGPR class; otherwise default to VGPRs.
      bool UseVReg = true;
      for (SDNode::use_iterator U = N->use_begin(), E = SDNode::use_end();
           U != E; ++U) {
        if (!U->isMachineOpcode()) {
          continue;
        }
        const TargetRegisterClass *RC = getOperandRegClass(*U, U.getOperandNo());
        if (!RC) {
          continue;
        }
        if (SIRI->isSGPRClass(RC)) {
          UseVReg = false;
        }
      }
      switch(NumVectorElts) {
      case 1: RegClassID = UseVReg ? AMDGPU::VGPR_32RegClassID :
                                     AMDGPU::SReg_32RegClassID;
        break;
      case 2: RegClassID = UseVReg ? AMDGPU::VReg_64RegClassID :
                                     AMDGPU::SReg_64RegClassID;
        break;
      case 4: RegClassID = UseVReg ? AMDGPU::VReg_128RegClassID :
                                     AMDGPU::SReg_128RegClassID;
        break;
      case 8: RegClassID = UseVReg ? AMDGPU::VReg_256RegClassID :
                                     AMDGPU::SReg_256RegClassID;
        break;
      case 16: RegClassID = UseVReg ? AMDGPU::VReg_512RegClassID :
                                      AMDGPU::SReg_512RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    } else {
      // BUILD_VECTOR was lowered into an IMPLICIT_DEF + 4 INSERT_SUBREG
      // that adds a 128 bits reg copy when going through TwoAddressInstructions
      // pass. We want to avoid 128 bits copies as much as possible because they
      // can't be bundled by our scheduler.
      switch(NumVectorElts) {
      case 2: RegClassID = AMDGPU::R600_Reg64RegClassID; break;
      case 4:
        if (Opc == AMDGPUISD::BUILD_VERTICAL_VECTOR)
          RegClassID = AMDGPU::R600_Reg128VerticalRegClassID;
        else
          RegClassID = AMDGPU::R600_Reg128RegClassID;
        break;
      default: llvm_unreachable("Do not know how to lower this BUILD_VECTOR");
      }
    }

    SDValue RegClass = CurDAG->getTargetConstant(RegClassID, MVT::i32);

    if (NumVectorElts == 1) {
      // A one-element vector is just a copy into the chosen register class.
      return CurDAG->SelectNodeTo(N, AMDGPU::COPY_TO_REGCLASS, EltVT,
                                  N->getOperand(0), RegClass);
    }

    assert(NumVectorElts <= 16 && "Vectors with more than 16 elements not "
                                  "supported yet");
    // 16 = Max Num Vector Elements
    // 2 = 2 REG_SEQUENCE operands per element (value, subreg index)
    // 1 = Vector Register Class
    SmallVector<SDValue, 16 * 2 + 1> RegSeqArgs(NumVectorElts * 2 + 1);

    RegSeqArgs[0] = CurDAG->getTargetConstant(RegClassID, MVT::i32);
    bool IsRegSeq = true;
    unsigned NOps = N->getNumOperands();
    for (unsigned i = 0; i < NOps; i++) {
      // XXX: Why is this here?
      if (dyn_cast<RegisterSDNode>(N->getOperand(i))) {
        IsRegSeq = false;
        break;
      }
      RegSeqArgs[1 + (2 * i)] = N->getOperand(i);
      RegSeqArgs[1 + (2 * i) + 1] =
          CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
    }

    if (NOps != NumVectorElts) {
      // Fill in the missing undef elements if this was a scalar_to_vector.
      assert(Opc == ISD::SCALAR_TO_VECTOR && NOps < NumVectorElts);

      MachineSDNode *ImpDef = CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
                                                     SDLoc(N), EltVT);
      for (unsigned i = NOps; i < NumVectorElts; ++i) {
        RegSeqArgs[1 + (2 * i)] = SDValue(ImpDef, 0);
        RegSeqArgs[1 + (2 * i) + 1] =
            CurDAG->getTargetConstant(TRI->getSubRegFromChannel(i), MVT::i32);
      }
    }

    if (!IsRegSeq)
      break;
    return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, N->getVTList(),
                                RegSeqArgs);
  }
  case ISD::BUILD_PAIR: {
    SDValue RC, SubReg0, SubReg1;
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
      break;
    }
    // Pick the register class and sub-register indices for the pair width.
    if (N->getValueType(0) == MVT::i128) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_128RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0_sub1, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub2_sub3, MVT::i32);
    } else if (N->getValueType(0) == MVT::i64) {
      RC = CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32);
      SubReg0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
      SubReg1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);
    } else {
      llvm_unreachable("Unhandled value type for BUILD_PAIR");
    }
    const SDValue Ops[] = { RC, N->getOperand(0), SubReg0,
                            N->getOperand(1), SubReg1 };
    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

  case ISD::Constant:
  case ISD::ConstantFP: {
    // NOTE(review): this shadows the outer ST with an identical lookup.
    const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>();
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS ||
        N->getValueType(0).getSizeInBits() != 64 || isInlineImmediate(N))
      break;

    uint64_t Imm;
    if (ConstantFPSDNode *FP = dyn_cast<ConstantFPSDNode>(N))
      Imm = FP->getValueAPF().bitcastToAPInt().getZExtValue();
    else {
      ConstantSDNode *C = cast<ConstantSDNode>(N);
      Imm = C->getZExtValue();
    }

    // Materialize a non-inline 64-bit constant as two 32-bit S_MOVs
    // recombined with a REG_SEQUENCE.
    SDNode *Lo = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                CurDAG->getConstant(Imm & 0xFFFFFFFF, MVT::i32));
    SDNode *Hi = CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                CurDAG->getConstant(Imm >> 32, MVT::i32));
    const SDValue Ops[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      SDValue(Lo, 0), CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(Hi, 0), CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, SDLoc(N),
                                  N->getValueType(0), Ops);
  }

  case ISD::LOAD: {
    // To simplify the TableGen patterns, we replace all i64 loads with
    // v2i32 loads. Alternatively, we could promote i64 loads to v2i32
    // during DAG legalization; however, some places (ExpandUnalignedLoad)
    // in the DAG legalizer assume that if i64 is legal, so doing this
    // promotion early can cause problems.
    EVT VT = N->getValueType(0);
    LoadSDNode *LD = cast<LoadSDNode>(N);
    if (VT != MVT::i64 || LD->getExtensionType() != ISD::NON_EXTLOAD)
      break;

    SDValue NewLoad = CurDAG->getLoad(MVT::v2i32, SDLoc(N), LD->getChain(),
                                      LD->getBasePtr(), LD->getMemOperand());
    SDValue BitCast = CurDAG->getNode(ISD::BITCAST, SDLoc(N),
                                      MVT::i64, NewLoad);
    // Rewire the chain and value uses to the v2i32 load, select the new load,
    // then fall through so SelectCode runs on the bitcast node.
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 1), NewLoad.getValue(1));
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), BitCast);
    SelectCode(NewLoad.getNode());
    N = BitCast.getNode();
    break;
  }

  case AMDGPUISD::REGISTER_LOAD: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;

    SelectADDRIndirect(N->getOperand(1), Addr, Offset);
    const SDValue Ops[] = {
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),  // chain
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterLoad, SDLoc(N),
                                  CurDAG->getVTList(MVT::i32, MVT::i64, MVT::Other),
                                  Ops);
  }
  case AMDGPUISD::REGISTER_STORE: {
    if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
      break;
    SDValue Addr, Offset;
    SelectADDRIndirect(N->getOperand(2), Addr, Offset);
    const SDValue Ops[] = {
      N->getOperand(1),  // value to store
      Addr,
      Offset,
      CurDAG->getTargetConstant(0, MVT::i32),
      N->getOperand(0),  // chain
    };
    return CurDAG->getMachineNode(AMDGPU::SI_RegisterStorePseudo, SDLoc(N),
                                  CurDAG->getVTList(MVT::Other),
                                  Ops);
  }

  case AMDGPUISD::BFE_I32:
  case AMDGPUISD::BFE_U32: {
    if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS)
      break;

    // There is a scalar version available, but unlike the vector version which
    // has a separate operand for the offset and width, the scalar version packs
    // the width and offset into a single operand. Try to move to the scalar
    // version if the offsets are constant, so that we can try to keep extended
    // loads of kernel arguments in SGPRs.

    // TODO: Technically we could try to pattern match scalar bitshifts of
    // dynamic values, but it's probably not useful.
    ConstantSDNode *Offset = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (!Offset)
      break;

    ConstantSDNode *Width = dyn_cast<ConstantSDNode>(N->getOperand(2));
    if (!Width)
      break;

    bool Signed = Opc == AMDGPUISD::BFE_I32;

    // Transformation function, pack the offset and width of a BFE into
    // the format expected by the S_BFE_I32 / S_BFE_U32. In the second
    // source, bits [5:0] contain the offset and bits [22:16] the width.

    uint32_t OffsetVal = Offset->getZExtValue();
    uint32_t WidthVal = Width->getZExtValue();

    uint32_t PackedVal = OffsetVal | WidthVal << 16;

    SDValue PackedOffsetWidth = CurDAG->getTargetConstant(PackedVal, MVT::i32);
    return CurDAG->getMachineNode(Signed ? AMDGPU::S_BFE_I32 : AMDGPU::S_BFE_U32,
                                  SDLoc(N),
                                  MVT::i32,
                                  N->getOperand(0),
                                  PackedOffsetWidth);

  }
  case AMDGPUISD::DIV_SCALE: {
    return SelectDIV_SCALE(N);
  }
  case ISD::CopyToReg: {
    // Give the SI lowering a chance to legalize operands of this
    // target-independent node in place before matching it.
    const SITargetLowering& Lowering =
        *static_cast<const SITargetLowering*>(getTargetLowering());
    Lowering.legalizeTargetIndependentNode(N, *CurDAG);
    break;
  }
  case ISD::ADDRSPACECAST:
    return SelectAddrSpaceCast(N);
  }

  // Fall back to the auto-generated matcher for everything not handled above.
  return SelectCode(N);
}
530 | ||
531 | ||
532 | bool AMDGPUDAGToDAGISel::checkType(const Value *Ptr, unsigned AS) { | |
533 | assert(AS != 0 && "Use checkPrivateAddress instead."); | |
534 | if (!Ptr) | |
535 | return false; | |
536 | ||
537 | return Ptr->getType()->getPointerAddressSpace() == AS; | |
538 | } | |
539 | ||
540 | bool AMDGPUDAGToDAGISel::checkPrivateAddress(const MachineMemOperand *Op) { | |
541 | if (Op->getPseudoValue()) | |
542 | return true; | |
543 | ||
544 | if (PointerType *PT = dyn_cast<PointerType>(Op->getValue()->getType())) | |
545 | return PT->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS; | |
546 | ||
547 | return false; | |
548 | } | |
549 | ||
550 | bool AMDGPUDAGToDAGISel::isGlobalStore(const StoreSDNode *N) { | |
551 | return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS); | |
552 | } | |
553 | ||
554 | bool AMDGPUDAGToDAGISel::isPrivateStore(const StoreSDNode *N) { | |
555 | const Value *MemVal = N->getMemOperand()->getValue(); | |
556 | return (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) && | |
557 | !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) && | |
558 | !checkType(MemVal, AMDGPUAS::REGION_ADDRESS)); | |
559 | } | |
560 | ||
561 | bool AMDGPUDAGToDAGISel::isLocalStore(const StoreSDNode *N) { | |
562 | return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS); | |
563 | } | |
564 | ||
565 | bool AMDGPUDAGToDAGISel::isFlatStore(const StoreSDNode *N) { | |
566 | return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS); | |
567 | } | |
568 | ||
569 | bool AMDGPUDAGToDAGISel::isRegionStore(const StoreSDNode *N) { | |
570 | return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS); | |
571 | } | |
572 | ||
573 | bool AMDGPUDAGToDAGISel::isConstantLoad(const LoadSDNode *N, int CbId) const { | |
574 | const Value *MemVal = N->getMemOperand()->getValue(); | |
575 | if (CbId == -1) | |
576 | return checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS); | |
577 | ||
578 | return checkType(MemVal, AMDGPUAS::CONSTANT_BUFFER_0 + CbId); | |
579 | } | |
580 | ||
581 | bool AMDGPUDAGToDAGISel::isGlobalLoad(const LoadSDNode *N) const { | |
582 | if (N->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS) { | |
583 | const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>(); | |
584 | if (ST.getGeneration() < AMDGPUSubtarget::SOUTHERN_ISLANDS || | |
585 | N->getMemoryVT().bitsLT(MVT::i32)) { | |
586 | return true; | |
587 | } | |
588 | } | |
589 | return checkType(N->getMemOperand()->getValue(), AMDGPUAS::GLOBAL_ADDRESS); | |
590 | } | |
591 | ||
592 | bool AMDGPUDAGToDAGISel::isParamLoad(const LoadSDNode *N) const { | |
593 | return checkType(N->getMemOperand()->getValue(), AMDGPUAS::PARAM_I_ADDRESS); | |
594 | } | |
595 | ||
596 | bool AMDGPUDAGToDAGISel::isLocalLoad(const LoadSDNode *N) const { | |
597 | return checkType(N->getMemOperand()->getValue(), AMDGPUAS::LOCAL_ADDRESS); | |
598 | } | |
599 | ||
600 | bool AMDGPUDAGToDAGISel::isFlatLoad(const LoadSDNode *N) const { | |
601 | return checkType(N->getMemOperand()->getValue(), AMDGPUAS::FLAT_ADDRESS); | |
602 | } | |
603 | ||
604 | bool AMDGPUDAGToDAGISel::isRegionLoad(const LoadSDNode *N) const { | |
605 | return checkType(N->getMemOperand()->getValue(), AMDGPUAS::REGION_ADDRESS); | |
606 | } | |
607 | ||
608 | bool AMDGPUDAGToDAGISel::isCPLoad(const LoadSDNode *N) const { | |
609 | MachineMemOperand *MMO = N->getMemOperand(); | |
610 | if (checkPrivateAddress(N->getMemOperand())) { | |
611 | if (MMO) { | |
612 | const PseudoSourceValue *PSV = MMO->getPseudoValue(); | |
613 | if (PSV && PSV == PseudoSourceValue::getConstantPool()) { | |
614 | return true; | |
615 | } | |
616 | } | |
617 | } | |
618 | return false; | |
619 | } | |
620 | ||
621 | bool AMDGPUDAGToDAGISel::isPrivateLoad(const LoadSDNode *N) const { | |
622 | if (checkPrivateAddress(N->getMemOperand())) { | |
623 | // Check to make sure we are not a constant pool load or a constant load | |
624 | // that is marked as a private load | |
625 | if (isCPLoad(N) || isConstantLoad(N, -1)) { | |
626 | return false; | |
627 | } | |
628 | } | |
629 | ||
630 | const Value *MemVal = N->getMemOperand()->getValue(); | |
631 | if (!checkType(MemVal, AMDGPUAS::LOCAL_ADDRESS) && | |
632 | !checkType(MemVal, AMDGPUAS::GLOBAL_ADDRESS) && | |
633 | !checkType(MemVal, AMDGPUAS::FLAT_ADDRESS) && | |
634 | !checkType(MemVal, AMDGPUAS::REGION_ADDRESS) && | |
635 | !checkType(MemVal, AMDGPUAS::CONSTANT_ADDRESS) && | |
636 | !checkType(MemVal, AMDGPUAS::PARAM_D_ADDRESS) && | |
637 | !checkType(MemVal, AMDGPUAS::PARAM_I_ADDRESS)) { | |
638 | return true; | |
639 | } | |
640 | return false; | |
641 | } | |
642 | ||
// Human-readable pass name reported by the pass manager.
const char *AMDGPUDAGToDAGISel::getPassName() const {
  return "AMDGPU DAG->DAG Pattern Instruction Selection";
}
646 | ||
647 | #ifdef DEBUGTMP | |
648 | #undef INT64_C | |
649 | #endif | |
650 | #undef DEBUGTMP | |
651 | ||
652 | //===----------------------------------------------------------------------===// | |
653 | // Complex Patterns | |
654 | //===----------------------------------------------------------------------===// | |
655 | ||
656 | bool AMDGPUDAGToDAGISel::SelectGlobalValueConstantOffset(SDValue Addr, | |
657 | SDValue& IntPtr) { | |
658 | if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Addr)) { | |
659 | IntPtr = CurDAG->getIntPtrConstant(Cst->getZExtValue() / 4, true); | |
660 | return true; | |
661 | } | |
662 | return false; | |
663 | } | |
664 | ||
665 | bool AMDGPUDAGToDAGISel::SelectGlobalValueVariableOffset(SDValue Addr, | |
666 | SDValue& BaseReg, SDValue &Offset) { | |
667 | if (!isa<ConstantSDNode>(Addr)) { | |
668 | BaseReg = Addr; | |
669 | Offset = CurDAG->getIntPtrConstant(0, true); | |
670 | return true; | |
671 | } | |
672 | return false; | |
673 | } | |
674 | ||
675 | bool AMDGPUDAGToDAGISel::SelectADDRVTX_READ(SDValue Addr, SDValue &Base, | |
676 | SDValue &Offset) { | |
677 | ConstantSDNode *IMMOffset; | |
678 | ||
679 | if (Addr.getOpcode() == ISD::ADD | |
680 | && (IMMOffset = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) | |
681 | && isInt<16>(IMMOffset->getZExtValue())) { | |
682 | ||
683 | Base = Addr.getOperand(0); | |
684 | Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32); | |
685 | return true; | |
686 | // If the pointer address is constant, we can move it to the offset field. | |
687 | } else if ((IMMOffset = dyn_cast<ConstantSDNode>(Addr)) | |
688 | && isInt<16>(IMMOffset->getZExtValue())) { | |
689 | Base = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), | |
690 | SDLoc(CurDAG->getEntryNode()), | |
691 | AMDGPU::ZERO, MVT::i32); | |
692 | Offset = CurDAG->getTargetConstant(IMMOffset->getZExtValue(), MVT::i32); | |
693 | return true; | |
694 | } | |
695 | ||
696 | // Default case, no offset | |
697 | Base = Addr; | |
698 | Offset = CurDAG->getTargetConstant(0, MVT::i32); | |
699 | return true; | |
700 | } | |
701 | ||
702 | bool AMDGPUDAGToDAGISel::SelectADDRIndirect(SDValue Addr, SDValue &Base, | |
703 | SDValue &Offset) { | |
704 | ConstantSDNode *C; | |
705 | ||
706 | if ((C = dyn_cast<ConstantSDNode>(Addr))) { | |
707 | Base = CurDAG->getRegister(AMDGPU::INDIRECT_BASE_ADDR, MVT::i32); | |
708 | Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32); | |
709 | } else if ((Addr.getOpcode() == ISD::ADD || Addr.getOpcode() == ISD::OR) && | |
710 | (C = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) { | |
711 | Base = Addr.getOperand(0); | |
712 | Offset = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32); | |
713 | } else { | |
714 | Base = Addr; | |
715 | Offset = CurDAG->getTargetConstant(0, MVT::i32); | |
716 | } | |
717 | ||
718 | return true; | |
719 | } | |
720 | ||
// Expand a 64-bit integer ADD/SUB into a pair of 32-bit scalar ops chained
// through the carry/borrow bit (S_ADD_U32/S_ADDC_U32 or S_SUB_U32/S_SUBB_U32),
// then reassemble the two 32-bit halves with a REG_SEQUENCE.
SDNode *AMDGPUDAGToDAGISel::SelectADD_SUB_I64(SDNode *N) {
  SDLoc DL(N);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  bool IsAdd = (N->getOpcode() == ISD::ADD);

  SDValue Sub0 = CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32);
  SDValue Sub1 = CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32);

  // Split each 64-bit operand into its low (sub0) and high (sub1) halves.
  SDNode *Lo0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub0);
  SDNode *Hi0 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, LHS, Sub1);

  SDNode *Lo1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub0);
  SDNode *Hi1 = CurDAG->getMachineNode(TargetOpcode::EXTRACT_SUBREG,
                                       DL, MVT::i32, RHS, Sub1);

  // The low-half op produces (i32 result, glue); the glue result carries the
  // carry/borrow into the high-half op.
  SDVTList VTList = CurDAG->getVTList(MVT::i32, MVT::Glue);
  SDValue AddLoArgs[] = { SDValue(Lo0, 0), SDValue(Lo1, 0) };


  unsigned Opc = IsAdd ? AMDGPU::S_ADD_U32 : AMDGPU::S_SUB_U32;
  unsigned CarryOpc = IsAdd ? AMDGPU::S_ADDC_U32 : AMDGPU::S_SUBB_U32;

  SDNode *AddLo = CurDAG->getMachineNode( Opc, DL, VTList, AddLoArgs);
  SDValue Carry(AddLo, 1);
  SDNode *AddHi
    = CurDAG->getMachineNode(CarryOpc, DL, MVT::i32,
                             SDValue(Hi0, 0), SDValue(Hi1, 0), Carry);

  // Recombine the two halves into a 64-bit SGPR pair:
  // (REG_SEQUENCE SReg_64, lo, sub0, hi, sub1).
  SDValue Args[5] = {
    CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
    SDValue(AddLo,0),
    Sub0,
    SDValue(AddHi,0),
    Sub1,
  };
  return CurDAG->SelectNodeTo(N, AMDGPU::REG_SEQUENCE, MVT::i64, Args);
}
763 | ||
764 | SDNode *AMDGPUDAGToDAGISel::SelectDIV_SCALE(SDNode *N) { | |
765 | SDLoc SL(N); | |
766 | EVT VT = N->getValueType(0); | |
767 | ||
768 | assert(VT == MVT::f32 || VT == MVT::f64); | |
769 | ||
770 | unsigned Opc | |
771 | = (VT == MVT::f64) ? AMDGPU::V_DIV_SCALE_F64 : AMDGPU::V_DIV_SCALE_F32; | |
772 | ||
773 | const SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32); | |
774 | const SDValue False = CurDAG->getTargetConstant(0, MVT::i1); | |
775 | SDValue Ops[] = { | |
776 | Zero, // src0_modifiers | |
777 | N->getOperand(0), // src0 | |
778 | Zero, // src1_modifiers | |
779 | N->getOperand(1), // src1 | |
780 | Zero, // src2_modifiers | |
781 | N->getOperand(2), // src2 | |
782 | False, // clamp | |
783 | Zero // omod | |
784 | }; | |
785 | ||
786 | return CurDAG->SelectNodeTo(N, Opc, VT, MVT::i1, Ops); | |
787 | } | |
788 | ||
789 | bool AMDGPUDAGToDAGISel::isDSOffsetLegal(const SDValue &Base, unsigned Offset, | |
790 | unsigned OffsetBits) const { | |
791 | const AMDGPUSubtarget &ST = TM.getSubtarget<AMDGPUSubtarget>(); | |
792 | if ((OffsetBits == 16 && !isUInt<16>(Offset)) || | |
793 | (OffsetBits == 8 && !isUInt<8>(Offset))) | |
794 | return false; | |
795 | ||
796 | if (ST.getGeneration() >= AMDGPUSubtarget::SEA_ISLANDS) | |
797 | return true; | |
798 | ||
799 | // On Southern Islands instruction with a negative base value and an offset | |
800 | // don't seem to work. | |
801 | return CurDAG->SignBitIsZero(Base); | |
802 | } | |
803 | ||
// Match a DS (local memory) address as a base register plus a 16-bit
// unsigned immediate offset. This matcher cannot fail: the final fallback
// uses the whole address as the base with offset 0.
bool AMDGPUDAGToDAGISel::SelectDS1Addr1Offset(SDValue Addr, SDValue &Base,
                                              SDValue &Offset) const {
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    if (isDSOffsetLegal(N0, C1->getSExtValue(), 16)) {
      // (add n0, c0)
      Base = N0;
      Offset = N1;
      return true;
    }
  }

  // If we have a constant address, prefer to put the constant into the
  // offset. This can save moves to load the constant address since multiple
  // operations can share the zero base address register, and enables merging
  // into read2 / write2 instructions.
  if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    if (isUInt<16>(CAddr->getZExtValue())) {
      // Materialize a zero base and fold the entire address into the offset.
      SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
      MachineSDNode *MovZero = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                 SDLoc(Addr), MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset = Addr;
      return true;
    }
  }

  // default case
  Base = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}
838 | ||
// Match a DS address for 64-bit, 4-byte-aligned access (read2/write2 style):
// a base register plus two 8-bit offsets counted in dwords, with
// Offset1 = Offset0 + 1. This matcher cannot fail; the final fallback uses
// the whole address as the base with dword offsets 0 and 1.
bool AMDGPUDAGToDAGISel::SelectDS64Bit4ByteAligned(SDValue Addr, SDValue &Base,
                                                   SDValue &Offset0,
                                                   SDValue &Offset1) const {
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);
    // NOTE(review): the byte offset is truncated to dwords here with no
    // alignment assert (unlike the constant-address path below) —
    // presumably callers only use this pattern for 4-byte-aligned
    // accesses; confirm.
    unsigned DWordOffset0 = C1->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    // (add n0, c0)
    if (isDSOffsetLegal(N0, DWordOffset1, 8)) {
      Base = N0;
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, MVT::i8);
      return true;
    }
  }

  // Constant address: materialize a shared zero base and fold the address
  // into the two dword offsets when both fit in 8 bits.
  if (const ConstantSDNode *CAddr = dyn_cast<ConstantSDNode>(Addr)) {
    unsigned DWordOffset0 = CAddr->getZExtValue() / 4;
    unsigned DWordOffset1 = DWordOffset0 + 1;
    assert(4 * DWordOffset0 == CAddr->getZExtValue());

    if (isUInt<8>(DWordOffset0) && isUInt<8>(DWordOffset1)) {
      SDValue Zero = CurDAG->getTargetConstant(0, MVT::i32);
      MachineSDNode *MovZero
        = CurDAG->getMachineNode(AMDGPU::V_MOV_B32_e32,
                                 SDLoc(Addr), MVT::i32, Zero);
      Base = SDValue(MovZero, 0);
      Offset0 = CurDAG->getTargetConstant(DWordOffset0, MVT::i8);
      Offset1 = CurDAG->getTargetConstant(DWordOffset1, MVT::i8);
      return true;
    }
  }

  // default case
  Base = Addr;
  Offset0 = CurDAG->getTargetConstant(0, MVT::i8);
  Offset1 = CurDAG->getTargetConstant(1, MVT::i8);
  return true;
}
880 | ||
1a4d82fc JJ |
881 | static bool isLegalMUBUFImmOffset(const ConstantSDNode *Imm) { |
882 | return isUInt<12>(Imm->getZExtValue()); | |
883 | } | |
884 | ||
// Decompose Addr into the full operand set shared by MUBUF instructions.
// The cache-policy bits (GLC/SLC/TFE), idxen/offen and soffset are always
// initialized to 0 here; callers may override them. Ptr/VAddr/Offset/Addr64
// encode which addressing mode was chosen. This matcher cannot fail.
void AMDGPUDAGToDAGISel::SelectMUBUF(SDValue Addr, SDValue &Ptr,
                                     SDValue &VAddr, SDValue &SOffset,
                                     SDValue &Offset, SDValue &Offen,
                                     SDValue &Idxen, SDValue &Addr64,
                                     SDValue &GLC, SDValue &SLC,
                                     SDValue &TFE) const {
  SDLoc DL(Addr);

  GLC = CurDAG->getTargetConstant(0, MVT::i1);
  SLC = CurDAG->getTargetConstant(0, MVT::i1);
  TFE = CurDAG->getTargetConstant(0, MVT::i1);

  Idxen = CurDAG->getTargetConstant(0, MVT::i1);
  Offen = CurDAG->getTargetConstant(0, MVT::i1);
  Addr64 = CurDAG->getTargetConstant(0, MVT::i1);
  SOffset = CurDAG->getTargetConstant(0, MVT::i32);

  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    // Constant part fits the 12-bit immediate field.
    if (isLegalMUBUFImmOffset(C1)) {

      if (N0.getOpcode() == ISD::ADD) {
        // (add (add N2, N3), C1) -> addr64
        // N2 becomes the base pointer, N3 the per-lane VGPR address.
        SDValue N2 = N0.getOperand(0);
        SDValue N3 = N0.getOperand(1);
        Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
        Ptr = N2;
        VAddr = N3;
        Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
        return;
      }

      // (add N0, C1) -> offset
      VAddr = CurDAG->getTargetConstant(0, MVT::i32);
      Ptr = N0;
      Offset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return;
    }
  }
  // Offset was non-constant or too large for the immediate field.
  if (Addr.getOpcode() == ISD::ADD) {
    // (add N0, N1) -> addr64
    SDValue N0 = Addr.getOperand(0);
    SDValue N1 = Addr.getOperand(1);
    Addr64 = CurDAG->getTargetConstant(1, MVT::i1);
    Ptr = N0;
    VAddr = N1;
    Offset = CurDAG->getTargetConstant(0, MVT::i16);
    return;
  }

  // default case -> offset
  VAddr = CurDAG->getTargetConstant(0, MVT::i32);
  Ptr = Addr;
  Offset = CurDAG->getTargetConstant(0, MVT::i16);

}
944 | ||
945 | bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, | |
946 | SDValue &VAddr, | |
947 | SDValue &Offset) const { | |
948 | SDValue Ptr, SOffset, Offen, Idxen, Addr64, GLC, SLC, TFE; | |
949 | ||
950 | SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64, | |
951 | GLC, SLC, TFE); | |
952 | ||
953 | ConstantSDNode *C = cast<ConstantSDNode>(Addr64); | |
954 | if (C->getSExtValue()) { | |
955 | SDLoc DL(Addr); | |
85aaf69f SL |
956 | |
957 | const SITargetLowering& Lowering = | |
958 | *static_cast<const SITargetLowering*>(getTargetLowering()); | |
959 | ||
960 | SRsrc = SDValue(Lowering.wrapAddr64Rsrc(*CurDAG, DL, Ptr), 0); | |
1a4d82fc JJ |
961 | return true; |
962 | } | |
85aaf69f | 963 | |
1a4d82fc JJ |
964 | return false; |
965 | } | |
966 | ||
967 | bool AMDGPUDAGToDAGISel::SelectMUBUFAddr64(SDValue Addr, SDValue &SRsrc, | |
968 | SDValue &VAddr, SDValue &Offset, | |
969 | SDValue &SLC) const { | |
970 | SLC = CurDAG->getTargetConstant(0, MVT::i1); | |
971 | ||
972 | return SelectMUBUFAddr64(Addr, SRsrc, VAddr, Offset); | |
973 | } | |
974 | ||
1a4d82fc JJ |
// Match an address for a MUBUF scratch (private memory) access. Builds the
// scratch resource descriptor from the SCRATCH_RSRC_DWORD0/1 external
// symbols, takes SOffset from the preloaded scratch wave offset register,
// and splits Addr into a VGPR address plus a legal 12-bit immediate offset.
// This matcher cannot fail.
bool AMDGPUDAGToDAGISel::SelectMUBUFScratch(SDValue Addr, SDValue &Rsrc,
                                            SDValue &VAddr, SDValue &SOffset,
                                            SDValue &ImmOffset) const {

  SDLoc DL(Addr);
  MachineFunction &MF = CurDAG->getMachineFunction();
  const SIRegisterInfo *TRI =
      static_cast<const SIRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SITargetLowering& Lowering =
    *static_cast<const SITargetLowering*>(getTargetLowering());

  // Ensure the scratch wave offset SGPR is registered as a live-in so it can
  // be copied from below.
  unsigned ScratchOffsetReg =
      TRI->getPreloadedValue(MF, SIRegisterInfo::SCRATCH_WAVE_OFFSET);
  Lowering.CreateLiveInRegister(*CurDAG, &AMDGPU::SReg_32RegClass,
                                ScratchOffsetReg, MVT::i32);

  // The two resource-descriptor dwords are referenced via external symbols
  // (resolved later, outside this file).
  SDValue Sym0 = CurDAG->getExternalSymbol("SCRATCH_RSRC_DWORD0", MVT::i32);
  SDValue ScratchRsrcDword0 =
      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, Sym0), 0);

  SDValue Sym1 = CurDAG->getExternalSymbol("SCRATCH_RSRC_DWORD1", MVT::i32);
  SDValue ScratchRsrcDword1 =
      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, DL, MVT::i32, Sym1), 0);

  // Pack the two dwords into a 64-bit pointer, then expand it into the full
  // scratch resource descriptor.
  const SDValue RsrcOps[] = {
      CurDAG->getTargetConstant(AMDGPU::SReg_64RegClassID, MVT::i32),
      ScratchRsrcDword0,
      CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      ScratchRsrcDword1,
      CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32),
  };
  SDValue ScratchPtr = SDValue(CurDAG->getMachineNode(AMDGPU::REG_SEQUENCE, DL,
                                              MVT::v2i32, RsrcOps), 0);
  Rsrc = SDValue(Lowering.buildScratchRSRC(*CurDAG, DL, ScratchPtr), 0);
  SOffset = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), DL,
      MRI.getLiveInVirtReg(ScratchOffsetReg), MVT::i32);

  // (add n0, c1) -> fold c1 into the immediate offset when it fits 12 bits.
  if (CurDAG->isBaseWithConstantOffset(Addr)) {
    SDValue N1 = Addr.getOperand(1);
    ConstantSDNode *C1 = cast<ConstantSDNode>(N1);

    if (isLegalMUBUFImmOffset(C1)) {
      VAddr = Addr.getOperand(0);
      ImmOffset = CurDAG->getTargetConstant(C1->getZExtValue(), MVT::i16);
      return true;
    }
  }

  // (node) -> whole address in the VGPR, immediate offset 0.
  VAddr = Addr;
  ImmOffset = CurDAG->getTargetConstant(0, MVT::i16);
  return true;
}
1029 | ||
// Match Addr for the offset-only MUBUF form (no vaddr: offen, idxen and
// addr64 must all be clear). On success, builds a default resource
// descriptor around the pointer chosen by SelectMUBUF.
bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc,
                                           SDValue &SOffset, SDValue &Offset,
                                           SDValue &GLC, SDValue &SLC,
                                           SDValue &TFE) const {
  SDValue Ptr, VAddr, Offen, Idxen, Addr64;
  const SIInstrInfo *TII =
    static_cast<const SIInstrInfo *>(Subtarget.getInstrInfo());

  SelectMUBUF(Addr, Ptr, VAddr, SOffset, Offset, Offen, Idxen, Addr64,
              GLC, SLC, TFE);

  if (!cast<ConstantSDNode>(Offen)->getSExtValue() &&
      !cast<ConstantSDNode>(Idxen)->getSExtValue() &&
      !cast<ConstantSDNode>(Addr64)->getSExtValue()) {
    // Default data format with an all-ones (maximal) size field.
    uint64_t Rsrc = TII->getDefaultRsrcDataFormat() |
                    APInt::getAllOnesValue(32).getZExtValue(); // Size
    SDLoc DL(Addr);

    const SITargetLowering& Lowering =
      *static_cast<const SITargetLowering*>(getTargetLowering());

    SRsrc = SDValue(Lowering.buildRSRC(*CurDAG, DL, Ptr, 0, Rsrc), 0);
    return true;
  }
  return false;
}
1056 | ||
1057 | bool AMDGPUDAGToDAGISel::SelectMUBUFOffset(SDValue Addr, SDValue &SRsrc, | |
1058 | SDValue &Soffset, SDValue &Offset, | |
1059 | SDValue &GLC) const { | |
1060 | SDValue SLC, TFE; | |
1061 | ||
1062 | return SelectMUBUFOffset(Addr, SRsrc, Soffset, Offset, GLC, SLC, TFE); | |
1063 | } | |
1064 | ||
// FIXME: This is incorrect and only enough to be able to compile.
// Lower an addrspacecast to/from the flat address space by resizing the
// pointer value: extract the low dword (64 -> 32), zero-extend with a
// REG_SEQUENCE (32 -> 64), or bitcast (64 -> 64).
SDNode *AMDGPUDAGToDAGISel::SelectAddrSpaceCast(SDNode *N) {
  AddrSpaceCastSDNode *ASC = cast<AddrSpaceCastSDNode>(N);
  SDLoc DL(N);

  assert(Subtarget.hasFlatAddressSpace() &&
         "addrspacecast only supported with flat address space!");

  assert((ASC->getSrcAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS &&
          ASC->getDestAddressSpace() != AMDGPUAS::CONSTANT_ADDRESS) &&
         "Cannot cast address space to / from constant address!");

  assert((ASC->getSrcAddressSpace() == AMDGPUAS::FLAT_ADDRESS ||
          ASC->getDestAddressSpace() == AMDGPUAS::FLAT_ADDRESS) &&
         "Can only cast to / from flat address space!");

  // The flat instructions read the address as the index of the VGPR holding the
  // address, so casting should just be reinterpreting the base VGPR, so just
  // insert trunc / bitcast / zext.

  SDValue Src = ASC->getOperand(0);
  EVT DestVT = ASC->getValueType(0);
  EVT SrcVT = Src.getValueType();

  unsigned SrcSize = SrcVT.getSizeInBits();
  unsigned DestSize = DestVT.getSizeInBits();

  if (SrcSize > DestSize) {
    // Truncate 64 -> 32 by extracting the low subregister.
    assert(SrcSize == 64 && DestSize == 32);
    return CurDAG->getMachineNode(
      TargetOpcode::EXTRACT_SUBREG,
      DL,
      DestVT,
      Src,
      CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32));
  }


  if (DestSize > SrcSize) {
    // Zero-extend 32 -> 64: place Src in sub0 and a zero in sub1.
    assert(SrcSize == 32 && DestSize == 64);

    // FIXME: This is probably wrong, we should never be defining
    // a register class with both VGPRs and SGPRs
    SDValue RC = CurDAG->getTargetConstant(AMDGPU::VS_64RegClassID, MVT::i32);

    const SDValue Ops[] = {
      RC,
      Src,
      CurDAG->getTargetConstant(AMDGPU::sub0, MVT::i32),
      SDValue(CurDAG->getMachineNode(AMDGPU::S_MOV_B32, SDLoc(N), MVT::i32,
                                     CurDAG->getConstant(0, MVT::i32)), 0),
      CurDAG->getTargetConstant(AMDGPU::sub1, MVT::i32)
    };

    return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE,
                                  SDLoc(N), N->getValueType(0), Ops);
  }

  // Same width: a plain bitcast (no-op reinterpret).
  assert(SrcSize == 64 && DestSize == 64);
  return CurDAG->getNode(ISD::BITCAST, DL, DestVT, Src).getNode();
}
1126 | ||
1127 | bool AMDGPUDAGToDAGISel::SelectVOP3Mods(SDValue In, SDValue &Src, | |
1128 | SDValue &SrcMods) const { | |
1129 | ||
1130 | unsigned Mods = 0; | |
1131 | ||
1132 | Src = In; | |
1133 | ||
1134 | if (Src.getOpcode() == ISD::FNEG) { | |
1135 | Mods |= SISrcMods::NEG; | |
1136 | Src = Src.getOperand(0); | |
1137 | } | |
1138 | ||
1139 | if (Src.getOpcode() == ISD::FABS) { | |
1140 | Mods |= SISrcMods::ABS; | |
1141 | Src = Src.getOperand(0); | |
1142 | } | |
1143 | ||
1144 | SrcMods = CurDAG->getTargetConstant(Mods, MVT::i32); | |
1145 | ||
1146 | return true; | |
1147 | } | |
1148 | ||
1149 | bool AMDGPUDAGToDAGISel::SelectVOP3Mods0(SDValue In, SDValue &Src, | |
1150 | SDValue &SrcMods, SDValue &Clamp, | |
1151 | SDValue &Omod) const { | |
1152 | // FIXME: Handle Clamp and Omod | |
1153 | Clamp = CurDAG->getTargetConstant(0, MVT::i32); | |
1154 | Omod = CurDAG->getTargetConstant(0, MVT::i32); | |
1155 | ||
1156 | return SelectVOP3Mods(In, Src, SrcMods); | |
1157 | } | |
1158 | ||
85aaf69f SL |
1159 | bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp(SDValue In, SDValue &Src, |
1160 | SDValue &SrcMods, | |
1161 | SDValue &Omod) const { | |
1162 | // FIXME: Handle Omod | |
1163 | Omod = CurDAG->getTargetConstant(0, MVT::i32); | |
1164 | ||
1165 | return SelectVOP3Mods(In, Src, SrcMods); | |
1166 | } | |
1167 | ||
1168 | bool AMDGPUDAGToDAGISel::SelectVOP3Mods0Clamp0OMod(SDValue In, SDValue &Src, | |
1169 | SDValue &SrcMods, | |
1170 | SDValue &Clamp, | |
1171 | SDValue &Omod) const { | |
1172 | Clamp = Omod = CurDAG->getTargetConstant(0, MVT::i32); | |
1173 | return SelectVOP3Mods(In, Src, SrcMods); | |
1174 | } | |
1175 | ||
1a4d82fc JJ |
// After instruction selection, repeatedly run the target's PostISelFolding
// peephole over every machine node in the DAG until a full pass makes no
// further changes.
void AMDGPUDAGToDAGISel::PostprocessISelDAG() {
  const AMDGPUTargetLowering& Lowering =
    *static_cast<const AMDGPUTargetLowering*>(getTargetLowering());
  bool IsModified = false;
  do {
    IsModified = false;
    // Go over all selected nodes and try to fold them a bit more
    for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
         E = CurDAG->allnodes_end(); I != E; ++I) {

      SDNode *Node = I;

      // Only machine nodes are candidates for post-isel folding.
      MachineSDNode *MachineNode = dyn_cast<MachineSDNode>(I);
      if (!MachineNode)
        continue;

      SDNode *ResNode = Lowering.PostISelFolding(MachineNode, *CurDAG);
      if (ResNode != Node) {
        // A replacement was produced; rewrite users and iterate again.
        ReplaceUses(Node, ResNode);
        IsModified = true;
      }
    }
    // Drop nodes orphaned by the replacements before the next pass.
    CurDAG->RemoveDeadNodes();
  } while (IsModified);
}