]>
Commit | Line | Data |
---|---|---|
970d7e83 LB |
1 | //===- AArch64RegisterInfo.cpp - AArch64 Register Information -------------===// |
2 | // | |
3 | // The LLVM Compiler Infrastructure | |
4 | // | |
5 | // This file is distributed under the University of Illinois Open Source | |
6 | // License. See LICENSE.TXT for details. | |
7 | // | |
8 | //===----------------------------------------------------------------------===// | |
9 | // | |
10 | // This file contains the AArch64 implementation of the TargetRegisterInfo | |
11 | // class. | |
12 | // | |
13 | //===----------------------------------------------------------------------===// | |
14 | ||
970d7e83 LB |
15 | #include "AArch64RegisterInfo.h" |
16 | #include "AArch64FrameLowering.h" | |
1a4d82fc JJ |
17 | #include "AArch64InstrInfo.h" |
18 | #include "AArch64Subtarget.h" | |
19 | #include "MCTargetDesc/AArch64AddressingModes.h" | |
20 | #include "llvm/ADT/BitVector.h" | |
970d7e83 LB |
21 | #include "llvm/CodeGen/MachineFrameInfo.h" |
22 | #include "llvm/CodeGen/MachineInstrBuilder.h" | |
23 | #include "llvm/CodeGen/MachineRegisterInfo.h" | |
24 | #include "llvm/CodeGen/RegisterScavenging.h" | |
1a4d82fc JJ |
25 | #include "llvm/IR/Function.h" |
26 | #include "llvm/Support/CommandLine.h" | |
27 | #include "llvm/Support/raw_ostream.h" | |
28 | #include "llvm/Target/TargetFrameLowering.h" | |
29 | #include "llvm/Target/TargetOptions.h" | |
30 | ||
31 | using namespace llvm; | |
970d7e83 LB |
32 | |
33 | #define GET_REGINFO_TARGET_DESC | |
34 | #include "AArch64GenRegisterInfo.inc" | |
35 | ||
// Command-line escape hatch: reserve X18 so the register allocator never
// hands it out as a general-purpose register (X18 is the platform register
// on some ABIs — see getReservedRegs/isReservedReg, which also honor this).
static cl::opt<bool>
ReserveX18("aarch64-reserve-x18", cl::Hidden,
           cl::desc("Reserve X18, making it unavailable as GPR"));
39 | ||
// Construct the register info, telling the TableGen-generated base class
// that LR is the return-address register; keeps the instr-info and
// subtarget pointers for later queries.
AArch64RegisterInfo::AArch64RegisterInfo(const AArch64InstrInfo *tii,
                                         const AArch64Subtarget *sti)
    : AArch64GenRegisterInfo(AArch64::LR), TII(tii), STI(sti) {}
970d7e83 | 43 | |
1a4d82fc | 44 | const MCPhysReg * |
970d7e83 | 45 | AArch64RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { |
1a4d82fc | 46 | assert(MF && "Invalid MachineFunction pointer."); |
85aaf69f SL |
47 | if (MF->getFunction()->getCallingConv() == CallingConv::GHC) |
48 | // GHC set of callee saved regs is empty as all those regs are | |
49 | // used for passing STG regs around | |
50 | return CSR_AArch64_NoRegs_SaveList; | |
1a4d82fc JJ |
51 | if (MF->getFunction()->getCallingConv() == CallingConv::AnyReg) |
52 | return CSR_AArch64_AllRegs_SaveList; | |
53 | else | |
54 | return CSR_AArch64_AAPCS_SaveList; | |
970d7e83 LB |
55 | } |
56 | ||
1a4d82fc JJ |
57 | const uint32_t * |
58 | AArch64RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const { | |
85aaf69f SL |
59 | if (CC == CallingConv::GHC) |
60 | // This is academic becase all GHC calls are (supposed to be) tail calls | |
61 | return CSR_AArch64_NoRegs_RegMask; | |
1a4d82fc JJ |
62 | if (CC == CallingConv::AnyReg) |
63 | return CSR_AArch64_AllRegs_RegMask; | |
64 | else | |
65 | return CSR_AArch64_AAPCS_RegMask; | |
970d7e83 LB |
66 | } |
67 | ||
1a4d82fc JJ |
68 | const uint32_t *AArch64RegisterInfo::getTLSCallPreservedMask() const { |
69 | if (STI->isTargetDarwin()) | |
70 | return CSR_AArch64_TLS_Darwin_RegMask; | |
970d7e83 | 71 | |
1a4d82fc JJ |
72 | assert(STI->isTargetELF() && "only expect Darwin or ELF TLS"); |
73 | return CSR_AArch64_TLS_ELF_RegMask; | |
970d7e83 LB |
74 | } |
75 | ||
1a4d82fc | 76 | const uint32_t * |
85aaf69f | 77 | AArch64RegisterInfo::getThisReturnPreservedMask(CallingConv::ID CC) const { |
1a4d82fc JJ |
78 | // This should return a register mask that is the same as that returned by |
79 | // getCallPreservedMask but that additionally preserves the register used for | |
80 | // the first i64 argument (which must also be the register used to return a | |
81 | // single i64 return value) | |
82 | // | |
83 | // In case that the calling convention does not use the same register for | |
84 | // both, the function should return NULL (does not currently apply) | |
85aaf69f | 85 | assert(CC != CallingConv::GHC && "should not be GHC calling convention."); |
1a4d82fc JJ |
86 | return CSR_AArch64_AAPCS_ThisReturn_RegMask; |
87 | } | |
970d7e83 LB |
88 | |
89 | BitVector | |
90 | AArch64RegisterInfo::getReservedRegs(const MachineFunction &MF) const { | |
1a4d82fc | 91 | const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); |
970d7e83 | 92 | |
1a4d82fc JJ |
93 | // FIXME: avoid re-calculating this every time. |
94 | BitVector Reserved(getNumRegs()); | |
95 | Reserved.set(AArch64::SP); | |
970d7e83 | 96 | Reserved.set(AArch64::XZR); |
1a4d82fc | 97 | Reserved.set(AArch64::WSP); |
970d7e83 LB |
98 | Reserved.set(AArch64::WZR); |
99 | ||
1a4d82fc JJ |
100 | if (TFI->hasFP(MF) || STI->isTargetDarwin()) { |
101 | Reserved.set(AArch64::FP); | |
970d7e83 LB |
102 | Reserved.set(AArch64::W29); |
103 | } | |
104 | ||
85aaf69f | 105 | if (STI->isTargetDarwin() || ReserveX18) { |
1a4d82fc JJ |
106 | Reserved.set(AArch64::X18); // Platform register |
107 | Reserved.set(AArch64::W18); | |
108 | } | |
109 | ||
110 | if (hasBasePointer(MF)) { | |
111 | Reserved.set(AArch64::X19); | |
112 | Reserved.set(AArch64::W19); | |
113 | } | |
114 | ||
970d7e83 LB |
115 | return Reserved; |
116 | } | |
117 | ||
1a4d82fc JJ |
118 | bool AArch64RegisterInfo::isReservedReg(const MachineFunction &MF, |
119 | unsigned Reg) const { | |
120 | const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); | |
121 | ||
122 | switch (Reg) { | |
123 | default: | |
124 | break; | |
125 | case AArch64::SP: | |
126 | case AArch64::XZR: | |
127 | case AArch64::WSP: | |
128 | case AArch64::WZR: | |
129 | return true; | |
130 | case AArch64::X18: | |
131 | case AArch64::W18: | |
85aaf69f | 132 | return STI->isTargetDarwin() || ReserveX18; |
1a4d82fc JJ |
133 | case AArch64::FP: |
134 | case AArch64::W29: | |
135 | return TFI->hasFP(MF) || STI->isTargetDarwin(); | |
136 | case AArch64::W19: | |
137 | case AArch64::X19: | |
138 | return hasBasePointer(MF); | |
970d7e83 LB |
139 | } |
140 | ||
1a4d82fc JJ |
141 | return false; |
142 | } | |
970d7e83 | 143 | |
1a4d82fc JJ |
// Pointer values always live in 64-bit GPRs; Kind is ignored.
const TargetRegisterClass *
AArch64RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                        unsigned Kind) const {
  return &AArch64::GPR64RegClass;
}
970d7e83 | 149 | |
1a4d82fc JJ |
// Register class to use when copying to/from RC across register classes.
// NZCV (the CCR class) cannot be copied directly, so it is routed through
// a 64-bit GPR; every other class copies within itself.
const TargetRegisterClass *
AArch64RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &AArch64::CCRRegClass)
    return &AArch64::GPR64RegClass; // Only MSR & MRS copy NZCV.
  return RC;
}
970d7e83 | 156 | |
// X19 serves as the base pointer when one is required (see hasBasePointer).
unsigned AArch64RegisterInfo::getBaseRegister() const { return AArch64::X19; }
970d7e83 | 158 | |
1a4d82fc JJ |
159 | bool AArch64RegisterInfo::hasBasePointer(const MachineFunction &MF) const { |
160 | const MachineFrameInfo *MFI = MF.getFrameInfo(); | |
970d7e83 | 161 | |
1a4d82fc JJ |
162 | // In the presence of variable sized objects, if the fixed stack size is |
163 | // large enough that referencing from the FP won't result in things being | |
164 | // in range relatively often, we can use a base pointer to allow access | |
165 | // from the other direction like the SP normally works. | |
166 | if (MFI->hasVarSizedObjects()) { | |
167 | // Conservatively estimate whether the negative offset from the frame | |
168 | // pointer will be sufficient to reach. If a function has a smallish | |
169 | // frame, it's less likely to have lots of spills and callee saved | |
170 | // space, so it's all more likely to be within range of the frame pointer. | |
171 | // If it's wrong, we'll materialize the constant and still get to the | |
172 | // object; it's just suboptimal. Negative offsets use the unscaled | |
173 | // load/store instructions, which have a 9-bit signed immediate. | |
174 | if (MFI->getLocalFrameSize() < 256) | |
175 | return false; | |
176 | return true; | |
970d7e83 LB |
177 | } |
178 | ||
1a4d82fc | 179 | return false; |
970d7e83 LB |
180 | } |
181 | ||
182 | unsigned | |
183 | AArch64RegisterInfo::getFrameRegister(const MachineFunction &MF) const { | |
1a4d82fc | 184 | const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); |
970d7e83 | 185 | |
1a4d82fc JJ |
186 | return TFI->hasFP(MF) ? AArch64::FP : AArch64::SP; |
187 | } | |
188 | ||
// Always allow the register scavenger: frame-index elimination can need a
// scratch register for out-of-range offsets (see eliminateFrameIndex).
bool AArch64RegisterInfo::requiresRegisterScavenging(
    const MachineFunction &MF) const {
  return true;
}
193 | ||
// Local stack slot allocation may always create virtual base registers;
// they are materialized in materializeFrameBaseRegister below.
bool AArch64RegisterInfo::requiresVirtualBaseRegisters(
    const MachineFunction &MF) const {
  return true;
}
198 | ||
199 | bool | |
200 | AArch64RegisterInfo::useFPForScavengingIndex(const MachineFunction &MF) const { | |
1a4d82fc JJ |
201 | const MachineFrameInfo *MFI = MF.getFrameInfo(); |
202 | // AArch64FrameLowering::resolveFrameIndexReference() can always fall back | |
203 | // to the stack pointer, so only put the emergency spill slot next to the | |
204 | // FP when there's no better way to access it (SP or base pointer). | |
205 | return MFI->hasVarSizedObjects() && !hasBasePointer(MF); | |
970d7e83 | 206 | } |
1a4d82fc JJ |
207 | |
// Frame indices may need scavenging when their resolved offsets don't fit
// an instruction's immediate field, so always opt in.
bool AArch64RegisterInfo::requiresFrameIndexScavenging(
    const MachineFunction &MF) const {
  return true;
}
212 | ||
213 | bool | |
214 | AArch64RegisterInfo::cannotEliminateFrame(const MachineFunction &MF) const { | |
215 | const MachineFrameInfo *MFI = MF.getFrameInfo(); | |
216 | // Only consider eliminating leaf frames. | |
217 | if (MFI->hasCalls() || (MF.getTarget().Options.DisableFramePointerElim(MF) && | |
218 | MFI->adjustsStack())) | |
219 | return true; | |
220 | return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken(); | |
221 | } | |
222 | ||
/// needsFrameBaseReg - Returns true if the instruction's frame index
/// reference would be better served by a base register other than FP
/// or SP. Used by LocalStackFrameAllocation to determine which frame index
/// references it should create new base registers for.
bool AArch64RegisterInfo::needsFrameBaseReg(MachineInstr *MI,
                                            int64_t Offset) const {
  // Locate the frame-index operand; the assert fires if there is none.
  for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i)
    assert(i < MI->getNumOperands() &&
           "Instr doesn't have FrameIndex operand!");

  // It's the load/store FI references that cause issues, as it can be difficult
  // to materialize the offset if it won't fit in the literal field. Estimate
  // based on the size of the local frame and some conservative assumptions
  // about the rest of the stack frame (note, this is pre-regalloc, so
  // we don't know everything for certain yet) whether this offset is likely
  // to be out of range of the immediate. Return true if so.

  // We only generate virtual base registers for loads and stores, so
  // return false for everything else.
  if (!MI->mayLoad() && !MI->mayStore())
    return false;

  // Without a virtual base register, if the function has variable sized
  // objects, all fixed-size local references will be via the frame pointer,
  // Approximate the offset and see if it's legal for the instruction.
  // Note that the incoming offset is based on the SP value at function entry,
  // so it'll be negative.
  MachineFunction &MF = *MI->getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  MachineFrameInfo *MFI = MF.getFrameInfo();

  // Estimate an offset from the frame pointer.
  // Conservatively assume all GPR callee-saved registers get pushed:
  // FP, LR, X19-X28, D8-D15 — 20 registers, 64 bits each.
  int64_t FPOffset = Offset - 16 * 20;
  // Estimate an offset from the stack pointer.
  // The incoming offset is relating to the SP at the start of the function,
  // but when we access the local it'll be relative to the SP after local
  // allocation, so adjust our SP-relative offset by that allocation size.
  Offset += MFI->getLocalFrameSize();
  // Assume that we'll have at least some spill slots allocated.
  // FIXME: This is a total SWAG number. We should run some statistics
  // and pick a real one.
  Offset += 128; // 128 bytes of spill slots

  // If there is a frame pointer, try using it.
  // The FP is only available if there is no dynamic realignment. We
  // don't know for sure yet whether we'll need that, so we guess based
  // on whether there are any local variables that would trigger it.
  if (TFI->hasFP(MF) && isFrameOffsetLegal(MI, FPOffset))
    return false;

  // If we can reference via the stack pointer or base pointer, try that.
  // FIXME: This (and the code that resolves the references) can be improved
  //        to only disallow SP relative references in the live range of
  //        the VLA(s). In practice, it's unclear how much difference that
  //        would make, but it may be worth doing.
  if (isFrameOffsetLegal(MI, Offset))
    return false;

  // The offset likely isn't legal; we want to allocate a virtual base register.
  return true;
}
286 | ||
287 | bool AArch64RegisterInfo::isFrameOffsetLegal(const MachineInstr *MI, | |
288 | int64_t Offset) const { | |
289 | assert(Offset <= INT_MAX && "Offset too big to fit in int."); | |
290 | assert(MI && "Unable to get the legal offset for nil instruction."); | |
291 | int SaveOffset = Offset; | |
292 | return isAArch64FrameOffsetLegal(*MI, SaveOffset) & AArch64FrameOffsetIsLegal; | |
293 | } | |
294 | ||
295 | /// Insert defining instruction(s) for BaseReg to be a pointer to FrameIdx | |
296 | /// at the beginning of the basic block. | |
297 | void AArch64RegisterInfo::materializeFrameBaseRegister(MachineBasicBlock *MBB, | |
298 | unsigned BaseReg, | |
299 | int FrameIdx, | |
300 | int64_t Offset) const { | |
301 | MachineBasicBlock::iterator Ins = MBB->begin(); | |
302 | DebugLoc DL; // Defaults to "unknown" | |
303 | if (Ins != MBB->end()) | |
304 | DL = Ins->getDebugLoc(); | |
305 | ||
306 | const MCInstrDesc &MCID = TII->get(AArch64::ADDXri); | |
307 | MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo(); | |
308 | const MachineFunction &MF = *MBB->getParent(); | |
309 | MRI.constrainRegClass(BaseReg, TII->getRegClass(MCID, 0, this, MF)); | |
310 | unsigned Shifter = AArch64_AM::getShifterImm(AArch64_AM::LSL, 0); | |
311 | ||
312 | BuildMI(*MBB, Ins, DL, MCID, BaseReg) | |
313 | .addFrameIndex(FrameIdx) | |
314 | .addImm(Offset) | |
315 | .addImm(Shifter); | |
316 | } | |
317 | ||
318 | void AArch64RegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg, | |
319 | int64_t Offset) const { | |
320 | int Off = Offset; // ARM doesn't need the general 64-bit offsets | |
321 | unsigned i = 0; | |
322 | ||
323 | while (!MI.getOperand(i).isFI()) { | |
324 | ++i; | |
325 | assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!"); | |
326 | } | |
327 | bool Done = rewriteAArch64FrameIndex(MI, i, BaseReg, Off, TII); | |
328 | assert(Done && "Unable to resolve frame index!"); | |
329 | (void)Done; | |
330 | } | |
331 | ||
// Rewrite the frame-index operand FIOperandNum of *II into a concrete
// frame-register + immediate reference, creating a scratch register when
// the resolved offset does not fit the instruction's immediate field.
void AArch64RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                              int SPAdj, unsigned FIOperandNum,
                                              RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const AArch64FrameLowering *TFI = static_cast<const AArch64FrameLowering *>(
      MF.getSubtarget().getFrameLowering());

  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned FrameReg;
  int Offset;

  // Special handling of dbg_value, stackmap and patchpoint instructions:
  // they tolerate any offset, so resolve preferring the FP and fold the
  // full offset directly into the operands.
  if (MI.isDebugValue() || MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT) {
    Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg,
                                             /*PreferFP=*/true);
    Offset += MI.getOperand(FIOperandNum + 1).getImm();
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible.
  Offset = TFI->resolveFrameIndexReference(MF, FrameIndex, FrameReg);
  if (rewriteAArch64FrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return;

  // The emergency spill slot must always be reachable by direct rewrite.
  assert((!RS || !RS->isScavengingFrameIndex(FrameIndex)) &&
         "Emergency spill slot is out of reach");

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above. Handle the rest, providing a register that is
  // SP+LargeImm.
  unsigned ScratchReg =
      MF.getRegInfo().createVirtualRegister(&AArch64::GPR64RegClass);
  emitFrameOffset(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg, Offset, TII);
  MI.getOperand(FIOperandNum).ChangeToRegister(ScratchReg, false, false, true);
}
374 | ||
375 | namespace llvm { | |
376 | ||
377 | unsigned AArch64RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC, | |
378 | MachineFunction &MF) const { | |
379 | const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering(); | |
380 | ||
381 | switch (RC->getID()) { | |
382 | default: | |
383 | return 0; | |
384 | case AArch64::GPR32RegClassID: | |
385 | case AArch64::GPR32spRegClassID: | |
386 | case AArch64::GPR32allRegClassID: | |
387 | case AArch64::GPR64spRegClassID: | |
388 | case AArch64::GPR64allRegClassID: | |
389 | case AArch64::GPR64RegClassID: | |
390 | case AArch64::GPR32commonRegClassID: | |
391 | case AArch64::GPR64commonRegClassID: | |
392 | return 32 - 1 // XZR/SP | |
393 | - (TFI->hasFP(MF) || STI->isTargetDarwin()) // FP | |
85aaf69f | 394 | - (STI->isTargetDarwin() || ReserveX18) // X18 reserved as platform register |
1a4d82fc JJ |
395 | - hasBasePointer(MF); // X19 |
396 | case AArch64::FPR8RegClassID: | |
397 | case AArch64::FPR16RegClassID: | |
398 | case AArch64::FPR32RegClassID: | |
399 | case AArch64::FPR64RegClassID: | |
400 | case AArch64::FPR128RegClassID: | |
401 | return 32; | |
402 | ||
403 | case AArch64::DDRegClassID: | |
404 | case AArch64::DDDRegClassID: | |
405 | case AArch64::DDDDRegClassID: | |
406 | case AArch64::QQRegClassID: | |
407 | case AArch64::QQQRegClassID: | |
408 | case AArch64::QQQQRegClassID: | |
409 | return 32; | |
410 | ||
411 | case AArch64::FPR128_loRegClassID: | |
412 | return 16; | |
413 | } | |
414 | } | |
415 | ||
416 | } // namespace llvm |