]>
git.proxmox.com Git - rustc.git/blob - src/llvm/lib/Target/Mips/MipsSEFrameLowering.cpp
//===-- MipsSEFrameLowering.cpp - Mips32/64 Frame Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Mips32/64 implementation of TargetFrameLowering class.
//
//===----------------------------------------------------------------------===//
#include "MipsSEFrameLowering.h"
#include "MCTargetDesc/MipsBaseInfo.h"
#include "MipsAnalyzeImmediate.h"
#include "MipsMachineFunction.h"
#include "MipsSEInstrInfo.h"
#include "MipsSubtarget.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetOptions.h"
34 typedef MachineBasicBlock::iterator Iter
;
36 static std::pair
<unsigned, unsigned> getMFHiLoOpc(unsigned Src
) {
37 if (Mips::ACC64RegClass
.contains(Src
))
38 return std::make_pair((unsigned)Mips::PseudoMFHI
,
39 (unsigned)Mips::PseudoMFLO
);
41 if (Mips::ACC64DSPRegClass
.contains(Src
))
42 return std::make_pair((unsigned)Mips::MFHI_DSP
, (unsigned)Mips::MFLO_DSP
);
44 if (Mips::ACC128RegClass
.contains(Src
))
45 return std::make_pair((unsigned)Mips::PseudoMFHI64
,
46 (unsigned)Mips::PseudoMFLO64
);
48 return std::make_pair(0, 0);
51 /// Helper class to expand pseudos.
54 ExpandPseudo(MachineFunction
&MF
);
58 bool expandInstr(MachineBasicBlock
&MBB
, Iter I
);
59 void expandLoadCCond(MachineBasicBlock
&MBB
, Iter I
);
60 void expandStoreCCond(MachineBasicBlock
&MBB
, Iter I
);
61 void expandLoadACC(MachineBasicBlock
&MBB
, Iter I
, unsigned RegSize
);
62 void expandStoreACC(MachineBasicBlock
&MBB
, Iter I
, unsigned MFHiOpc
,
63 unsigned MFLoOpc
, unsigned RegSize
);
64 bool expandCopy(MachineBasicBlock
&MBB
, Iter I
);
65 bool expandCopyACC(MachineBasicBlock
&MBB
, Iter I
, unsigned MFHiOpc
,
67 bool expandBuildPairF64(MachineBasicBlock
&MBB
,
68 MachineBasicBlock::iterator I
, bool FP64
) const;
69 bool expandExtractElementF64(MachineBasicBlock
&MBB
,
70 MachineBasicBlock::iterator I
, bool FP64
) const;
73 MachineRegisterInfo
&MRI
;
77 ExpandPseudo::ExpandPseudo(MachineFunction
&MF_
)
78 : MF(MF_
), MRI(MF
.getRegInfo()) {}
80 bool ExpandPseudo::expand() {
81 bool Expanded
= false;
83 for (MachineFunction::iterator BB
= MF
.begin(), BBEnd
= MF
.end();
85 for (Iter I
= BB
->begin(), End
= BB
->end(); I
!= End
;)
86 Expanded
|= expandInstr(*BB
, I
++);
91 bool ExpandPseudo::expandInstr(MachineBasicBlock
&MBB
, Iter I
) {
92 switch(I
->getOpcode()) {
93 case Mips::LOAD_CCOND_DSP
:
94 expandLoadCCond(MBB
, I
);
96 case Mips::STORE_CCOND_DSP
:
97 expandStoreCCond(MBB
, I
);
99 case Mips::LOAD_ACC64
:
100 case Mips::LOAD_ACC64DSP
:
101 expandLoadACC(MBB
, I
, 4);
103 case Mips::LOAD_ACC128
:
104 expandLoadACC(MBB
, I
, 8);
106 case Mips::STORE_ACC64
:
107 expandStoreACC(MBB
, I
, Mips::PseudoMFHI
, Mips::PseudoMFLO
, 4);
109 case Mips::STORE_ACC64DSP
:
110 expandStoreACC(MBB
, I
, Mips::MFHI_DSP
, Mips::MFLO_DSP
, 4);
112 case Mips::STORE_ACC128
:
113 expandStoreACC(MBB
, I
, Mips::PseudoMFHI64
, Mips::PseudoMFLO64
, 8);
115 case Mips::BuildPairF64
:
116 if (expandBuildPairF64(MBB
, I
, false))
119 case Mips::BuildPairF64_64
:
120 if (expandBuildPairF64(MBB
, I
, true))
123 case Mips::ExtractElementF64
:
124 if (expandExtractElementF64(MBB
, I
, false))
127 case Mips::ExtractElementF64_64
:
128 if (expandExtractElementF64(MBB
, I
, true))
131 case TargetOpcode::COPY
:
132 if (!expandCopy(MBB
, I
))
143 void ExpandPseudo::expandLoadCCond(MachineBasicBlock
&MBB
, Iter I
) {
147 assert(I
->getOperand(0).isReg() && I
->getOperand(1).isFI());
149 const MipsSEInstrInfo
&TII
=
150 *static_cast<const MipsSEInstrInfo
*>(MF
.getSubtarget().getInstrInfo());
151 const MipsRegisterInfo
&RegInfo
= *static_cast<const MipsRegisterInfo
*>(
152 MF
.getSubtarget().getRegisterInfo());
154 const TargetRegisterClass
*RC
= RegInfo
.intRegClass(4);
155 unsigned VR
= MRI
.createVirtualRegister(RC
);
156 unsigned Dst
= I
->getOperand(0).getReg(), FI
= I
->getOperand(1).getIndex();
158 TII
.loadRegFromStack(MBB
, I
, VR
, FI
, RC
, &RegInfo
, 0);
159 BuildMI(MBB
, I
, I
->getDebugLoc(), TII
.get(TargetOpcode::COPY
), Dst
)
160 .addReg(VR
, RegState::Kill
);
163 void ExpandPseudo::expandStoreCCond(MachineBasicBlock
&MBB
, Iter I
) {
167 assert(I
->getOperand(0).isReg() && I
->getOperand(1).isFI());
169 const MipsSEInstrInfo
&TII
=
170 *static_cast<const MipsSEInstrInfo
*>(MF
.getSubtarget().getInstrInfo());
171 const MipsRegisterInfo
&RegInfo
= *static_cast<const MipsRegisterInfo
*>(
172 MF
.getSubtarget().getRegisterInfo());
174 const TargetRegisterClass
*RC
= RegInfo
.intRegClass(4);
175 unsigned VR
= MRI
.createVirtualRegister(RC
);
176 unsigned Src
= I
->getOperand(0).getReg(), FI
= I
->getOperand(1).getIndex();
178 BuildMI(MBB
, I
, I
->getDebugLoc(), TII
.get(TargetOpcode::COPY
), VR
)
179 .addReg(Src
, getKillRegState(I
->getOperand(0).isKill()));
180 TII
.storeRegToStack(MBB
, I
, VR
, true, FI
, RC
, &RegInfo
, 0);
183 void ExpandPseudo::expandLoadACC(MachineBasicBlock
&MBB
, Iter I
,
190 assert(I
->getOperand(0).isReg() && I
->getOperand(1).isFI());
192 const MipsSEInstrInfo
&TII
=
193 *static_cast<const MipsSEInstrInfo
*>(MF
.getSubtarget().getInstrInfo());
194 const MipsRegisterInfo
&RegInfo
= *static_cast<const MipsRegisterInfo
*>(
195 MF
.getSubtarget().getRegisterInfo());
197 const TargetRegisterClass
*RC
= RegInfo
.intRegClass(RegSize
);
198 unsigned VR0
= MRI
.createVirtualRegister(RC
);
199 unsigned VR1
= MRI
.createVirtualRegister(RC
);
200 unsigned Dst
= I
->getOperand(0).getReg(), FI
= I
->getOperand(1).getIndex();
201 unsigned Lo
= RegInfo
.getSubReg(Dst
, Mips::sub_lo
);
202 unsigned Hi
= RegInfo
.getSubReg(Dst
, Mips::sub_hi
);
203 DebugLoc DL
= I
->getDebugLoc();
204 const MCInstrDesc
&Desc
= TII
.get(TargetOpcode::COPY
);
206 TII
.loadRegFromStack(MBB
, I
, VR0
, FI
, RC
, &RegInfo
, 0);
207 BuildMI(MBB
, I
, DL
, Desc
, Lo
).addReg(VR0
, RegState::Kill
);
208 TII
.loadRegFromStack(MBB
, I
, VR1
, FI
, RC
, &RegInfo
, RegSize
);
209 BuildMI(MBB
, I
, DL
, Desc
, Hi
).addReg(VR1
, RegState::Kill
);
212 void ExpandPseudo::expandStoreACC(MachineBasicBlock
&MBB
, Iter I
,
213 unsigned MFHiOpc
, unsigned MFLoOpc
,
218 // store $vr1, FI + 4
220 assert(I
->getOperand(0).isReg() && I
->getOperand(1).isFI());
222 const MipsSEInstrInfo
&TII
=
223 *static_cast<const MipsSEInstrInfo
*>(MF
.getSubtarget().getInstrInfo());
224 const MipsRegisterInfo
&RegInfo
= *static_cast<const MipsRegisterInfo
*>(
225 MF
.getSubtarget().getRegisterInfo());
227 const TargetRegisterClass
*RC
= RegInfo
.intRegClass(RegSize
);
228 unsigned VR0
= MRI
.createVirtualRegister(RC
);
229 unsigned VR1
= MRI
.createVirtualRegister(RC
);
230 unsigned Src
= I
->getOperand(0).getReg(), FI
= I
->getOperand(1).getIndex();
231 unsigned SrcKill
= getKillRegState(I
->getOperand(0).isKill());
232 DebugLoc DL
= I
->getDebugLoc();
234 BuildMI(MBB
, I
, DL
, TII
.get(MFLoOpc
), VR0
).addReg(Src
);
235 TII
.storeRegToStack(MBB
, I
, VR0
, true, FI
, RC
, &RegInfo
, 0);
236 BuildMI(MBB
, I
, DL
, TII
.get(MFHiOpc
), VR1
).addReg(Src
, SrcKill
);
237 TII
.storeRegToStack(MBB
, I
, VR1
, true, FI
, RC
, &RegInfo
, RegSize
);
240 bool ExpandPseudo::expandCopy(MachineBasicBlock
&MBB
, Iter I
) {
241 unsigned Src
= I
->getOperand(1).getReg();
242 std::pair
<unsigned, unsigned> Opcodes
= getMFHiLoOpc(Src
);
247 return expandCopyACC(MBB
, I
, Opcodes
.first
, Opcodes
.second
);
250 bool ExpandPseudo::expandCopyACC(MachineBasicBlock
&MBB
, Iter I
,
251 unsigned MFHiOpc
, unsigned MFLoOpc
) {
257 const MipsSEInstrInfo
&TII
=
258 *static_cast<const MipsSEInstrInfo
*>(MF
.getSubtarget().getInstrInfo());
259 const MipsRegisterInfo
&RegInfo
= *static_cast<const MipsRegisterInfo
*>(
260 MF
.getSubtarget().getRegisterInfo());
262 unsigned Dst
= I
->getOperand(0).getReg(), Src
= I
->getOperand(1).getReg();
263 unsigned VRegSize
= RegInfo
.getMinimalPhysRegClass(Dst
)->getSize() / 2;
264 const TargetRegisterClass
*RC
= RegInfo
.intRegClass(VRegSize
);
265 unsigned VR0
= MRI
.createVirtualRegister(RC
);
266 unsigned VR1
= MRI
.createVirtualRegister(RC
);
267 unsigned SrcKill
= getKillRegState(I
->getOperand(1).isKill());
268 unsigned DstLo
= RegInfo
.getSubReg(Dst
, Mips::sub_lo
);
269 unsigned DstHi
= RegInfo
.getSubReg(Dst
, Mips::sub_hi
);
270 DebugLoc DL
= I
->getDebugLoc();
272 BuildMI(MBB
, I
, DL
, TII
.get(MFLoOpc
), VR0
).addReg(Src
);
273 BuildMI(MBB
, I
, DL
, TII
.get(TargetOpcode::COPY
), DstLo
)
274 .addReg(VR0
, RegState::Kill
);
275 BuildMI(MBB
, I
, DL
, TII
.get(MFHiOpc
), VR1
).addReg(Src
, SrcKill
);
276 BuildMI(MBB
, I
, DL
, TII
.get(TargetOpcode::COPY
), DstHi
)
277 .addReg(VR1
, RegState::Kill
);
281 /// This method expands the same instruction that MipsSEInstrInfo::
282 /// expandBuildPairF64 does, for the case when ABI is fpxx and mthc1 is not
283 /// available and the case where the ABI is FP64A. It is implemented here
284 /// because frame indexes are eliminated before MipsSEInstrInfo::
285 /// expandBuildPairF64 is called.
286 bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock
&MBB
,
287 MachineBasicBlock::iterator I
,
289 // For fpxx and when mthc1 is not available, use:
290 // spill + reload via ldc1
292 // The case where dmtc1 is available doesn't need to be handled here
293 // because it never creates a BuildPairF64 node.
295 // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
296 // for odd-numbered double precision values (because the lower 32-bits is
297 // transferred with mtc1 which is redirected to the upper half of the even
298 // register). Unfortunately, we have to make this decision before register
299 // allocation so for now we use a spill/reload sequence for all
300 // double-precision values in regardless of being an odd/even register.
302 const TargetMachine
&TM
= MF
.getTarget();
303 const MipsSubtarget
&Subtarget
= TM
.getSubtarget
<MipsSubtarget
>();
304 if ((Subtarget
.isABI_FPXX() && !Subtarget
.hasMTHC1()) ||
305 (FP64
&& !Subtarget
.useOddSPReg())) {
306 const MipsSEInstrInfo
&TII
= *static_cast<const MipsSEInstrInfo
*>(
307 TM
.getSubtargetImpl()->getInstrInfo());
308 const MipsRegisterInfo
&TRI
= *static_cast<const MipsRegisterInfo
*>(
309 TM
.getSubtargetImpl()->getRegisterInfo());
311 unsigned DstReg
= I
->getOperand(0).getReg();
312 unsigned LoReg
= I
->getOperand(1).getReg();
313 unsigned HiReg
= I
->getOperand(2).getReg();
315 // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
316 // the cases where mthc1 is not available). 64-bit architectures and
317 // MIPS32r2 or later can use FGR64 though.
318 assert(Subtarget
.isGP64bit() || Subtarget
.hasMTHC1() ||
319 !Subtarget
.isFP64bit());
321 const TargetRegisterClass
*RC
= &Mips::GPR32RegClass
;
322 const TargetRegisterClass
*RC2
=
323 FP64
? &Mips::FGR64RegClass
: &Mips::AFGR64RegClass
;
325 // We re-use the same spill slot each time so that the stack frame doesn't
326 // grow too much in functions with a large number of moves.
327 int FI
= MF
.getInfo
<MipsFunctionInfo
>()->getMoveF64ViaSpillFI(RC2
);
328 if (!Subtarget
.isLittle())
329 std::swap(LoReg
, HiReg
);
330 TII
.storeRegToStack(MBB
, I
, LoReg
, I
->getOperand(1).isKill(), FI
, RC
, &TRI
,
332 TII
.storeRegToStack(MBB
, I
, HiReg
, I
->getOperand(2).isKill(), FI
, RC
, &TRI
,
334 TII
.loadRegFromStack(MBB
, I
, DstReg
, FI
, RC2
, &TRI
, 0);
341 /// This method expands the same instruction that MipsSEInstrInfo::
342 /// expandExtractElementF64 does, for the case when ABI is fpxx and mfhc1 is not
343 /// available and the case where the ABI is FP64A. It is implemented here
344 /// because frame indexes are eliminated before MipsSEInstrInfo::
345 /// expandExtractElementF64 is called.
346 bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock
&MBB
,
347 MachineBasicBlock::iterator I
,
349 // For fpxx and when mfhc1 is not available, use:
350 // spill + reload via ldc1
352 // The case where dmfc1 is available doesn't need to be handled here
353 // because it never creates a ExtractElementF64 node.
355 // The FP64A ABI (fp64 with nooddspreg) must also use a spill/reload sequence
356 // for odd-numbered double precision values (because the lower 32-bits is
357 // transferred with mfc1 which is redirected to the upper half of the even
358 // register). Unfortunately, we have to make this decision before register
359 // allocation so for now we use a spill/reload sequence for all
360 // double-precision values in regardless of being an odd/even register.
362 const TargetMachine
&TM
= MF
.getTarget();
363 const MipsSubtarget
&Subtarget
= TM
.getSubtarget
<MipsSubtarget
>();
364 if ((Subtarget
.isABI_FPXX() && !Subtarget
.hasMTHC1()) ||
365 (FP64
&& !Subtarget
.useOddSPReg())) {
366 const MipsSEInstrInfo
&TII
= *static_cast<const MipsSEInstrInfo
*>(
367 TM
.getSubtargetImpl()->getInstrInfo());
368 const MipsRegisterInfo
&TRI
= *static_cast<const MipsRegisterInfo
*>(
369 TM
.getSubtargetImpl()->getRegisterInfo());
371 unsigned DstReg
= I
->getOperand(0).getReg();
372 unsigned SrcReg
= I
->getOperand(1).getReg();
373 unsigned N
= I
->getOperand(2).getImm();
374 int64_t Offset
= 4 * (Subtarget
.isLittle() ? N
: (1 - N
));
376 // It should be impossible to have FGR64 on MIPS-II or MIPS32r1 (which are
377 // the cases where mfhc1 is not available). 64-bit architectures and
378 // MIPS32r2 or later can use FGR64 though.
379 assert(Subtarget
.isGP64bit() || Subtarget
.hasMTHC1() ||
380 !Subtarget
.isFP64bit());
382 const TargetRegisterClass
*RC
=
383 FP64
? &Mips::FGR64RegClass
: &Mips::AFGR64RegClass
;
384 const TargetRegisterClass
*RC2
= &Mips::GPR32RegClass
;
386 // We re-use the same spill slot each time so that the stack frame doesn't
387 // grow too much in functions with a large number of moves.
388 int FI
= MF
.getInfo
<MipsFunctionInfo
>()->getMoveF64ViaSpillFI(RC
);
389 TII
.storeRegToStack(MBB
, I
, SrcReg
, I
->getOperand(1).isKill(), FI
, RC
, &TRI
,
391 TII
.loadRegFromStack(MBB
, I
, DstReg
, FI
, RC2
, &TRI
, Offset
);
398 MipsSEFrameLowering::MipsSEFrameLowering(const MipsSubtarget
&STI
)
399 : MipsFrameLowering(STI
, STI
.stackAlignment()) {}
401 unsigned MipsSEFrameLowering::ehDataReg(unsigned I
) const {
402 static const unsigned EhDataReg
[] = {
403 Mips::A0
, Mips::A1
, Mips::A2
, Mips::A3
405 static const unsigned EhDataReg64
[] = {
406 Mips::A0_64
, Mips::A1_64
, Mips::A2_64
, Mips::A3_64
409 return STI
.isABI_N64() ? EhDataReg64
[I
] : EhDataReg
[I
];
412 void MipsSEFrameLowering::emitPrologue(MachineFunction
&MF
) const {
413 MachineBasicBlock
&MBB
= MF
.front();
414 MachineFrameInfo
*MFI
= MF
.getFrameInfo();
415 MipsFunctionInfo
*MipsFI
= MF
.getInfo
<MipsFunctionInfo
>();
417 const MipsSEInstrInfo
&TII
=
418 *static_cast<const MipsSEInstrInfo
*>(MF
.getSubtarget().getInstrInfo());
419 const MipsRegisterInfo
&RegInfo
= *static_cast<const MipsRegisterInfo
*>(
420 MF
.getSubtarget().getRegisterInfo());
422 MachineBasicBlock::iterator MBBI
= MBB
.begin();
423 DebugLoc dl
= MBBI
!= MBB
.end() ? MBBI
->getDebugLoc() : DebugLoc();
424 unsigned SP
= STI
.isABI_N64() ? Mips::SP_64
: Mips::SP
;
425 unsigned FP
= STI
.isABI_N64() ? Mips::FP_64
: Mips::FP
;
426 unsigned ZERO
= STI
.isABI_N64() ? Mips::ZERO_64
: Mips::ZERO
;
427 unsigned ADDu
= STI
.isABI_N64() ? Mips::DADDu
: Mips::ADDu
;
429 // First, compute final stack size.
430 uint64_t StackSize
= MFI
->getStackSize();
432 // No need to allocate space on the stack.
433 if (StackSize
== 0 && !MFI
->adjustsStack()) return;
435 MachineModuleInfo
&MMI
= MF
.getMMI();
436 const MCRegisterInfo
*MRI
= MMI
.getContext().getRegisterInfo();
437 MachineLocation DstML
, SrcML
;
440 TII
.adjustStackPtr(SP
, -StackSize
, MBB
, MBBI
);
442 // emit ".cfi_def_cfa_offset StackSize"
443 unsigned CFIIndex
= MMI
.addFrameInst(
444 MCCFIInstruction::createDefCfaOffset(nullptr, -StackSize
));
445 BuildMI(MBB
, MBBI
, dl
, TII
.get(TargetOpcode::CFI_INSTRUCTION
))
446 .addCFIIndex(CFIIndex
);
448 const std::vector
<CalleeSavedInfo
> &CSI
= MFI
->getCalleeSavedInfo();
451 // Find the instruction past the last instruction that saves a callee-saved
452 // register to the stack.
453 for (unsigned i
= 0; i
< CSI
.size(); ++i
)
456 // Iterate over list of callee-saved registers and emit .cfi_offset
458 for (std::vector
<CalleeSavedInfo
>::const_iterator I
= CSI
.begin(),
459 E
= CSI
.end(); I
!= E
; ++I
) {
460 int64_t Offset
= MFI
->getObjectOffset(I
->getFrameIdx());
461 unsigned Reg
= I
->getReg();
463 // If Reg is a double precision register, emit two cfa_offsets,
464 // one for each of the paired single precision registers.
465 if (Mips::AFGR64RegClass
.contains(Reg
)) {
467 MRI
->getDwarfRegNum(RegInfo
.getSubReg(Reg
, Mips::sub_lo
), true);
469 MRI
->getDwarfRegNum(RegInfo
.getSubReg(Reg
, Mips::sub_hi
), true);
472 std::swap(Reg0
, Reg1
);
474 unsigned CFIIndex
= MMI
.addFrameInst(
475 MCCFIInstruction::createOffset(nullptr, Reg0
, Offset
));
476 BuildMI(MBB
, MBBI
, dl
, TII
.get(TargetOpcode::CFI_INSTRUCTION
))
477 .addCFIIndex(CFIIndex
);
479 CFIIndex
= MMI
.addFrameInst(
480 MCCFIInstruction::createOffset(nullptr, Reg1
, Offset
+ 4));
481 BuildMI(MBB
, MBBI
, dl
, TII
.get(TargetOpcode::CFI_INSTRUCTION
))
482 .addCFIIndex(CFIIndex
);
483 } else if (Mips::FGR64RegClass
.contains(Reg
)) {
484 unsigned Reg0
= MRI
->getDwarfRegNum(Reg
, true);
485 unsigned Reg1
= MRI
->getDwarfRegNum(Reg
, true) + 1;
488 std::swap(Reg0
, Reg1
);
490 unsigned CFIIndex
= MMI
.addFrameInst(
491 MCCFIInstruction::createOffset(nullptr, Reg0
, Offset
));
492 BuildMI(MBB
, MBBI
, dl
, TII
.get(TargetOpcode::CFI_INSTRUCTION
))
493 .addCFIIndex(CFIIndex
);
495 CFIIndex
= MMI
.addFrameInst(
496 MCCFIInstruction::createOffset(nullptr, Reg1
, Offset
+ 4));
497 BuildMI(MBB
, MBBI
, dl
, TII
.get(TargetOpcode::CFI_INSTRUCTION
))
498 .addCFIIndex(CFIIndex
);
500 // Reg is either in GPR32 or FGR32.
501 unsigned CFIIndex
= MMI
.addFrameInst(MCCFIInstruction::createOffset(
502 nullptr, MRI
->getDwarfRegNum(Reg
, 1), Offset
));
503 BuildMI(MBB
, MBBI
, dl
, TII
.get(TargetOpcode::CFI_INSTRUCTION
))
504 .addCFIIndex(CFIIndex
);
509 if (MipsFI
->callsEhReturn()) {
510 const TargetRegisterClass
*RC
= STI
.isABI_N64() ?
511 &Mips::GPR64RegClass
: &Mips::GPR32RegClass
;
513 // Insert instructions that spill eh data registers.
514 for (int I
= 0; I
< 4; ++I
) {
515 if (!MBB
.isLiveIn(ehDataReg(I
)))
516 MBB
.addLiveIn(ehDataReg(I
));
517 TII
.storeRegToStackSlot(MBB
, MBBI
, ehDataReg(I
), false,
518 MipsFI
->getEhDataRegFI(I
), RC
, &RegInfo
);
521 // Emit .cfi_offset directives for eh data registers.
522 for (int I
= 0; I
< 4; ++I
) {
523 int64_t Offset
= MFI
->getObjectOffset(MipsFI
->getEhDataRegFI(I
));
524 unsigned Reg
= MRI
->getDwarfRegNum(ehDataReg(I
), true);
525 unsigned CFIIndex
= MMI
.addFrameInst(
526 MCCFIInstruction::createOffset(nullptr, Reg
, Offset
));
527 BuildMI(MBB
, MBBI
, dl
, TII
.get(TargetOpcode::CFI_INSTRUCTION
))
528 .addCFIIndex(CFIIndex
);
532 // if framepointer enabled, set it to point to the stack pointer.
534 // Insert instruction "move $fp, $sp" at this location.
535 BuildMI(MBB
, MBBI
, dl
, TII
.get(ADDu
), FP
).addReg(SP
).addReg(ZERO
)
536 .setMIFlag(MachineInstr::FrameSetup
);
538 // emit ".cfi_def_cfa_register $fp"
539 unsigned CFIIndex
= MMI
.addFrameInst(MCCFIInstruction::createDefCfaRegister(
540 nullptr, MRI
->getDwarfRegNum(FP
, true)));
541 BuildMI(MBB
, MBBI
, dl
, TII
.get(TargetOpcode::CFI_INSTRUCTION
))
542 .addCFIIndex(CFIIndex
);
546 void MipsSEFrameLowering::emitEpilogue(MachineFunction
&MF
,
547 MachineBasicBlock
&MBB
) const {
548 MachineBasicBlock::iterator MBBI
= MBB
.getLastNonDebugInstr();
549 MachineFrameInfo
*MFI
= MF
.getFrameInfo();
550 MipsFunctionInfo
*MipsFI
= MF
.getInfo
<MipsFunctionInfo
>();
552 const MipsSEInstrInfo
&TII
=
553 *static_cast<const MipsSEInstrInfo
*>(MF
.getSubtarget().getInstrInfo());
554 const MipsRegisterInfo
&RegInfo
= *static_cast<const MipsRegisterInfo
*>(
555 MF
.getSubtarget().getRegisterInfo());
557 DebugLoc dl
= MBBI
->getDebugLoc();
558 unsigned SP
= STI
.isABI_N64() ? Mips::SP_64
: Mips::SP
;
559 unsigned FP
= STI
.isABI_N64() ? Mips::FP_64
: Mips::FP
;
560 unsigned ZERO
= STI
.isABI_N64() ? Mips::ZERO_64
: Mips::ZERO
;
561 unsigned ADDu
= STI
.isABI_N64() ? Mips::DADDu
: Mips::ADDu
;
563 // if framepointer enabled, restore the stack pointer.
565 // Find the first instruction that restores a callee-saved register.
566 MachineBasicBlock::iterator I
= MBBI
;
568 for (unsigned i
= 0; i
< MFI
->getCalleeSavedInfo().size(); ++i
)
571 // Insert instruction "move $sp, $fp" at this location.
572 BuildMI(MBB
, I
, dl
, TII
.get(ADDu
), SP
).addReg(FP
).addReg(ZERO
);
575 if (MipsFI
->callsEhReturn()) {
576 const TargetRegisterClass
*RC
= STI
.isABI_N64() ?
577 &Mips::GPR64RegClass
: &Mips::GPR32RegClass
;
579 // Find first instruction that restores a callee-saved register.
580 MachineBasicBlock::iterator I
= MBBI
;
581 for (unsigned i
= 0; i
< MFI
->getCalleeSavedInfo().size(); ++i
)
584 // Insert instructions that restore eh data registers.
585 for (int J
= 0; J
< 4; ++J
) {
586 TII
.loadRegFromStackSlot(MBB
, I
, ehDataReg(J
), MipsFI
->getEhDataRegFI(J
),
591 // Get the number of bytes from FrameInfo
592 uint64_t StackSize
= MFI
->getStackSize();
598 TII
.adjustStackPtr(SP
, StackSize
, MBB
, MBBI
);
601 bool MipsSEFrameLowering::
602 spillCalleeSavedRegisters(MachineBasicBlock
&MBB
,
603 MachineBasicBlock::iterator MI
,
604 const std::vector
<CalleeSavedInfo
> &CSI
,
605 const TargetRegisterInfo
*TRI
) const {
606 MachineFunction
*MF
= MBB
.getParent();
607 MachineBasicBlock
*EntryBlock
= MF
->begin();
608 const TargetInstrInfo
&TII
= *MF
->getSubtarget().getInstrInfo();
610 for (unsigned i
= 0, e
= CSI
.size(); i
!= e
; ++i
) {
611 // Add the callee-saved register as live-in. Do not add if the register is
612 // RA and return address is taken, because it has already been added in
613 // method MipsTargetLowering::LowerRETURNADDR.
614 // It's killed at the spill, unless the register is RA and return address
616 unsigned Reg
= CSI
[i
].getReg();
617 bool IsRAAndRetAddrIsTaken
= (Reg
== Mips::RA
|| Reg
== Mips::RA_64
)
618 && MF
->getFrameInfo()->isReturnAddressTaken();
619 if (!IsRAAndRetAddrIsTaken
)
620 EntryBlock
->addLiveIn(Reg
);
622 // Insert the spill to the stack frame.
623 bool IsKill
= !IsRAAndRetAddrIsTaken
;
624 const TargetRegisterClass
*RC
= TRI
->getMinimalPhysRegClass(Reg
);
625 TII
.storeRegToStackSlot(*EntryBlock
, MI
, Reg
, IsKill
,
626 CSI
[i
].getFrameIdx(), RC
, TRI
);
633 MipsSEFrameLowering::hasReservedCallFrame(const MachineFunction
&MF
) const {
634 const MachineFrameInfo
*MFI
= MF
.getFrameInfo();
636 // Reserve call frame if the size of the maximum call frame fits into 16-bit
637 // immediate field and there are no variable sized objects on the stack.
638 // Make sure the second register scavenger spill slot can be accessed with one
640 return isInt
<16>(MFI
->getMaxCallFrameSize() + getStackAlignment()) &&
641 !MFI
->hasVarSizedObjects();
644 // Eliminate ADJCALLSTACKDOWN, ADJCALLSTACKUP pseudo instructions
645 void MipsSEFrameLowering::
646 eliminateCallFramePseudoInstr(MachineFunction
&MF
, MachineBasicBlock
&MBB
,
647 MachineBasicBlock::iterator I
) const {
648 const MipsSEInstrInfo
&TII
=
649 *static_cast<const MipsSEInstrInfo
*>(MF
.getSubtarget().getInstrInfo());
651 if (!hasReservedCallFrame(MF
)) {
652 int64_t Amount
= I
->getOperand(0).getImm();
654 if (I
->getOpcode() == Mips::ADJCALLSTACKDOWN
)
657 unsigned SP
= STI
.isABI_N64() ? Mips::SP_64
: Mips::SP
;
658 TII
.adjustStackPtr(SP
, Amount
, MBB
, I
);
664 void MipsSEFrameLowering::
665 processFunctionBeforeCalleeSavedScan(MachineFunction
&MF
,
666 RegScavenger
*RS
) const {
667 MachineRegisterInfo
&MRI
= MF
.getRegInfo();
668 MipsFunctionInfo
*MipsFI
= MF
.getInfo
<MipsFunctionInfo
>();
669 unsigned FP
= STI
.isABI_N64() ? Mips::FP_64
: Mips::FP
;
671 // Mark $fp as used if function has dedicated frame pointer.
673 MRI
.setPhysRegUsed(FP
);
675 // Create spill slots for eh data registers if function calls eh_return.
676 if (MipsFI
->callsEhReturn())
677 MipsFI
->createEhDataRegsFI();
679 // Expand pseudo instructions which load, store or copy accumulators.
680 // Add an emergency spill slot if a pseudo was expanded.
681 if (ExpandPseudo(MF
).expand()) {
682 // The spill slot should be half the size of the accumulator. If target is
683 // mips64, it should be 64-bit, otherwise it should be 32-bt.
684 const TargetRegisterClass
*RC
= STI
.hasMips64() ?
685 &Mips::GPR64RegClass
: &Mips::GPR32RegClass
;
686 int FI
= MF
.getFrameInfo()->CreateStackObject(RC
->getSize(),
687 RC
->getAlignment(), false);
688 RS
->addScavengingFrameIndex(FI
);
691 // Set scavenging frame index if necessary.
692 uint64_t MaxSPOffset
= MF
.getInfo
<MipsFunctionInfo
>()->getIncomingArgSize() +
693 estimateStackSize(MF
);
695 if (isInt
<16>(MaxSPOffset
))
698 const TargetRegisterClass
*RC
= STI
.isABI_N64() ?
699 &Mips::GPR64RegClass
: &Mips::GPR32RegClass
;
700 int FI
= MF
.getFrameInfo()->CreateStackObject(RC
->getSize(),
701 RC
->getAlignment(), false);
702 RS
->addScavengingFrameIndex(FI
);
705 const MipsFrameLowering
*
706 llvm::createMipsSEFrameLowering(const MipsSubtarget
&ST
) {
707 return new MipsSEFrameLowering(ST
);