//===- HexagonMachineScheduler.cpp - MI Scheduler for Hexagon -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//
#include "HexagonMachineScheduler.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/IR/Function.h"

using namespace llvm;

#define DEBUG_TYPE "misched"
23 /// Platform-specific modifications to DAG.
24 void VLIWMachineScheduler::postprocessDAG() {
25 SUnit
* LastSequentialCall
= nullptr;
26 // Currently we only catch the situation when compare gets scheduled
27 // before preceding call.
28 for (unsigned su
= 0, e
= SUnits
.size(); su
!= e
; ++su
) {
30 if (SUnits
[su
].getInstr()->isCall())
31 LastSequentialCall
= &(SUnits
[su
]);
32 // Look for a compare that defines a predicate.
33 else if (SUnits
[su
].getInstr()->isCompare() && LastSequentialCall
)
34 SUnits
[su
].addPred(SDep(LastSequentialCall
, SDep::Barrier
));
38 /// Check if scheduling of this SU is possible
39 /// in the current packet.
40 /// It is _not_ precise (statefull), it is more like
41 /// another heuristic. Many corner cases are figured
43 bool VLIWResourceModel::isResourceAvailable(SUnit
*SU
) {
44 if (!SU
|| !SU
->getInstr())
47 // First see if the pipeline could receive this instruction
48 // in the current cycle.
49 switch (SU
->getInstr()->getOpcode()) {
51 if (!ResourcesModel
->canReserveResources(SU
->getInstr()))
53 case TargetOpcode::EXTRACT_SUBREG
:
54 case TargetOpcode::INSERT_SUBREG
:
55 case TargetOpcode::SUBREG_TO_REG
:
56 case TargetOpcode::REG_SEQUENCE
:
57 case TargetOpcode::IMPLICIT_DEF
:
58 case TargetOpcode::COPY
:
59 case TargetOpcode::INLINEASM
:
63 // Now see if there are no other dependencies to instructions already
65 for (unsigned i
= 0, e
= Packet
.size(); i
!= e
; ++i
) {
66 if (Packet
[i
]->Succs
.size() == 0)
68 for (SUnit::const_succ_iterator I
= Packet
[i
]->Succs
.begin(),
69 E
= Packet
[i
]->Succs
.end(); I
!= E
; ++I
) {
70 // Since we do not add pseudos to packets, might as well
71 // ignore order dependencies.
75 if (I
->getSUnit() == SU
)
82 /// Keep track of available resources.
83 bool VLIWResourceModel::reserveResources(SUnit
*SU
) {
84 bool startNewCycle
= false;
85 // Artificially reset state.
87 ResourcesModel
->clearResources();
92 // If this SU does not fit in the packet
94 if (!isResourceAvailable(SU
)) {
95 ResourcesModel
->clearResources();
101 switch (SU
->getInstr()->getOpcode()) {
103 ResourcesModel
->reserveResources(SU
->getInstr());
105 case TargetOpcode::EXTRACT_SUBREG
:
106 case TargetOpcode::INSERT_SUBREG
:
107 case TargetOpcode::SUBREG_TO_REG
:
108 case TargetOpcode::REG_SEQUENCE
:
109 case TargetOpcode::IMPLICIT_DEF
:
110 case TargetOpcode::KILL
:
111 case TargetOpcode::CFI_INSTRUCTION
:
112 case TargetOpcode::EH_LABEL
:
113 case TargetOpcode::COPY
:
114 case TargetOpcode::INLINEASM
:
117 Packet
.push_back(SU
);
120 DEBUG(dbgs() << "Packet[" << TotalPackets
<< "]:\n");
121 for (unsigned i
= 0, e
= Packet
.size(); i
!= e
; ++i
) {
122 DEBUG(dbgs() << "\t[" << i
<< "] SU(");
123 DEBUG(dbgs() << Packet
[i
]->NodeNum
<< ")\t");
124 DEBUG(Packet
[i
]->getInstr()->dump());
128 // If packet is now full, reset the state so in the next cycle
130 if (Packet
.size() >= SchedModel
->getIssueWidth()) {
131 ResourcesModel
->clearResources();
134 startNewCycle
= true;
137 return startNewCycle
;
140 /// schedule - Called back from MachineScheduler::runOnMachineFunction
141 /// after setting up the current scheduling region. [RegionBegin, RegionEnd)
142 /// only includes instructions that have DAG nodes, not scheduling boundaries.
143 void VLIWMachineScheduler::schedule() {
145 << "********** MI Converging Scheduling VLIW BB#" << BB
->getNumber()
146 << " " << BB
->getName()
147 << " in_func " << BB
->getParent()->getFunction()->getName()
148 << " at loop depth " << MLI
->getLoopDepth(BB
)
151 buildDAGWithRegPressure();
153 // Postprocess the DAG to add platform-specific artificial dependencies.
156 SmallVector
<SUnit
*, 8> TopRoots
, BotRoots
;
157 findRootsAndBiasEdges(TopRoots
, BotRoots
);
159 // Initialize the strategy before modifying the DAG.
160 SchedImpl
->initialize(this);
162 // To view Height/Depth correctly, they should be accessed at least once.
164 // FIXME: SUnit::dumpAll always recompute depth and height now. The max
165 // depth/height could be computed directly from the roots and leaves.
166 DEBUG(unsigned maxH
= 0;
167 for (unsigned su
= 0, e
= SUnits
.size(); su
!= e
; ++su
)
168 if (SUnits
[su
].getHeight() > maxH
)
169 maxH
= SUnits
[su
].getHeight();
170 dbgs() << "Max Height " << maxH
<< "\n";);
171 DEBUG(unsigned maxD
= 0;
172 for (unsigned su
= 0, e
= SUnits
.size(); su
!= e
; ++su
)
173 if (SUnits
[su
].getDepth() > maxD
)
174 maxD
= SUnits
[su
].getDepth();
175 dbgs() << "Max Depth " << maxD
<< "\n";);
176 DEBUG(for (unsigned su
= 0, e
= SUnits
.size(); su
!= e
; ++su
)
177 SUnits
[su
].dumpAll(this));
179 initQueues(TopRoots
, BotRoots
);
181 bool IsTopNode
= false;
182 while (SUnit
*SU
= SchedImpl
->pickNode(IsTopNode
)) {
183 if (!checkSchedLimit())
186 scheduleMI(SU
, IsTopNode
);
188 updateQueues(SU
, IsTopNode
);
190 // Notify the scheduling strategy after updating the DAG.
191 SchedImpl
->schedNode(SU
, IsTopNode
);
193 assert(CurrentTop
== CurrentBottom
&& "Nonempty unscheduled zone.");
198 void ConvergingVLIWScheduler::initialize(ScheduleDAGMI
*dag
) {
199 DAG
= static_cast<VLIWMachineScheduler
*>(dag
);
200 SchedModel
= DAG
->getSchedModel();
202 Top
.init(DAG
, SchedModel
);
203 Bot
.init(DAG
, SchedModel
);
205 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
206 // are disabled, then these HazardRecs will be disabled.
207 const InstrItineraryData
*Itin
= DAG
->getSchedModel()->getInstrItineraries();
208 const TargetMachine
&TM
= DAG
->MF
.getTarget();
209 delete Top
.HazardRec
;
210 delete Bot
.HazardRec
;
212 TM
.getSubtargetImpl()->getInstrInfo()->CreateTargetMIHazardRecognizer(
215 TM
.getSubtargetImpl()->getInstrInfo()->CreateTargetMIHazardRecognizer(
218 delete Top
.ResourceModel
;
219 delete Bot
.ResourceModel
;
220 Top
.ResourceModel
= new VLIWResourceModel(TM
, DAG
->getSchedModel());
221 Bot
.ResourceModel
= new VLIWResourceModel(TM
, DAG
->getSchedModel());
223 assert((!llvm::ForceTopDown
|| !llvm::ForceBottomUp
) &&
224 "-misched-topdown incompatible with -misched-bottomup");
227 void ConvergingVLIWScheduler::releaseTopNode(SUnit
*SU
) {
231 for (SUnit::succ_iterator I
= SU
->Preds
.begin(), E
= SU
->Preds
.end();
233 unsigned PredReadyCycle
= I
->getSUnit()->TopReadyCycle
;
234 unsigned MinLatency
= I
->getLatency();
236 Top
.MaxMinLatency
= std::max(MinLatency
, Top
.MaxMinLatency
);
238 if (SU
->TopReadyCycle
< PredReadyCycle
+ MinLatency
)
239 SU
->TopReadyCycle
= PredReadyCycle
+ MinLatency
;
241 Top
.releaseNode(SU
, SU
->TopReadyCycle
);
244 void ConvergingVLIWScheduler::releaseBottomNode(SUnit
*SU
) {
248 assert(SU
->getInstr() && "Scheduled SUnit must have instr");
250 for (SUnit::succ_iterator I
= SU
->Succs
.begin(), E
= SU
->Succs
.end();
252 unsigned SuccReadyCycle
= I
->getSUnit()->BotReadyCycle
;
253 unsigned MinLatency
= I
->getLatency();
255 Bot
.MaxMinLatency
= std::max(MinLatency
, Bot
.MaxMinLatency
);
257 if (SU
->BotReadyCycle
< SuccReadyCycle
+ MinLatency
)
258 SU
->BotReadyCycle
= SuccReadyCycle
+ MinLatency
;
260 Bot
.releaseNode(SU
, SU
->BotReadyCycle
);
263 /// Does this SU have a hazard within the current instruction group.
265 /// The scheduler supports two modes of hazard recognition. The first is the
266 /// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
267 /// supports highly complicated in-order reservation tables
268 /// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
270 /// The second is a streamlined mechanism that checks for hazards based on
271 /// simple counters that the scheduler itself maintains. It explicitly checks
272 /// for instruction dispatch limitations, including the number of micro-ops that
273 /// can dispatch per cycle.
275 /// TODO: Also check whether the SU must start a new group.
276 bool ConvergingVLIWScheduler::VLIWSchedBoundary::checkHazard(SUnit
*SU
) {
277 if (HazardRec
->isEnabled())
278 return HazardRec
->getHazardType(SU
) != ScheduleHazardRecognizer::NoHazard
;
280 unsigned uops
= SchedModel
->getNumMicroOps(SU
->getInstr());
281 if (IssueCount
+ uops
> SchedModel
->getIssueWidth())
287 void ConvergingVLIWScheduler::VLIWSchedBoundary::releaseNode(SUnit
*SU
,
288 unsigned ReadyCycle
) {
289 if (ReadyCycle
< MinReadyCycle
)
290 MinReadyCycle
= ReadyCycle
;
292 // Check for interlocks first. For the purpose of other heuristics, an
293 // instruction that cannot issue appears as if it's not in the ReadyQueue.
294 if (ReadyCycle
> CurrCycle
|| checkHazard(SU
))
301 /// Move the boundary of scheduled code by one cycle.
302 void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpCycle() {
303 unsigned Width
= SchedModel
->getIssueWidth();
304 IssueCount
= (IssueCount
<= Width
) ? 0 : IssueCount
- Width
;
306 assert(MinReadyCycle
< UINT_MAX
&& "MinReadyCycle uninitialized");
307 unsigned NextCycle
= std::max(CurrCycle
+ 1, MinReadyCycle
);
309 if (!HazardRec
->isEnabled()) {
310 // Bypass HazardRec virtual calls.
311 CurrCycle
= NextCycle
;
313 // Bypass getHazardType calls in case of long latency.
314 for (; CurrCycle
!= NextCycle
; ++CurrCycle
) {
316 HazardRec
->AdvanceCycle();
318 HazardRec
->RecedeCycle();
323 DEBUG(dbgs() << "*** " << Available
.getName() << " cycle "
324 << CurrCycle
<< '\n');
327 /// Move the boundary of scheduled code by one SUnit.
328 void ConvergingVLIWScheduler::VLIWSchedBoundary::bumpNode(SUnit
*SU
) {
329 bool startNewCycle
= false;
331 // Update the reservation table.
332 if (HazardRec
->isEnabled()) {
333 if (!isTop() && SU
->isCall
) {
334 // Calls are scheduled with their preceding instructions. For bottom-up
335 // scheduling, clear the pipeline state before emitting.
338 HazardRec
->EmitInstruction(SU
);
342 startNewCycle
= ResourceModel
->reserveResources(SU
);
344 // Check the instruction group dispatch limit.
345 // TODO: Check if this SU must end a dispatch group.
346 IssueCount
+= SchedModel
->getNumMicroOps(SU
->getInstr());
348 DEBUG(dbgs() << "*** Max instrs at cycle " << CurrCycle
<< '\n');
352 DEBUG(dbgs() << "*** IssueCount " << IssueCount
353 << " at cycle " << CurrCycle
<< '\n');
356 /// Release pending ready nodes in to the available queue. This makes them
357 /// visible to heuristics.
358 void ConvergingVLIWScheduler::VLIWSchedBoundary::releasePending() {
359 // If the available queue is empty, it is safe to reset MinReadyCycle.
360 if (Available
.empty())
361 MinReadyCycle
= UINT_MAX
;
363 // Check to see if any of the pending instructions are ready to issue. If
364 // so, add them to the available queue.
365 for (unsigned i
= 0, e
= Pending
.size(); i
!= e
; ++i
) {
366 SUnit
*SU
= *(Pending
.begin()+i
);
367 unsigned ReadyCycle
= isTop() ? SU
->TopReadyCycle
: SU
->BotReadyCycle
;
369 if (ReadyCycle
< MinReadyCycle
)
370 MinReadyCycle
= ReadyCycle
;
372 if (ReadyCycle
> CurrCycle
)
379 Pending
.remove(Pending
.begin()+i
);
382 CheckPending
= false;
385 /// Remove SU from the ready set for this boundary.
386 void ConvergingVLIWScheduler::VLIWSchedBoundary::removeReady(SUnit
*SU
) {
387 if (Available
.isInQueue(SU
))
388 Available
.remove(Available
.find(SU
));
390 assert(Pending
.isInQueue(SU
) && "bad ready count");
391 Pending
.remove(Pending
.find(SU
));
395 /// If this queue only has one ready candidate, return it. As a side effect,
396 /// advance the cycle until at least one node is ready. If multiple instructions
397 /// are ready, return NULL.
398 SUnit
*ConvergingVLIWScheduler::VLIWSchedBoundary::pickOnlyChoice() {
402 for (unsigned i
= 0; Available
.empty(); ++i
) {
403 assert(i
<= (HazardRec
->getMaxLookAhead() + MaxMinLatency
) &&
404 "permanent hazard"); (void)i
;
405 ResourceModel
->reserveResources(nullptr);
409 if (Available
.size() == 1)
410 return *Available
.begin();
415 void ConvergingVLIWScheduler::traceCandidate(const char *Label
,
417 SUnit
*SU
, PressureChange P
) {
418 dbgs() << Label
<< " " << Q
.getName() << " ";
420 dbgs() << DAG
->TRI
->getRegPressureSetName(P
.getPSet()) << ":"
421 << P
.getUnitInc() << " ";
428 /// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
429 /// of SU, return it, otherwise return null.
430 static SUnit
*getSingleUnscheduledPred(SUnit
*SU
) {
431 SUnit
*OnlyAvailablePred
= nullptr;
432 for (SUnit::const_pred_iterator I
= SU
->Preds
.begin(), E
= SU
->Preds
.end();
434 SUnit
&Pred
= *I
->getSUnit();
435 if (!Pred
.isScheduled
) {
436 // We found an available, but not scheduled, predecessor. If it's the
437 // only one we have found, keep track of it... otherwise give up.
438 if (OnlyAvailablePred
&& OnlyAvailablePred
!= &Pred
)
440 OnlyAvailablePred
= &Pred
;
443 return OnlyAvailablePred
;
446 /// getSingleUnscheduledSucc - If there is exactly one unscheduled successor
447 /// of SU, return it, otherwise return null.
448 static SUnit
*getSingleUnscheduledSucc(SUnit
*SU
) {
449 SUnit
*OnlyAvailableSucc
= nullptr;
450 for (SUnit::const_succ_iterator I
= SU
->Succs
.begin(), E
= SU
->Succs
.end();
452 SUnit
&Succ
= *I
->getSUnit();
453 if (!Succ
.isScheduled
) {
454 // We found an available, but not scheduled, successor. If it's the
455 // only one we have found, keep track of it... otherwise give up.
456 if (OnlyAvailableSucc
&& OnlyAvailableSucc
!= &Succ
)
458 OnlyAvailableSucc
= &Succ
;
461 return OnlyAvailableSucc
;
// Constants used to denote relative importance of
// heuristic components for cost computation.
static const unsigned PriorityOne = 200;  // Forced/high scheduling priority.
static const unsigned PriorityTwo = 50;   // Weight of register-pressure terms.
static const unsigned ScaleTwo = 10;      // Scale for path length / blocking.
static const unsigned FactorOne = 2;      // Shift applied when resources fit.
471 /// Single point to compute overall scheduling cost.
472 /// TODO: More heuristics will be used soon.
473 int ConvergingVLIWScheduler::SchedulingCost(ReadyQueue
&Q
, SUnit
*SU
,
474 SchedCandidate
&Candidate
,
475 RegPressureDelta
&Delta
,
477 // Initial trivial priority.
480 // Do not waste time on a node that is already scheduled.
481 if (!SU
|| SU
->isScheduled
)
484 // Forced priority is high.
485 if (SU
->isScheduleHigh
)
486 ResCount
+= PriorityOne
;
488 // Critical path first.
489 if (Q
.getID() == TopQID
) {
490 ResCount
+= (SU
->getHeight() * ScaleTwo
);
492 // If resources are available for it, multiply the
493 // chance of scheduling.
494 if (Top
.ResourceModel
->isResourceAvailable(SU
))
495 ResCount
<<= FactorOne
;
497 ResCount
+= (SU
->getDepth() * ScaleTwo
);
499 // If resources are available for it, multiply the
500 // chance of scheduling.
501 if (Bot
.ResourceModel
->isResourceAvailable(SU
))
502 ResCount
<<= FactorOne
;
505 unsigned NumNodesBlocking
= 0;
506 if (Q
.getID() == TopQID
) {
507 // How many SUs does it block from scheduling?
508 // Look at all of the successors of this node.
509 // Count the number of nodes that
510 // this node is the sole unscheduled node for.
511 for (SUnit::const_succ_iterator I
= SU
->Succs
.begin(), E
= SU
->Succs
.end();
513 if (getSingleUnscheduledPred(I
->getSUnit()) == SU
)
516 // How many unscheduled predecessors block this node?
517 for (SUnit::const_pred_iterator I
= SU
->Preds
.begin(), E
= SU
->Preds
.end();
519 if (getSingleUnscheduledSucc(I
->getSUnit()) == SU
)
522 ResCount
+= (NumNodesBlocking
* ScaleTwo
);
524 // Factor in reg pressure as a heuristic.
525 ResCount
-= (Delta
.Excess
.getUnitInc()*PriorityTwo
);
526 ResCount
-= (Delta
.CriticalMax
.getUnitInc()*PriorityTwo
);
528 DEBUG(if (verbose
) dbgs() << " Total(" << ResCount
<< ")");
533 /// Pick the best candidate from the top queue.
535 /// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
536 /// DAG building. To adjust for the current scheduling location we need to
537 /// maintain the number of vreg uses remaining to be top-scheduled.
538 ConvergingVLIWScheduler::CandResult
ConvergingVLIWScheduler::
539 pickNodeFromQueue(ReadyQueue
&Q
, const RegPressureTracker
&RPTracker
,
540 SchedCandidate
&Candidate
) {
543 // getMaxPressureDelta temporarily modifies the tracker.
544 RegPressureTracker
&TempTracker
= const_cast<RegPressureTracker
&>(RPTracker
);
546 // BestSU remains NULL if no top candidates beat the best existing candidate.
547 CandResult FoundCandidate
= NoCand
;
548 for (ReadyQueue::iterator I
= Q
.begin(), E
= Q
.end(); I
!= E
; ++I
) {
549 RegPressureDelta RPDelta
;
550 TempTracker
.getMaxPressureDelta((*I
)->getInstr(), RPDelta
,
551 DAG
->getRegionCriticalPSets(),
552 DAG
->getRegPressure().MaxSetPressure
);
554 int CurrentCost
= SchedulingCost(Q
, *I
, Candidate
, RPDelta
, false);
556 // Initialize the candidate if needed.
559 Candidate
.RPDelta
= RPDelta
;
560 Candidate
.SCost
= CurrentCost
;
561 FoundCandidate
= NodeOrder
;
566 if (CurrentCost
> Candidate
.SCost
) {
567 DEBUG(traceCandidate("CCAND", Q
, *I
));
569 Candidate
.RPDelta
= RPDelta
;
570 Candidate
.SCost
= CurrentCost
;
571 FoundCandidate
= BestCost
;
575 // Fall through to original instruction order.
576 // Only consider node order if Candidate was chosen from this Q.
577 if (FoundCandidate
== NoCand
)
580 return FoundCandidate
;
583 /// Pick the best candidate node from either the top or bottom queue.
584 SUnit
*ConvergingVLIWScheduler::pickNodeBidrectional(bool &IsTopNode
) {
585 // Schedule as far as possible in the direction of no choice. This is most
586 // efficient, but also provides the best heuristics for CriticalPSets.
587 if (SUnit
*SU
= Bot
.pickOnlyChoice()) {
591 if (SUnit
*SU
= Top
.pickOnlyChoice()) {
595 SchedCandidate BotCand
;
596 // Prefer bottom scheduling when heuristics are silent.
597 CandResult BotResult
= pickNodeFromQueue(Bot
.Available
,
598 DAG
->getBotRPTracker(), BotCand
);
599 assert(BotResult
!= NoCand
&& "failed to find the first candidate");
601 // If either Q has a single candidate that provides the least increase in
602 // Excess pressure, we can immediately schedule from that Q.
604 // RegionCriticalPSets summarizes the pressure within the scheduled region and
605 // affects picking from either Q. If scheduling in one direction must
606 // increase pressure for one of the excess PSets, then schedule in that
607 // direction first to provide more freedom in the other direction.
608 if (BotResult
== SingleExcess
|| BotResult
== SingleCritical
) {
612 // Check if the top Q has a better candidate.
613 SchedCandidate TopCand
;
614 CandResult TopResult
= pickNodeFromQueue(Top
.Available
,
615 DAG
->getTopRPTracker(), TopCand
);
616 assert(TopResult
!= NoCand
&& "failed to find the first candidate");
618 if (TopResult
== SingleExcess
|| TopResult
== SingleCritical
) {
622 // If either Q has a single candidate that minimizes pressure above the
623 // original region's pressure pick it.
624 if (BotResult
== SingleMax
) {
628 if (TopResult
== SingleMax
) {
632 if (TopCand
.SCost
> BotCand
.SCost
) {
636 // Otherwise prefer the bottom candidate in node order.
641 /// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
642 SUnit
*ConvergingVLIWScheduler::pickNode(bool &IsTopNode
) {
643 if (DAG
->top() == DAG
->bottom()) {
644 assert(Top
.Available
.empty() && Top
.Pending
.empty() &&
645 Bot
.Available
.empty() && Bot
.Pending
.empty() && "ReadyQ garbage");
649 if (llvm::ForceTopDown
) {
650 SU
= Top
.pickOnlyChoice();
652 SchedCandidate TopCand
;
653 CandResult TopResult
=
654 pickNodeFromQueue(Top
.Available
, DAG
->getTopRPTracker(), TopCand
);
655 assert(TopResult
!= NoCand
&& "failed to find the first candidate");
660 } else if (llvm::ForceBottomUp
) {
661 SU
= Bot
.pickOnlyChoice();
663 SchedCandidate BotCand
;
664 CandResult BotResult
=
665 pickNodeFromQueue(Bot
.Available
, DAG
->getBotRPTracker(), BotCand
);
666 assert(BotResult
!= NoCand
&& "failed to find the first candidate");
672 SU
= pickNodeBidrectional(IsTopNode
);
674 if (SU
->isTopReady())
676 if (SU
->isBottomReady())
679 DEBUG(dbgs() << "*** " << (IsTopNode
? "Top" : "Bottom")
680 << " Scheduling Instruction in cycle "
681 << (IsTopNode
? Top
.CurrCycle
: Bot
.CurrCycle
) << '\n';
686 /// Update the scheduler's state after scheduling a node. This is the same node
687 /// that was just returned by pickNode(). However, VLIWMachineScheduler needs
688 /// to update it's state based on the current cycle before MachineSchedStrategy
690 void ConvergingVLIWScheduler::schedNode(SUnit
*SU
, bool IsTopNode
) {
692 SU
->TopReadyCycle
= Top
.CurrCycle
;
695 SU
->BotReadyCycle
= Bot
.CurrCycle
;