//===-- R600MachineScheduler.cpp - R600 Scheduler Interface -*- C++ -*-----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief R600 Machine Scheduler interface
//
//===----------------------------------------------------------------------===//

#include "R600MachineScheduler.h"
#include "AMDGPUSubtarget.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Pass.h"
#include "llvm/PassManager.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "misched"

void R600SchedStrategy::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() && "R600SchedStrategy needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  TII = static_cast<const R600InstrInfo*>(DAG->TII);
  TRI = static_cast<const R600RegisterInfo*>(DAG->TRI);
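  // Cayman is the VLIW4 member of the R600 family; every other target handled
  // here exposes the fifth (Trans) ALU slot.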
  VLIW5 = !DAG->MF.getTarget().getSubtarget<AMDGPUSubtarget>().hasCaymanISA();
  MRI = &DAG->MRI;
  CurInstKind = IDOther;
  CurEmitted = 0;
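  // Bits 0-3 of the mask track the X/Y/Z/W slots and bit 4 the Trans slot;
  // start with all five marked occupied so the first pickAlu() call begins a
  // fresh instruction group.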
  OccupedSlotsMask = 31;
  InstKindLimit[IDAlu] = TII->getMaxAlusPerClause();
  InstKindLimit[IDOther] = 32;

  const AMDGPUSubtarget &ST = DAG->TM.getSubtarget<AMDGPUSubtarget>();
  InstKindLimit[IDFetch] = ST.getTexVTXClauseSize();
  AluInstCount = 0;
  FetchInstCount = 0;
}

void R600SchedStrategy::MoveUnits(std::vector<SUnit *> &QSrc,
                                  std::vector<SUnit *> &QDst)
{
  QDst.insert(QDst.end(), QSrc.begin(), QSrc.end());
  QSrc.clear();
}

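// Rough estimate of how many wavefronts can stay resident when each one needs
// GPRCount registers, assuming 248 GPRs are available to share between them.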
static
unsigned getWFCountLimitedByGPR(unsigned GPRCount) {
  assert (GPRCount && "GPRCount cannot be 0");
  return 248 / GPRCount;
}

SUnit* R600SchedStrategy::pickNode(bool &IsTopNode) {
  SUnit *SU = nullptr;
  NextInstKind = IDOther;

  IsTopNode = false;

  // Check whether we might want to switch the current clause type.
  bool AllowSwitchToAlu = (CurEmitted >= InstKindLimit[CurInstKind]) ||
      (Available[CurInstKind].empty());
  bool AllowSwitchFromAlu = (CurEmitted >= InstKindLimit[CurInstKind]) &&
      (!Available[IDFetch].empty() || !Available[IDOther].empty());

  if (CurInstKind == IDAlu && !Available[IDFetch].empty()) {
    // We use the heuristic provided by the AMD Accelerated Parallel Processing
    // OpenCL Programming Guide:
    // The approximate number of WF that allows TEX instructions to hide ALU
    // instructions is:
    // 500 (cycles for TEX) / (AluFetchRatio * 8 (cycles for ALU))
    float ALUFetchRationEstimate =
        (AluInstCount + AvailablesAluCount() + Pending[IDAlu].size()) /
        (FetchInstCount + Available[IDFetch].size());
    if (ALUFetchRationEstimate == 0) {
      AllowSwitchFromAlu = true;
    } else {
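      // 62.5 is 500 / 8 from the formula quoted above.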
      unsigned NeededWF = 62.5f / ALUFetchRationEstimate;
      DEBUG( dbgs() << NeededWF << " approx. Wavefronts Required\n" );
      // We assume the local GPR requirements to be "dominated" by the
      // requirement of the TEX clause (which consumes 128-bit regs); ALU
      // instructions before and after the TEX clause are indeed likely to
      // consume or generate values from/for it.
      // Available[IDFetch].size() * 2 : GPRs required in the Fetch clause.
      // We assume that fetch instructions are either TnXYZW = TEX TnXYZW
      // (need one GPR) or TmXYZW = TnXYZW (need 2 GPRs).
      // (TODO: use RegisterPressure)
      // If we are going to use too many GPRs, we flush Fetch instructions to
      // lower register pressure on 128-bit regs.
      unsigned NearRegisterRequirement = 2 * Available[IDFetch].size();
      if (NeededWF > getWFCountLimitedByGPR(NearRegisterRequirement))
        AllowSwitchFromAlu = true;
    }
  }

  if (!SU && ((AllowSwitchToAlu && CurInstKind != IDAlu) ||
      (!AllowSwitchFromAlu && CurInstKind == IDAlu))) {
    // try to pick ALU
    SU = pickAlu();
    if (!SU && !PhysicalRegCopy.empty()) {
      SU = PhysicalRegCopy.front();
      PhysicalRegCopy.erase(PhysicalRegCopy.begin());
    }
    if (SU) {
      if (CurEmitted >= InstKindLimit[IDAlu])
        CurEmitted = 0;
      NextInstKind = IDAlu;
    }
  }

  if (!SU) {
    // try to pick FETCH
    SU = pickOther(IDFetch);
    if (SU)
      NextInstKind = IDFetch;
  }

  // try to pick other
  if (!SU) {
    SU = pickOther(IDOther);
    if (SU)
      NextInstKind = IDOther;
  }

  DEBUG(
      if (SU) {
        dbgs() << " ** Pick node **\n";
        SU->dump(DAG);
      } else {
        dbgs() << "NO NODE \n";
        for (unsigned i = 0; i < DAG->SUnits.size(); i++) {
          const SUnit &S = DAG->SUnits[i];
          if (!S.isScheduled)
            S.dump(DAG);
        }
      }
  );

  return SU;
}

void R600SchedStrategy::schedNode(SUnit *SU, bool IsTopNode) {
  if (NextInstKind != CurInstKind) {
    DEBUG(dbgs() << "Instruction Type Switch\n");
    if (NextInstKind != IDAlu)
      OccupedSlotsMask |= 31;
    CurEmitted = 0;
    CurInstKind = NextInstKind;
  }

  if (CurInstKind == IDAlu) {
    AluInstCount ++;
    switch (getAluKind(SU)) {
    case AluT_XYZW:
      CurEmitted += 4;
      break;
    case AluDiscarded:
      break;
    default: {
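      // Any other ALU instruction counts as one slot, plus one extra slot for
      // each inline literal operand it reads (counted below).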
      ++CurEmitted;
      for (MachineInstr::mop_iterator It = SU->getInstr()->operands_begin(),
           E = SU->getInstr()->operands_end(); It != E; ++It) {
        MachineOperand &MO = *It;
        if (MO.isReg() && MO.getReg() == AMDGPU::ALU_LITERAL_X)
          ++CurEmitted;
      }
    }
    }
  } else {
    ++CurEmitted;
  }


  DEBUG(dbgs() << CurEmitted << " Instructions Emitted in this clause\n");

  if (CurInstKind != IDFetch) {
    MoveUnits(Pending[IDFetch], Available[IDFetch]);
  } else
    FetchInstCount++;
}

static bool
isPhysicalRegCopy(MachineInstr *MI) {
  if (MI->getOpcode() != AMDGPU::COPY)
    return false;

  return !TargetRegisterInfo::isVirtualRegister(MI->getOperand(1).getReg());
}

void R600SchedStrategy::releaseTopNode(SUnit *SU) {
  DEBUG(dbgs() << "Top Releasing ";SU->dump(DAG););
}

void R600SchedStrategy::releaseBottomNode(SUnit *SU) {
  DEBUG(dbgs() << "Bottom Releasing ";SU->dump(DAG););
  if (isPhysicalRegCopy(SU->getInstr())) {
    PhysicalRegCopy.push_back(SU);
    return;
  }

  int IK = getInstKind(SU);

  // There is no export clause; we can schedule one as soon as it's ready.
  if (IK == IDOther)
    Available[IDOther].push_back(SU);
  else
    Pending[IK].push_back(SU);

}

bool R600SchedStrategy::regBelongsToClass(unsigned Reg,
                                          const TargetRegisterClass *RC) const {
  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
    return RC->contains(Reg);
  } else {
    return MRI->getRegClass(Reg) == RC;
  }
}

R600SchedStrategy::AluKind R600SchedStrategy::getAluKind(SUnit *SU) const {
  MachineInstr *MI = SU->getInstr();

  if (TII->isTransOnly(MI))
    return AluTrans;

  switch (MI->getOpcode()) {
  case AMDGPU::PRED_X:
    return AluPredX;
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
    return AluT_XYZW;
  case AMDGPU::COPY:
    if (MI->getOperand(1).isUndef()) {
      // MI will become a KILL, don't consider it in scheduling.
      return AluDiscarded;
    }
  default:
    break;
  }

  // Does the instruction take a whole IG?
  // XXX: Is it possible to add a helper function in R600InstrInfo that can
  // be used here and in R600PacketizerList::isSoloInstruction()?
  if (TII->isVector(*MI) ||
      TII->isCubeOp(MI->getOpcode()) ||
      TII->isReductionOp(MI->getOpcode()) ||
      MI->getOpcode() == AMDGPU::GROUP_BARRIER) {
    return AluT_XYZW;
  }

  if (TII->isLDSInstr(MI->getOpcode())) {
    return AluT_X;
  }

  // Is the result already assigned to a channel?
  unsigned DestSubReg = MI->getOperand(0).getSubReg();
  switch (DestSubReg) {
  case AMDGPU::sub0:
    return AluT_X;
  case AMDGPU::sub1:
    return AluT_Y;
  case AMDGPU::sub2:
    return AluT_Z;
  case AMDGPU::sub3:
    return AluT_W;
  default:
    break;
  }

  // Is the result already a member of an X/Y/Z/W class?
  unsigned DestReg = MI->getOperand(0).getReg();
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_XRegClass) ||
      regBelongsToClass(DestReg, &AMDGPU::R600_AddrRegClass))
    return AluT_X;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_YRegClass))
    return AluT_Y;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass))
    return AluT_Z;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_TReg32_WRegClass))
    return AluT_W;
  if (regBelongsToClass(DestReg, &AMDGPU::R600_Reg128RegClass))
    return AluT_XYZW;

  // LDS src registers cannot be used in the Trans slot.
  if (TII->readsLDSSrcReg(MI))
    return AluT_XYZW;

  return AluAny;

}

int R600SchedStrategy::getInstKind(SUnit* SU) {
  int Opcode = SU->getInstr()->getOpcode();

  if (TII->usesTextureCache(Opcode) || TII->usesVertexCache(Opcode))
    return IDFetch;

  if (TII->isALUInstr(Opcode)) {
    return IDAlu;
  }

  switch (Opcode) {
  case AMDGPU::PRED_X:
  case AMDGPU::COPY:
  case AMDGPU::CONST_COPY:
  case AMDGPU::INTERP_PAIR_XY:
  case AMDGPU::INTERP_PAIR_ZW:
  case AMDGPU::INTERP_VEC_LOAD:
  case AMDGPU::DOT_4:
    return IDAlu;
  default:
    return IDOther;
  }
}

1a4d82fc | 321 | SUnit *R600SchedStrategy::PopInst(std::vector<SUnit *> &Q, bool AnyALU) { |
970d7e83 | 322 | if (Q.empty()) |
1a4d82fc JJ |
323 | return nullptr; |
324 | for (std::vector<SUnit *>::reverse_iterator It = Q.rbegin(), E = Q.rend(); | |
970d7e83 LB |
325 | It != E; ++It) { |
326 | SUnit *SU = *It; | |
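    // Tentatively add the candidate to the group being formed so the
    // constant-read limit can be checked; it is popped again on both paths
    // below.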
    InstructionsGroupCandidate.push_back(SU->getInstr());
    if (TII->fitsConstReadLimitations(InstructionsGroupCandidate)
        && (!AnyALU || !TII->isVectorOnly(SU->getInstr()))
    ) {
      InstructionsGroupCandidate.pop_back();
1a4d82fc | 332 | Q.erase((It + 1).base()); |
970d7e83 LB |
333 | return SU; |
334 | } else { | |
335 | InstructionsGroupCandidate.pop_back(); | |
336 | } | |
337 | } | |
1a4d82fc | 338 | return nullptr; |
970d7e83 LB |
339 | } |
340 | ||
void R600SchedStrategy::LoadAlu() {
  std::vector<SUnit *> &QSrc = Pending[IDAlu];
  for (unsigned i = 0, e = QSrc.size(); i < e; ++i) {
    AluKind AK = getAluKind(QSrc[i]);
    AvailableAlus[AK].push_back(QSrc[i]);
  }
  QSrc.clear();
}

void R600SchedStrategy::PrepareNextSlot() {
  DEBUG(dbgs() << "New Slot\n");
  assert (OccupedSlotsMask && "Slot wasn't filled");
  OccupedSlotsMask = 0;
//  if (HwGen == AMDGPUSubtarget::NORTHERN_ISLANDS)
//    OccupedSlotsMask |= 16;
  InstructionsGroupCandidate.clear();
  LoadAlu();
}

void R600SchedStrategy::AssignSlot(MachineInstr* MI, unsigned Slot) {
  int DstIndex = TII->getOperandIdx(MI->getOpcode(), AMDGPU::OpName::dst);
  if (DstIndex == -1) {
    return;
  }
  unsigned DestReg = MI->getOperand(DstIndex).getReg();
  // PressureRegister crashes if an operand is defined and used in the same
  // instruction and we try to constrain its regclass.
  for (MachineInstr::mop_iterator It = MI->operands_begin(),
      E = MI->operands_end(); It != E; ++It) {
    MachineOperand &MO = *It;
    if (MO.isReg() && !MO.isDef() &&
        MO.getReg() == DestReg)
      return;
  }
  // Constrain the regclass of DestReg to assign it to Slot.
  switch (Slot) {
  case 0:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_XRegClass);
    break;
  case 1:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_YRegClass);
    break;
  case 2:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_ZRegClass);
    break;
  case 3:
    MRI->constrainRegClass(DestReg, &AMDGPU::R600_TReg32_WRegClass);
    break;
  }
}

SUnit *R600SchedStrategy::AttemptFillSlot(unsigned Slot, bool AnyAlu) {
  static const AluKind IndexToID[] = {AluT_X, AluT_Y, AluT_Z, AluT_W};
  SUnit *SlotedSU = PopInst(AvailableAlus[IndexToID[Slot]], AnyAlu);
  if (SlotedSU)
    return SlotedSU;
  SUnit *UnslotedSU = PopInst(AvailableAlus[AluAny], AnyAlu);
  if (UnslotedSU)
    AssignSlot(UnslotedSU->getInstr(), Slot);
  return UnslotedSU;
}

unsigned R600SchedStrategy::AvailablesAluCount() const {
  return AvailableAlus[AluAny].size() + AvailableAlus[AluT_XYZW].size() +
      AvailableAlus[AluT_X].size() + AvailableAlus[AluT_Y].size() +
      AvailableAlus[AluT_Z].size() + AvailableAlus[AluT_W].size() +
      AvailableAlus[AluTrans].size() + AvailableAlus[AluDiscarded].size() +
      AvailableAlus[AluPredX].size();
}

SUnit* R600SchedStrategy::pickAlu() {
  while (AvailablesAluCount() || !Pending[IDAlu].empty()) {
    if (!OccupedSlotsMask) {
      // Bottom-up scheduling: PRED_X must come first.
      if (!AvailableAlus[AluPredX].empty()) {
        OccupedSlotsMask |= 31;
        return PopInst(AvailableAlus[AluPredX], false);
      }
      // Flush physical reg copies (RA will discard them).
      if (!AvailableAlus[AluDiscarded].empty()) {
        OccupedSlotsMask |= 31;
        return PopInst(AvailableAlus[AluDiscarded], false);
      }
      // If there is a T_XYZW alu available, use it.
      if (!AvailableAlus[AluT_XYZW].empty()) {
        OccupedSlotsMask |= 15;
        return PopInst(AvailableAlus[AluT_XYZW], false);
      }
    }
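    // Bit 4 of the slot mask tracks the fifth (Trans) slot, which is only
    // filled on VLIW5 targets.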
    bool TransSlotOccuped = OccupedSlotsMask & 16;
    if (!TransSlotOccuped && VLIW5) {
      if (!AvailableAlus[AluTrans].empty()) {
        OccupedSlotsMask |= 16;
        return PopInst(AvailableAlus[AluTrans], false);
      }
      SUnit *SU = AttemptFillSlot(3, true);
      if (SU) {
        OccupedSlotsMask |= 16;
        return SU;
      }
    }
    for (int Chan = 3; Chan > -1; --Chan) {
      bool isOccupied = OccupedSlotsMask & (1 << Chan);
      if (!isOccupied) {
        SUnit *SU = AttemptFillSlot(Chan, false);
        if (SU) {
          OccupedSlotsMask |= (1 << Chan);
          InstructionsGroupCandidate.push_back(SU->getInstr());
          return SU;
        }
      }
    }
    PrepareNextSlot();
  }
  return nullptr;
}

SUnit* R600SchedStrategy::pickOther(int QID) {
  SUnit *SU = nullptr;
  std::vector<SUnit *> &AQ = Available[QID];

  if (AQ.empty()) {
    MoveUnits(Pending[QID], AQ);
  }
  if (!AQ.empty()) {
    SU = AQ.back();
    AQ.resize(AQ.size() - 1);
  }
  return SU;
}