//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
                  cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(const X86Subtarget &STI)
    : X86GenRegisterInfo(
          (STI.is64Bit() ? X86::RIP : X86::EIP),
          X86_MC::getDwarfRegFlavour(STI.getTargetTriple(), false),
          X86_MC::getDwarfRegFlavour(STI.getTargetTriple(), true),
          (STI.is64Bit() ? X86::RIP : X86::EIP)),
      Subtarget(STI) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  Is64Bit = Subtarget.is64Bit();
  IsWin64 = Subtarget.isTargetWin64();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = (Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64()) ?
        X86::RSP : X86::ESP;
    FramePtr = (Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64()) ?
        X86::RBP : X86::EBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit PIC mode
  // the GOT pointer is expected in EBX before calls made through the PLT.
  BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // ExeDepsFixer and PostRAScheduler require liveness.
  return true;
}

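// Map an LLVM register to its native hardware encoding, which is the register
// numbering used by Win64 SEH unwind information.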
int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return nullptr;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

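  // Otherwise walk the super-class chain and return a basic GPR, FP, or vector
  // class of the same spill size, falling back to RC itself.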
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (Subtarget.isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    else if (Subtarget.is64Bit())
      return &X86::GR64_TCRegClass;

    const Function *F = MF.getFunction();
    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}

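// EFLAGS (the CCR register class) cannot be copied directly; cross-class
// copies of condition codes have to go through a general-purpose register.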
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

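  // When a frame pointer is in use it permanently occupies one GPR, so the
  // general-purpose limits below are reduced by one in that case.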
  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return Subtarget.is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  assert(MF && "MachineFunction required");
  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  default:
    break;
  }

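  // No special convention matched: fall back to the default save lists,
  // choosing the EH-return variant when the function calls eh.return.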
  bool CallsEHReturn = MF->getMMI().callsEHReturn();
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  default:
    break;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_RegMask;
    return CSR_64_RegMask;
  }
  return CSR_32_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
         ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t* RegMask = getCallPreservedMask(CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
          "Stack realignment in presence of dynamic allocas is not supported "
          "with this calling convention.");

    for (MCSubRegIterator I(getBaseRegister(), this, /*IncludeSelf=*/true);
         I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though their
    // super-registers are the original 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
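  // XMM16-XMM31 (and their YMM/ZMM aliases) only exist with AVX-512 in 64-bit
  // mode; reserve them otherwise.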
  if (!Is64Bit || !Subtarget.hasAVX512()) {
    for (unsigned n = 16; n != 32; ++n) {
      for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment, we can't address the stack from the frame
  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
  // can't address variables from the stack pointer. MS inline asm can
  // reference locals while also adjusting the stack pointer. When we can't
  // use both the SP and the FP, we need a separate base pointer register.
  bool CantUseFP = needsStackRealignment(MF);
  bool CantUseSP =
      MFI->hasVarSizedObjects() || MFI->hasInlineAsmWithSPAdjust();
  return CantUseFP && CantUseSP;
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
    return false;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign =
      MF.getSubtarget().getFrameLowering()->getStackAlignment();
  bool requiresRealignment =
      ((MFI->getMaxAlignment() > StackAlign) ||
       F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                       Attribute::StackAlignment));

  // If we've requested that we force-align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  // Since X86 defines assignCalleeSavedSpillSlots, which always returns true,
  // this function is neither used nor tested.
  llvm_unreachable("Unused function on X86. Otherwise need a test case.");
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // For LEA64_32r, when BasePtr is 32 bits (X32) we can use the full-size
  // 64-bit register as the source operand: the semantics are the same and the
  // destination is still 32 bits. This saves one byte per LEA since the 0x67
  // prefix is avoided.
  if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
    BasePtr = getX86SubSuperRegister(BasePtr, MVT::i64, false);

  // This must be part of a memory reference. Replace the FrameIndex operand
  // with the chosen base register, then fold the frame offset into the
  // displacement below.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

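  // A regular X86 memory operand is [BaseReg, ScaleAmt, IndexReg, Disp,
  // Segment], so operand FIOperandNum + 3 is the displacement being patched.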
  if (MI.getOperand(FIOperandNum + 3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
        (uint64_t)MI.getOperand(FIOperandNum + 3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

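// On x32 (64-bit hardware with 32-bit pointers) addresses are 32 bits wide, so
// return the 32-bit alias of the frame register.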
unsigned X86RegisterInfo::getPtrSizedFrameRegister(
    const MachineFunction &MF) const {
  unsigned FrameReg = getFrameRegister(MF);
  if (Subtarget.isTarget64BitILP32())
    FrameReg = getX86SubSuperRegister(FrameReg, MVT::i32, false);
  return FrameReg;
}

namespace llvm {
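// Return the sub- or super-register of Reg with the size given by VT. For
// MVT::i8, High selects the high-byte register (AH, BH, CH, DH) where one
// exists.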
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}

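// Map an XMM, YMM, or ZMM register to the ZMM register that contains it.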
unsigned get512BitSuperRegister(unsigned Reg) {
  if (Reg >= X86::XMM0 && Reg <= X86::XMM31)
    return X86::ZMM0 + (Reg - X86::XMM0);
  if (Reg >= X86::YMM0 && Reg <= X86::YMM31)
    return X86::ZMM0 + (Reg - X86::YMM0);
  if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31)
    return Reg;
  llvm_unreachable("Unexpected SIMD register");
}

}