1 //===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "llvm/MC/MCTargetAsmParser.h"
11 #include "MCTargetDesc/ARMAddressingModes.h"
12 #include "MCTargetDesc/ARMBaseInfo.h"
13 #include "MCTargetDesc/ARMMCExpr.h"
14 #include "llvm/ADT/BitVector.h"
15 #include "llvm/ADT/OwningPtr.h"
16 #include "llvm/ADT/STLExtras.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/ADT/StringSwitch.h"
19 #include "llvm/ADT/Twine.h"
20 #include "llvm/MC/MCAsmInfo.h"
21 #include "llvm/MC/MCAssembler.h"
22 #include "llvm/MC/MCContext.h"
23 #include "llvm/MC/MCELFStreamer.h"
24 #include "llvm/MC/MCExpr.h"
25 #include "llvm/MC/MCInst.h"
26 #include "llvm/MC/MCInstrDesc.h"
27 #include "llvm/MC/MCParser/MCAsmLexer.h"
28 #include "llvm/MC/MCParser/MCAsmParser.h"
29 #include "llvm/MC/MCParser/MCParsedAsmOperand.h"
30 #include "llvm/MC/MCRegisterInfo.h"
31 #include "llvm/MC/MCStreamer.h"
32 #include "llvm/MC/MCSubtargetInfo.h"
33 #include "llvm/Support/ELF.h"
34 #include "llvm/Support/MathExtras.h"
35 #include "llvm/Support/SourceMgr.h"
36 #include "llvm/Support/TargetRegistry.h"
37 #include "llvm/Support/raw_ostream.h"
// Lane qualifier parsed on a NEON vector-list operand: no lane suffix at
// all, an "all lanes" suffix ("[]"), or one specific indexed lane ("[n]").
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
47 class ARMAsmParser
: public MCTargetAsmParser
{
50 const MCRegisterInfo
*MRI
;
52 // Map of register aliases registers via the .req directive.
53 StringMap
<unsigned> RegisterReqs
;
56 ARMCC::CondCodes Cond
; // Condition for IT block.
57 unsigned Mask
:4; // Condition mask for instructions.
58 // Starting at first 1 (from lsb).
59 // '1' condition as indicated in IT.
60 // '0' inverse of condition (else).
61 // Count of instructions in IT block is
62 // 4 - trailingzeroes(mask)
64 bool FirstCond
; // Explicit flag for when we're parsing the
65 // First instruction in the IT block. It's
66 // implied in the mask, so needs special
69 unsigned CurPosition
; // Current position in parsing of IT
70 // block. In range [0,3]. Initialized
71 // according to count of instructions in block.
72 // ~0U if no active IT block.
74 bool inITBlock() { return ITState
.CurPosition
!= ~0U;}
75 void forwardITPosition() {
76 if (!inITBlock()) return;
77 // Move to the next instruction in the IT block, if there is one. If not,
78 // mark the block as done.
79 unsigned TZ
= CountTrailingZeros_32(ITState
.Mask
);
80 if (++ITState
.CurPosition
== 5 - TZ
)
81 ITState
.CurPosition
= ~0U; // Done with the IT block after this.
85 MCAsmParser
&getParser() const { return Parser
; }
86 MCAsmLexer
&getLexer() const { return Parser
.getLexer(); }
88 bool Warning(SMLoc L
, const Twine
&Msg
,
89 ArrayRef
<SMRange
> Ranges
= ArrayRef
<SMRange
>()) {
90 return Parser
.Warning(L
, Msg
, Ranges
);
92 bool Error(SMLoc L
, const Twine
&Msg
,
93 ArrayRef
<SMRange
> Ranges
= ArrayRef
<SMRange
>()) {
94 return Parser
.Error(L
, Msg
, Ranges
);
97 int tryParseRegister();
98 bool tryParseRegisterWithWriteBack(SmallVectorImpl
<MCParsedAsmOperand
*> &);
99 int tryParseShiftRegister(SmallVectorImpl
<MCParsedAsmOperand
*> &);
100 bool parseRegisterList(SmallVectorImpl
<MCParsedAsmOperand
*> &);
101 bool parseMemory(SmallVectorImpl
<MCParsedAsmOperand
*> &);
102 bool parseOperand(SmallVectorImpl
<MCParsedAsmOperand
*> &, StringRef Mnemonic
);
103 bool parsePrefix(ARMMCExpr::VariantKind
&RefKind
);
104 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc
&ShiftType
,
105 unsigned &ShiftAmount
);
106 bool parseDirectiveWord(unsigned Size
, SMLoc L
);
107 bool parseDirectiveThumb(SMLoc L
);
108 bool parseDirectiveARM(SMLoc L
);
109 bool parseDirectiveThumbFunc(SMLoc L
);
110 bool parseDirectiveCode(SMLoc L
);
111 bool parseDirectiveSyntax(SMLoc L
);
112 bool parseDirectiveReq(StringRef Name
, SMLoc L
);
113 bool parseDirectiveUnreq(SMLoc L
);
114 bool parseDirectiveArch(SMLoc L
);
115 bool parseDirectiveEabiAttr(SMLoc L
);
117 StringRef
splitMnemonic(StringRef Mnemonic
, unsigned &PredicationCode
,
118 bool &CarrySetting
, unsigned &ProcessorIMod
,
120 void getMnemonicAcceptInfo(StringRef Mnemonic
, bool &CanAcceptCarrySet
,
121 bool &CanAcceptPredicationCode
);
123 bool isThumb() const {
124 // FIXME: Can tablegen auto-generate this?
125 return (STI
.getFeatureBits() & ARM::ModeThumb
) != 0;
127 bool isThumbOne() const {
128 return isThumb() && (STI
.getFeatureBits() & ARM::FeatureThumb2
) == 0;
130 bool isThumbTwo() const {
131 return isThumb() && (STI
.getFeatureBits() & ARM::FeatureThumb2
);
133 bool hasV6Ops() const {
134 return STI
.getFeatureBits() & ARM::HasV6Ops
;
136 bool hasV7Ops() const {
137 return STI
.getFeatureBits() & ARM::HasV7Ops
;
140 unsigned FB
= ComputeAvailableFeatures(STI
.ToggleFeature(ARM::ModeThumb
));
141 setAvailableFeatures(FB
);
143 bool isMClass() const {
144 return STI
.getFeatureBits() & ARM::FeatureMClass
;
147 /// @name Auto-generated Match Functions
150 #define GET_ASSEMBLER_HEADER
151 #include "ARMGenAsmMatcher.inc"
155 OperandMatchResultTy
parseITCondCode(SmallVectorImpl
<MCParsedAsmOperand
*>&);
156 OperandMatchResultTy
parseCoprocNumOperand(
157 SmallVectorImpl
<MCParsedAsmOperand
*>&);
158 OperandMatchResultTy
parseCoprocRegOperand(
159 SmallVectorImpl
<MCParsedAsmOperand
*>&);
160 OperandMatchResultTy
parseCoprocOptionOperand(
161 SmallVectorImpl
<MCParsedAsmOperand
*>&);
162 OperandMatchResultTy
parseMemBarrierOptOperand(
163 SmallVectorImpl
<MCParsedAsmOperand
*>&);
164 OperandMatchResultTy
parseProcIFlagsOperand(
165 SmallVectorImpl
<MCParsedAsmOperand
*>&);
166 OperandMatchResultTy
parseMSRMaskOperand(
167 SmallVectorImpl
<MCParsedAsmOperand
*>&);
168 OperandMatchResultTy
parsePKHImm(SmallVectorImpl
<MCParsedAsmOperand
*> &O
,
169 StringRef Op
, int Low
, int High
);
170 OperandMatchResultTy
parsePKHLSLImm(SmallVectorImpl
<MCParsedAsmOperand
*> &O
) {
171 return parsePKHImm(O
, "lsl", 0, 31);
173 OperandMatchResultTy
parsePKHASRImm(SmallVectorImpl
<MCParsedAsmOperand
*> &O
) {
174 return parsePKHImm(O
, "asr", 1, 32);
176 OperandMatchResultTy
parseSetEndImm(SmallVectorImpl
<MCParsedAsmOperand
*>&);
177 OperandMatchResultTy
parseShifterImm(SmallVectorImpl
<MCParsedAsmOperand
*>&);
178 OperandMatchResultTy
parseRotImm(SmallVectorImpl
<MCParsedAsmOperand
*>&);
179 OperandMatchResultTy
parseBitfield(SmallVectorImpl
<MCParsedAsmOperand
*>&);
180 OperandMatchResultTy
parsePostIdxReg(SmallVectorImpl
<MCParsedAsmOperand
*>&);
181 OperandMatchResultTy
parseAM3Offset(SmallVectorImpl
<MCParsedAsmOperand
*>&);
182 OperandMatchResultTy
parseFPImm(SmallVectorImpl
<MCParsedAsmOperand
*>&);
183 OperandMatchResultTy
parseVectorList(SmallVectorImpl
<MCParsedAsmOperand
*>&);
184 OperandMatchResultTy
parseVectorLane(VectorLaneTy
&LaneKind
, unsigned &Index
,
187 // Asm Match Converter Methods
188 void cvtT2LdrdPre(MCInst
&Inst
, const SmallVectorImpl
<MCParsedAsmOperand
*> &);
189 void cvtT2StrdPre(MCInst
&Inst
, const SmallVectorImpl
<MCParsedAsmOperand
*> &);
190 void cvtLdWriteBackRegT2AddrModeImm8(MCInst
&Inst
,
191 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
192 void cvtStWriteBackRegT2AddrModeImm8(MCInst
&Inst
,
193 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
194 void cvtLdWriteBackRegAddrMode2(MCInst
&Inst
,
195 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
196 void cvtLdWriteBackRegAddrModeImm12(MCInst
&Inst
,
197 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
198 void cvtStWriteBackRegAddrModeImm12(MCInst
&Inst
,
199 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
200 void cvtStWriteBackRegAddrMode2(MCInst
&Inst
,
201 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
202 void cvtStWriteBackRegAddrMode3(MCInst
&Inst
,
203 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
204 void cvtLdExtTWriteBackImm(MCInst
&Inst
,
205 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
206 void cvtLdExtTWriteBackReg(MCInst
&Inst
,
207 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
208 void cvtStExtTWriteBackImm(MCInst
&Inst
,
209 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
210 void cvtStExtTWriteBackReg(MCInst
&Inst
,
211 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
212 void cvtLdrdPre(MCInst
&Inst
, const SmallVectorImpl
<MCParsedAsmOperand
*> &);
213 void cvtStrdPre(MCInst
&Inst
, const SmallVectorImpl
<MCParsedAsmOperand
*> &);
214 void cvtLdWriteBackRegAddrMode3(MCInst
&Inst
,
215 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
216 void cvtThumbMultiply(MCInst
&Inst
,
217 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
218 void cvtVLDwbFixed(MCInst
&Inst
,
219 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
220 void cvtVLDwbRegister(MCInst
&Inst
,
221 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
222 void cvtVSTwbFixed(MCInst
&Inst
,
223 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
224 void cvtVSTwbRegister(MCInst
&Inst
,
225 const SmallVectorImpl
<MCParsedAsmOperand
*> &);
226 bool validateInstruction(MCInst
&Inst
,
227 const SmallVectorImpl
<MCParsedAsmOperand
*> &Ops
);
228 bool processInstruction(MCInst
&Inst
,
229 const SmallVectorImpl
<MCParsedAsmOperand
*> &Ops
);
230 bool shouldOmitCCOutOperand(StringRef Mnemonic
,
231 SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
);
234 enum ARMMatchResultTy
{
235 Match_RequiresITBlock
= FIRST_TARGET_MATCH_RESULT_TY
,
236 Match_RequiresNotITBlock
,
238 Match_RequiresThumb2
,
239 #define GET_OPERAND_DIAGNOSTIC_TYPES
240 #include "ARMGenAsmMatcher.inc"
244 ARMAsmParser(MCSubtargetInfo
&_STI
, MCAsmParser
&_Parser
)
245 : MCTargetAsmParser(), STI(_STI
), Parser(_Parser
) {
246 MCAsmParserExtension::Initialize(_Parser
);
248 // Cache the MCRegisterInfo.
249 MRI
= &getContext().getRegisterInfo();
251 // Initialize the set of available features.
252 setAvailableFeatures(ComputeAvailableFeatures(STI
.getFeatureBits()));
254 // Not in an ITBlock to start with.
255 ITState
.CurPosition
= ~0U;
257 // Set ELF header flags.
258 // FIXME: This should eventually end up somewhere else where more
259 // intelligent flag decisions can be made. For now we are just maintaining
260 // the statu/parseDirects quo for ARM and setting EF_ARM_EABI_VER5 as the default.
261 if (MCELFStreamer
*MES
= dyn_cast
<MCELFStreamer
>(&Parser
.getStreamer()))
262 MES
->getAssembler().setELFHeaderEFlags(ELF::EF_ARM_EABI_VER5
);
265 // Implementation of the MCTargetAsmParser interface:
266 bool ParseRegister(unsigned &RegNo
, SMLoc
&StartLoc
, SMLoc
&EndLoc
);
267 bool ParseInstruction(ParseInstructionInfo
&Info
, StringRef Name
,
269 SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
);
270 bool ParseDirective(AsmToken DirectiveID
);
272 unsigned validateTargetOperandClass(MCParsedAsmOperand
*Op
, unsigned Kind
);
273 unsigned checkTargetMatchPredicate(MCInst
&Inst
);
275 bool MatchAndEmitInstruction(SMLoc IDLoc
, unsigned &Opcode
,
276 SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
,
277 MCStreamer
&Out
, unsigned &ErrorInfo
,
278 bool MatchingInlineAsm
);
280 } // end anonymous namespace
284 /// ARMOperand - Instances of this class represent a parsed ARM machine
286 class ARMOperand
: public MCParsedAsmOperand
{
306 k_VectorListAllLanes
,
312 k_BitfieldDescriptor
,
316 SMLoc StartLoc
, EndLoc
;
317 SmallVector
<unsigned, 8> Registers
;
320 ARMCC::CondCodes Val
;
327 struct CoprocOptionOp
{
340 ARM_PROC::IFlags Val
;
356 // A vector register list is a sequential list of 1 to 4 registers.
357 struct VectorListOp
{
364 struct VectorIndexOp
{
372 /// Combined record for all forms of ARM address expressions.
375 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
377 const MCConstantExpr
*OffsetImm
; // Offset immediate value
378 unsigned OffsetRegNum
; // Offset register num, when OffsetImm == NULL
379 ARM_AM::ShiftOpc ShiftType
; // Shift type for OffsetReg
380 unsigned ShiftImm
; // shift for OffsetReg.
381 unsigned Alignment
; // 0 = no alignment specified
382 // n = alignment in bytes (2, 4, 8, 16, or 32)
383 unsigned isNegative
: 1; // Negated OffsetReg? (~'U' bit)
386 struct PostIdxRegOp
{
389 ARM_AM::ShiftOpc ShiftTy
;
393 struct ShifterImmOp
{
398 struct RegShiftedRegOp
{
399 ARM_AM::ShiftOpc ShiftTy
;
405 struct RegShiftedImmOp
{
406 ARM_AM::ShiftOpc ShiftTy
;
423 struct CoprocOptionOp CoprocOption
;
424 struct MBOptOp MBOpt
;
425 struct ITMaskOp ITMask
;
426 struct IFlagsOp IFlags
;
427 struct MMaskOp MMask
;
430 struct VectorListOp VectorList
;
431 struct VectorIndexOp VectorIndex
;
433 struct MemoryOp Memory
;
434 struct PostIdxRegOp PostIdxReg
;
435 struct ShifterImmOp ShifterImm
;
436 struct RegShiftedRegOp RegShiftedReg
;
437 struct RegShiftedImmOp RegShiftedImm
;
438 struct RotImmOp RotImm
;
439 struct BitfieldOp Bitfield
;
442 ARMOperand(KindTy K
) : MCParsedAsmOperand(), Kind(K
) {}
444 ARMOperand(const ARMOperand
&o
) : MCParsedAsmOperand() {
446 StartLoc
= o
.StartLoc
;
463 case k_DPRRegisterList
:
464 case k_SPRRegisterList
:
465 Registers
= o
.Registers
;
468 case k_VectorListAllLanes
:
469 case k_VectorListIndexed
:
470 VectorList
= o
.VectorList
;
477 CoprocOption
= o
.CoprocOption
;
482 case k_MemBarrierOpt
:
488 case k_PostIndexRegister
:
489 PostIdxReg
= o
.PostIdxReg
;
497 case k_ShifterImmediate
:
498 ShifterImm
= o
.ShifterImm
;
500 case k_ShiftedRegister
:
501 RegShiftedReg
= o
.RegShiftedReg
;
503 case k_ShiftedImmediate
:
504 RegShiftedImm
= o
.RegShiftedImm
;
506 case k_RotateImmediate
:
509 case k_BitfieldDescriptor
:
510 Bitfield
= o
.Bitfield
;
513 VectorIndex
= o
.VectorIndex
;
518 /// getStartLoc - Get the location of the first token of this operand.
519 SMLoc
getStartLoc() const { return StartLoc
; }
520 /// getEndLoc - Get the location of the last token of this operand.
521 SMLoc
getEndLoc() const { return EndLoc
; }
522 /// getLocRange - Get the range between the first and last token of this
524 SMRange
getLocRange() const { return SMRange(StartLoc
, EndLoc
); }
526 ARMCC::CondCodes
getCondCode() const {
527 assert(Kind
== k_CondCode
&& "Invalid access!");
531 unsigned getCoproc() const {
532 assert((Kind
== k_CoprocNum
|| Kind
== k_CoprocReg
) && "Invalid access!");
536 StringRef
getToken() const {
537 assert(Kind
== k_Token
&& "Invalid access!");
538 return StringRef(Tok
.Data
, Tok
.Length
);
541 unsigned getReg() const {
542 assert((Kind
== k_Register
|| Kind
== k_CCOut
) && "Invalid access!");
546 const SmallVectorImpl
<unsigned> &getRegList() const {
547 assert((Kind
== k_RegisterList
|| Kind
== k_DPRRegisterList
||
548 Kind
== k_SPRRegisterList
) && "Invalid access!");
552 const MCExpr
*getImm() const {
553 assert(isImm() && "Invalid access!");
557 unsigned getVectorIndex() const {
558 assert(Kind
== k_VectorIndex
&& "Invalid access!");
559 return VectorIndex
.Val
;
562 ARM_MB::MemBOpt
getMemBarrierOpt() const {
563 assert(Kind
== k_MemBarrierOpt
&& "Invalid access!");
567 ARM_PROC::IFlags
getProcIFlags() const {
568 assert(Kind
== k_ProcIFlags
&& "Invalid access!");
572 unsigned getMSRMask() const {
573 assert(Kind
== k_MSRMask
&& "Invalid access!");
577 bool isCoprocNum() const { return Kind
== k_CoprocNum
; }
578 bool isCoprocReg() const { return Kind
== k_CoprocReg
; }
579 bool isCoprocOption() const { return Kind
== k_CoprocOption
; }
580 bool isCondCode() const { return Kind
== k_CondCode
; }
581 bool isCCOut() const { return Kind
== k_CCOut
; }
582 bool isITMask() const { return Kind
== k_ITCondMask
; }
583 bool isITCondCode() const { return Kind
== k_CondCode
; }
584 bool isImm() const { return Kind
== k_Immediate
; }
585 bool isFPImm() const {
586 if (!isImm()) return false;
587 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
588 if (!CE
) return false;
589 int Val
= ARM_AM::getFP32Imm(APInt(32, CE
->getValue()));
592 bool isFBits16() const {
593 if (!isImm()) return false;
594 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
595 if (!CE
) return false;
596 int64_t Value
= CE
->getValue();
597 return Value
>= 0 && Value
<= 16;
599 bool isFBits32() const {
600 if (!isImm()) return false;
601 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
602 if (!CE
) return false;
603 int64_t Value
= CE
->getValue();
604 return Value
>= 1 && Value
<= 32;
606 bool isImm8s4() const {
607 if (!isImm()) return false;
608 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
609 if (!CE
) return false;
610 int64_t Value
= CE
->getValue();
611 return ((Value
& 3) == 0) && Value
>= -1020 && Value
<= 1020;
613 bool isImm0_1020s4() const {
614 if (!isImm()) return false;
615 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
616 if (!CE
) return false;
617 int64_t Value
= CE
->getValue();
618 return ((Value
& 3) == 0) && Value
>= 0 && Value
<= 1020;
620 bool isImm0_508s4() const {
621 if (!isImm()) return false;
622 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
623 if (!CE
) return false;
624 int64_t Value
= CE
->getValue();
625 return ((Value
& 3) == 0) && Value
>= 0 && Value
<= 508;
627 bool isImm0_508s4Neg() const {
628 if (!isImm()) return false;
629 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
630 if (!CE
) return false;
631 int64_t Value
= -CE
->getValue();
632 // explicitly exclude zero. we want that to use the normal 0_508 version.
633 return ((Value
& 3) == 0) && Value
> 0 && Value
<= 508;
635 bool isImm0_255() const {
636 if (!isImm()) return false;
637 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
638 if (!CE
) return false;
639 int64_t Value
= CE
->getValue();
640 return Value
>= 0 && Value
< 256;
642 bool isImm0_4095() const {
643 if (!isImm()) return false;
644 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
645 if (!CE
) return false;
646 int64_t Value
= CE
->getValue();
647 return Value
>= 0 && Value
< 4096;
649 bool isImm0_4095Neg() const {
650 if (!isImm()) return false;
651 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
652 if (!CE
) return false;
653 int64_t Value
= -CE
->getValue();
654 return Value
> 0 && Value
< 4096;
656 bool isImm0_1() const {
657 if (!isImm()) return false;
658 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
659 if (!CE
) return false;
660 int64_t Value
= CE
->getValue();
661 return Value
>= 0 && Value
< 2;
663 bool isImm0_3() const {
664 if (!isImm()) return false;
665 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
666 if (!CE
) return false;
667 int64_t Value
= CE
->getValue();
668 return Value
>= 0 && Value
< 4;
670 bool isImm0_7() const {
671 if (!isImm()) return false;
672 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
673 if (!CE
) return false;
674 int64_t Value
= CE
->getValue();
675 return Value
>= 0 && Value
< 8;
677 bool isImm0_15() const {
678 if (!isImm()) return false;
679 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
680 if (!CE
) return false;
681 int64_t Value
= CE
->getValue();
682 return Value
>= 0 && Value
< 16;
684 bool isImm0_31() const {
685 if (!isImm()) return false;
686 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
687 if (!CE
) return false;
688 int64_t Value
= CE
->getValue();
689 return Value
>= 0 && Value
< 32;
691 bool isImm0_63() const {
692 if (!isImm()) return false;
693 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
694 if (!CE
) return false;
695 int64_t Value
= CE
->getValue();
696 return Value
>= 0 && Value
< 64;
698 bool isImm8() const {
699 if (!isImm()) return false;
700 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
701 if (!CE
) return false;
702 int64_t Value
= CE
->getValue();
705 bool isImm16() const {
706 if (!isImm()) return false;
707 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
708 if (!CE
) return false;
709 int64_t Value
= CE
->getValue();
712 bool isImm32() const {
713 if (!isImm()) return false;
714 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
715 if (!CE
) return false;
716 int64_t Value
= CE
->getValue();
719 bool isShrImm8() const {
720 if (!isImm()) return false;
721 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
722 if (!CE
) return false;
723 int64_t Value
= CE
->getValue();
724 return Value
> 0 && Value
<= 8;
726 bool isShrImm16() const {
727 if (!isImm()) return false;
728 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
729 if (!CE
) return false;
730 int64_t Value
= CE
->getValue();
731 return Value
> 0 && Value
<= 16;
733 bool isShrImm32() const {
734 if (!isImm()) return false;
735 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
736 if (!CE
) return false;
737 int64_t Value
= CE
->getValue();
738 return Value
> 0 && Value
<= 32;
740 bool isShrImm64() const {
741 if (!isImm()) return false;
742 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
743 if (!CE
) return false;
744 int64_t Value
= CE
->getValue();
745 return Value
> 0 && Value
<= 64;
747 bool isImm1_7() const {
748 if (!isImm()) return false;
749 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
750 if (!CE
) return false;
751 int64_t Value
= CE
->getValue();
752 return Value
> 0 && Value
< 8;
754 bool isImm1_15() const {
755 if (!isImm()) return false;
756 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
757 if (!CE
) return false;
758 int64_t Value
= CE
->getValue();
759 return Value
> 0 && Value
< 16;
761 bool isImm1_31() const {
762 if (!isImm()) return false;
763 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
764 if (!CE
) return false;
765 int64_t Value
= CE
->getValue();
766 return Value
> 0 && Value
< 32;
768 bool isImm1_16() const {
769 if (!isImm()) return false;
770 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
771 if (!CE
) return false;
772 int64_t Value
= CE
->getValue();
773 return Value
> 0 && Value
< 17;
775 bool isImm1_32() const {
776 if (!isImm()) return false;
777 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
778 if (!CE
) return false;
779 int64_t Value
= CE
->getValue();
780 return Value
> 0 && Value
< 33;
782 bool isImm0_32() const {
783 if (!isImm()) return false;
784 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
785 if (!CE
) return false;
786 int64_t Value
= CE
->getValue();
787 return Value
>= 0 && Value
< 33;
789 bool isImm0_65535() const {
790 if (!isImm()) return false;
791 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
792 if (!CE
) return false;
793 int64_t Value
= CE
->getValue();
794 return Value
>= 0 && Value
< 65536;
796 bool isImm0_65535Expr() const {
797 if (!isImm()) return false;
798 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
799 // If it's not a constant expression, it'll generate a fixup and be
801 if (!CE
) return true;
802 int64_t Value
= CE
->getValue();
803 return Value
>= 0 && Value
< 65536;
805 bool isImm24bit() const {
806 if (!isImm()) return false;
807 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
808 if (!CE
) return false;
809 int64_t Value
= CE
->getValue();
810 return Value
>= 0 && Value
<= 0xffffff;
812 bool isImmThumbSR() const {
813 if (!isImm()) return false;
814 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
815 if (!CE
) return false;
816 int64_t Value
= CE
->getValue();
817 return Value
> 0 && Value
< 33;
819 bool isPKHLSLImm() const {
820 if (!isImm()) return false;
821 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
822 if (!CE
) return false;
823 int64_t Value
= CE
->getValue();
824 return Value
>= 0 && Value
< 32;
826 bool isPKHASRImm() const {
827 if (!isImm()) return false;
828 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
829 if (!CE
) return false;
830 int64_t Value
= CE
->getValue();
831 return Value
> 0 && Value
<= 32;
833 bool isAdrLabel() const {
834 // If we have an immediate that's not a constant, treat it as a label
835 // reference needing a fixup. If it is a constant, but it can't fit
836 // into shift immediate encoding, we reject it.
837 if (isImm() && !isa
<MCConstantExpr
>(getImm())) return true;
838 else return (isARMSOImm() || isARMSOImmNeg());
840 bool isARMSOImm() const {
841 if (!isImm()) return false;
842 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
843 if (!CE
) return false;
844 int64_t Value
= CE
->getValue();
845 return ARM_AM::getSOImmVal(Value
) != -1;
847 bool isARMSOImmNot() const {
848 if (!isImm()) return false;
849 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
850 if (!CE
) return false;
851 int64_t Value
= CE
->getValue();
852 return ARM_AM::getSOImmVal(~Value
) != -1;
854 bool isARMSOImmNeg() const {
855 if (!isImm()) return false;
856 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
857 if (!CE
) return false;
858 int64_t Value
= CE
->getValue();
859 // Only use this when not representable as a plain so_imm.
860 return ARM_AM::getSOImmVal(Value
) == -1 &&
861 ARM_AM::getSOImmVal(-Value
) != -1;
863 bool isT2SOImm() const {
864 if (!isImm()) return false;
865 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
866 if (!CE
) return false;
867 int64_t Value
= CE
->getValue();
868 return ARM_AM::getT2SOImmVal(Value
) != -1;
870 bool isT2SOImmNot() const {
871 if (!isImm()) return false;
872 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
873 if (!CE
) return false;
874 int64_t Value
= CE
->getValue();
875 return ARM_AM::getT2SOImmVal(~Value
) != -1;
877 bool isT2SOImmNeg() const {
878 if (!isImm()) return false;
879 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
880 if (!CE
) return false;
881 int64_t Value
= CE
->getValue();
882 // Only use this when not representable as a plain so_imm.
883 return ARM_AM::getT2SOImmVal(Value
) == -1 &&
884 ARM_AM::getT2SOImmVal(-Value
) != -1;
886 bool isSetEndImm() const {
887 if (!isImm()) return false;
888 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
889 if (!CE
) return false;
890 int64_t Value
= CE
->getValue();
891 return Value
== 1 || Value
== 0;
893 bool isReg() const { return Kind
== k_Register
; }
894 bool isRegList() const { return Kind
== k_RegisterList
; }
895 bool isDPRRegList() const { return Kind
== k_DPRRegisterList
; }
896 bool isSPRRegList() const { return Kind
== k_SPRRegisterList
; }
897 bool isToken() const { return Kind
== k_Token
; }
898 bool isMemBarrierOpt() const { return Kind
== k_MemBarrierOpt
; }
899 bool isMem() const { return Kind
== k_Memory
; }
900 bool isShifterImm() const { return Kind
== k_ShifterImmediate
; }
901 bool isRegShiftedReg() const { return Kind
== k_ShiftedRegister
; }
902 bool isRegShiftedImm() const { return Kind
== k_ShiftedImmediate
; }
903 bool isRotImm() const { return Kind
== k_RotateImmediate
; }
904 bool isBitfield() const { return Kind
== k_BitfieldDescriptor
; }
905 bool isPostIdxRegShifted() const { return Kind
== k_PostIndexRegister
; }
906 bool isPostIdxReg() const {
907 return Kind
== k_PostIndexRegister
&& PostIdxReg
.ShiftTy
==ARM_AM::no_shift
;
909 bool isMemNoOffset(bool alignOK
= false) const {
912 // No offset of any kind.
913 return Memory
.OffsetRegNum
== 0 && Memory
.OffsetImm
== 0 &&
914 (alignOK
|| Memory
.Alignment
== 0);
916 bool isMemPCRelImm12() const {
917 if (!isMem() || Memory
.OffsetRegNum
!= 0 || Memory
.Alignment
!= 0)
919 // Base register must be PC.
920 if (Memory
.BaseRegNum
!= ARM::PC
)
922 // Immediate offset in range [-4095, 4095].
923 if (!Memory
.OffsetImm
) return true;
924 int64_t Val
= Memory
.OffsetImm
->getValue();
925 return (Val
> -4096 && Val
< 4096) || (Val
== INT32_MIN
);
927 bool isAlignedMemory() const {
928 return isMemNoOffset(true);
930 bool isAddrMode2() const {
931 if (!isMem() || Memory
.Alignment
!= 0) return false;
932 // Check for register offset.
933 if (Memory
.OffsetRegNum
) return true;
934 // Immediate offset in range [-4095, 4095].
935 if (!Memory
.OffsetImm
) return true;
936 int64_t Val
= Memory
.OffsetImm
->getValue();
937 return Val
> -4096 && Val
< 4096;
939 bool isAM2OffsetImm() const {
940 if (!isImm()) return false;
941 // Immediate offset in range [-4095, 4095].
942 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
943 if (!CE
) return false;
944 int64_t Val
= CE
->getValue();
945 return Val
> -4096 && Val
< 4096;
947 bool isAddrMode3() const {
948 // If we have an immediate that's not a constant, treat it as a label
949 // reference needing a fixup. If it is a constant, it's something else
951 if (isImm() && !isa
<MCConstantExpr
>(getImm()))
953 if (!isMem() || Memory
.Alignment
!= 0) return false;
954 // No shifts are legal for AM3.
955 if (Memory
.ShiftType
!= ARM_AM::no_shift
) return false;
956 // Check for register offset.
957 if (Memory
.OffsetRegNum
) return true;
958 // Immediate offset in range [-255, 255].
959 if (!Memory
.OffsetImm
) return true;
960 int64_t Val
= Memory
.OffsetImm
->getValue();
961 // The #-0 offset is encoded as INT32_MIN, and we have to check
963 return (Val
> -256 && Val
< 256) || Val
== INT32_MIN
;
965 bool isAM3Offset() const {
966 if (Kind
!= k_Immediate
&& Kind
!= k_PostIndexRegister
)
968 if (Kind
== k_PostIndexRegister
)
969 return PostIdxReg
.ShiftTy
== ARM_AM::no_shift
;
970 // Immediate offset in range [-255, 255].
971 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
972 if (!CE
) return false;
973 int64_t Val
= CE
->getValue();
974 // Special case, #-0 is INT32_MIN.
975 return (Val
> -256 && Val
< 256) || Val
== INT32_MIN
;
977 bool isAddrMode5() const {
978 // If we have an immediate that's not a constant, treat it as a label
979 // reference needing a fixup. If it is a constant, it's something else
981 if (isImm() && !isa
<MCConstantExpr
>(getImm()))
983 if (!isMem() || Memory
.Alignment
!= 0) return false;
984 // Check for register offset.
985 if (Memory
.OffsetRegNum
) return false;
986 // Immediate offset in range [-1020, 1020] and a multiple of 4.
987 if (!Memory
.OffsetImm
) return true;
988 int64_t Val
= Memory
.OffsetImm
->getValue();
989 return (Val
>= -1020 && Val
<= 1020 && ((Val
& 3) == 0)) ||
992 bool isMemTBB() const {
993 if (!isMem() || !Memory
.OffsetRegNum
|| Memory
.isNegative
||
994 Memory
.ShiftType
!= ARM_AM::no_shift
|| Memory
.Alignment
!= 0)
998 bool isMemTBH() const {
999 if (!isMem() || !Memory
.OffsetRegNum
|| Memory
.isNegative
||
1000 Memory
.ShiftType
!= ARM_AM::lsl
|| Memory
.ShiftImm
!= 1 ||
1001 Memory
.Alignment
!= 0 )
1005 bool isMemRegOffset() const {
1006 if (!isMem() || !Memory
.OffsetRegNum
|| Memory
.Alignment
!= 0)
1010 bool isT2MemRegOffset() const {
1011 if (!isMem() || !Memory
.OffsetRegNum
|| Memory
.isNegative
||
1012 Memory
.Alignment
!= 0)
1014 // Only lsl #{0, 1, 2, 3} allowed.
1015 if (Memory
.ShiftType
== ARM_AM::no_shift
)
1017 if (Memory
.ShiftType
!= ARM_AM::lsl
|| Memory
.ShiftImm
> 3)
1021 bool isMemThumbRR() const {
1022 // Thumb reg+reg addressing is simple. Just two registers, a base and
1023 // an offset. No shifts, negations or any other complicating factors.
1024 if (!isMem() || !Memory
.OffsetRegNum
|| Memory
.isNegative
||
1025 Memory
.ShiftType
!= ARM_AM::no_shift
|| Memory
.Alignment
!= 0)
1027 return isARMLowRegister(Memory
.BaseRegNum
) &&
1028 (!Memory
.OffsetRegNum
|| isARMLowRegister(Memory
.OffsetRegNum
));
1030 bool isMemThumbRIs4() const {
1031 if (!isMem() || Memory
.OffsetRegNum
!= 0 ||
1032 !isARMLowRegister(Memory
.BaseRegNum
) || Memory
.Alignment
!= 0)
1034 // Immediate offset, multiple of 4 in range [0, 124].
1035 if (!Memory
.OffsetImm
) return true;
1036 int64_t Val
= Memory
.OffsetImm
->getValue();
1037 return Val
>= 0 && Val
<= 124 && (Val
% 4) == 0;
1039 bool isMemThumbRIs2() const {
1040 if (!isMem() || Memory
.OffsetRegNum
!= 0 ||
1041 !isARMLowRegister(Memory
.BaseRegNum
) || Memory
.Alignment
!= 0)
1043 // Immediate offset, multiple of 4 in range [0, 62].
1044 if (!Memory
.OffsetImm
) return true;
1045 int64_t Val
= Memory
.OffsetImm
->getValue();
1046 return Val
>= 0 && Val
<= 62 && (Val
% 2) == 0;
1048 bool isMemThumbRIs1() const {
1049 if (!isMem() || Memory
.OffsetRegNum
!= 0 ||
1050 !isARMLowRegister(Memory
.BaseRegNum
) || Memory
.Alignment
!= 0)
1052 // Immediate offset in range [0, 31].
1053 if (!Memory
.OffsetImm
) return true;
1054 int64_t Val
= Memory
.OffsetImm
->getValue();
1055 return Val
>= 0 && Val
<= 31;
1057 bool isMemThumbSPI() const {
1058 if (!isMem() || Memory
.OffsetRegNum
!= 0 ||
1059 Memory
.BaseRegNum
!= ARM::SP
|| Memory
.Alignment
!= 0)
1061 // Immediate offset, multiple of 4 in range [0, 1020].
1062 if (!Memory
.OffsetImm
) return true;
1063 int64_t Val
= Memory
.OffsetImm
->getValue();
1064 return Val
>= 0 && Val
<= 1020 && (Val
% 4) == 0;
1066 bool isMemImm8s4Offset() const {
1067 // If we have an immediate that's not a constant, treat it as a label
1068 // reference needing a fixup. If it is a constant, it's something else
1069 // and we reject it.
1070 if (isImm() && !isa
<MCConstantExpr
>(getImm()))
1072 if (!isMem() || Memory
.OffsetRegNum
!= 0 || Memory
.Alignment
!= 0)
1074 // Immediate offset a multiple of 4 in range [-1020, 1020].
1075 if (!Memory
.OffsetImm
) return true;
1076 int64_t Val
= Memory
.OffsetImm
->getValue();
1077 // Special case, #-0 is INT32_MIN.
1078 return (Val
>= -1020 && Val
<= 1020 && (Val
& 3) == 0) || Val
== INT32_MIN
;
1080 bool isMemImm0_1020s4Offset() const {
1081 if (!isMem() || Memory
.OffsetRegNum
!= 0 || Memory
.Alignment
!= 0)
1083 // Immediate offset a multiple of 4 in range [0, 1020].
1084 if (!Memory
.OffsetImm
) return true;
1085 int64_t Val
= Memory
.OffsetImm
->getValue();
1086 return Val
>= 0 && Val
<= 1020 && (Val
& 3) == 0;
1088 bool isMemImm8Offset() const {
1089 if (!isMem() || Memory
.OffsetRegNum
!= 0 || Memory
.Alignment
!= 0)
1091 // Base reg of PC isn't allowed for these encodings.
1092 if (Memory
.BaseRegNum
== ARM::PC
) return false;
1093 // Immediate offset in range [-255, 255].
1094 if (!Memory
.OffsetImm
) return true;
1095 int64_t Val
= Memory
.OffsetImm
->getValue();
1096 return (Val
== INT32_MIN
) || (Val
> -256 && Val
< 256);
1098 bool isMemPosImm8Offset() const {
1099 if (!isMem() || Memory
.OffsetRegNum
!= 0 || Memory
.Alignment
!= 0)
1101 // Immediate offset in range [0, 255].
1102 if (!Memory
.OffsetImm
) return true;
1103 int64_t Val
= Memory
.OffsetImm
->getValue();
1104 return Val
>= 0 && Val
< 256;
1106 bool isMemNegImm8Offset() const {
1107 if (!isMem() || Memory
.OffsetRegNum
!= 0 || Memory
.Alignment
!= 0)
1109 // Base reg of PC isn't allowed for these encodings.
1110 if (Memory
.BaseRegNum
== ARM::PC
) return false;
1111 // Immediate offset in range [-255, -1].
1112 if (!Memory
.OffsetImm
) return false;
1113 int64_t Val
= Memory
.OffsetImm
->getValue();
1114 return (Val
== INT32_MIN
) || (Val
> -256 && Val
< 0);
1116 bool isMemUImm12Offset() const {
1117 if (!isMem() || Memory
.OffsetRegNum
!= 0 || Memory
.Alignment
!= 0)
1119 // Immediate offset in range [0, 4095].
1120 if (!Memory
.OffsetImm
) return true;
1121 int64_t Val
= Memory
.OffsetImm
->getValue();
1122 return (Val
>= 0 && Val
< 4096);
1124 bool isMemImm12Offset() const {
1125 // If we have an immediate that's not a constant, treat it as a label
1126 // reference needing a fixup. If it is a constant, it's something else
1127 // and we reject it.
1128 if (isImm() && !isa
<MCConstantExpr
>(getImm()))
1131 if (!isMem() || Memory
.OffsetRegNum
!= 0 || Memory
.Alignment
!= 0)
1133 // Immediate offset in range [-4095, 4095].
1134 if (!Memory
.OffsetImm
) return true;
1135 int64_t Val
= Memory
.OffsetImm
->getValue();
1136 return (Val
> -4096 && Val
< 4096) || (Val
== INT32_MIN
);
1138 bool isPostIdxImm8() const {
1139 if (!isImm()) return false;
1140 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1141 if (!CE
) return false;
1142 int64_t Val
= CE
->getValue();
1143 return (Val
> -256 && Val
< 256) || (Val
== INT32_MIN
);
1145 bool isPostIdxImm8s4() const {
1146 if (!isImm()) return false;
1147 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1148 if (!CE
) return false;
1149 int64_t Val
= CE
->getValue();
1150 return ((Val
& 3) == 0 && Val
>= -1020 && Val
<= 1020) ||
1154 bool isMSRMask() const { return Kind
== k_MSRMask
; }
1155 bool isProcIFlags() const { return Kind
== k_ProcIFlags
; }
1158 bool isSingleSpacedVectorList() const {
1159 return Kind
== k_VectorList
&& !VectorList
.isDoubleSpaced
;
1161 bool isDoubleSpacedVectorList() const {
1162 return Kind
== k_VectorList
&& VectorList
.isDoubleSpaced
;
1164 bool isVecListOneD() const {
1165 if (!isSingleSpacedVectorList()) return false;
1166 return VectorList
.Count
== 1;
1169 bool isVecListDPair() const {
1170 if (!isSingleSpacedVectorList()) return false;
1171 return (ARMMCRegisterClasses
[ARM::DPairRegClassID
]
1172 .contains(VectorList
.RegNum
));
1175 bool isVecListThreeD() const {
1176 if (!isSingleSpacedVectorList()) return false;
1177 return VectorList
.Count
== 3;
1180 bool isVecListFourD() const {
1181 if (!isSingleSpacedVectorList()) return false;
1182 return VectorList
.Count
== 4;
1185 bool isVecListDPairSpaced() const {
1186 if (isSingleSpacedVectorList()) return false;
1187 return (ARMMCRegisterClasses
[ARM::DPairSpcRegClassID
]
1188 .contains(VectorList
.RegNum
));
1191 bool isVecListThreeQ() const {
1192 if (!isDoubleSpacedVectorList()) return false;
1193 return VectorList
.Count
== 3;
1196 bool isVecListFourQ() const {
1197 if (!isDoubleSpacedVectorList()) return false;
1198 return VectorList
.Count
== 4;
1201 bool isSingleSpacedVectorAllLanes() const {
1202 return Kind
== k_VectorListAllLanes
&& !VectorList
.isDoubleSpaced
;
1204 bool isDoubleSpacedVectorAllLanes() const {
1205 return Kind
== k_VectorListAllLanes
&& VectorList
.isDoubleSpaced
;
1207 bool isVecListOneDAllLanes() const {
1208 if (!isSingleSpacedVectorAllLanes()) return false;
1209 return VectorList
.Count
== 1;
1212 bool isVecListDPairAllLanes() const {
1213 if (!isSingleSpacedVectorAllLanes()) return false;
1214 return (ARMMCRegisterClasses
[ARM::DPairRegClassID
]
1215 .contains(VectorList
.RegNum
));
1218 bool isVecListDPairSpacedAllLanes() const {
1219 if (!isDoubleSpacedVectorAllLanes()) return false;
1220 return VectorList
.Count
== 2;
1223 bool isVecListThreeDAllLanes() const {
1224 if (!isSingleSpacedVectorAllLanes()) return false;
1225 return VectorList
.Count
== 3;
1228 bool isVecListThreeQAllLanes() const {
1229 if (!isDoubleSpacedVectorAllLanes()) return false;
1230 return VectorList
.Count
== 3;
1233 bool isVecListFourDAllLanes() const {
1234 if (!isSingleSpacedVectorAllLanes()) return false;
1235 return VectorList
.Count
== 4;
1238 bool isVecListFourQAllLanes() const {
1239 if (!isDoubleSpacedVectorAllLanes()) return false;
1240 return VectorList
.Count
== 4;
1243 bool isSingleSpacedVectorIndexed() const {
1244 return Kind
== k_VectorListIndexed
&& !VectorList
.isDoubleSpaced
;
1246 bool isDoubleSpacedVectorIndexed() const {
1247 return Kind
== k_VectorListIndexed
&& VectorList
.isDoubleSpaced
;
1249 bool isVecListOneDByteIndexed() const {
1250 if (!isSingleSpacedVectorIndexed()) return false;
1251 return VectorList
.Count
== 1 && VectorList
.LaneIndex
<= 7;
1254 bool isVecListOneDHWordIndexed() const {
1255 if (!isSingleSpacedVectorIndexed()) return false;
1256 return VectorList
.Count
== 1 && VectorList
.LaneIndex
<= 3;
1259 bool isVecListOneDWordIndexed() const {
1260 if (!isSingleSpacedVectorIndexed()) return false;
1261 return VectorList
.Count
== 1 && VectorList
.LaneIndex
<= 1;
1264 bool isVecListTwoDByteIndexed() const {
1265 if (!isSingleSpacedVectorIndexed()) return false;
1266 return VectorList
.Count
== 2 && VectorList
.LaneIndex
<= 7;
1269 bool isVecListTwoDHWordIndexed() const {
1270 if (!isSingleSpacedVectorIndexed()) return false;
1271 return VectorList
.Count
== 2 && VectorList
.LaneIndex
<= 3;
1274 bool isVecListTwoQWordIndexed() const {
1275 if (!isDoubleSpacedVectorIndexed()) return false;
1276 return VectorList
.Count
== 2 && VectorList
.LaneIndex
<= 1;
1279 bool isVecListTwoQHWordIndexed() const {
1280 if (!isDoubleSpacedVectorIndexed()) return false;
1281 return VectorList
.Count
== 2 && VectorList
.LaneIndex
<= 3;
1284 bool isVecListTwoDWordIndexed() const {
1285 if (!isSingleSpacedVectorIndexed()) return false;
1286 return VectorList
.Count
== 2 && VectorList
.LaneIndex
<= 1;
1289 bool isVecListThreeDByteIndexed() const {
1290 if (!isSingleSpacedVectorIndexed()) return false;
1291 return VectorList
.Count
== 3 && VectorList
.LaneIndex
<= 7;
1294 bool isVecListThreeDHWordIndexed() const {
1295 if (!isSingleSpacedVectorIndexed()) return false;
1296 return VectorList
.Count
== 3 && VectorList
.LaneIndex
<= 3;
1299 bool isVecListThreeQWordIndexed() const {
1300 if (!isDoubleSpacedVectorIndexed()) return false;
1301 return VectorList
.Count
== 3 && VectorList
.LaneIndex
<= 1;
1304 bool isVecListThreeQHWordIndexed() const {
1305 if (!isDoubleSpacedVectorIndexed()) return false;
1306 return VectorList
.Count
== 3 && VectorList
.LaneIndex
<= 3;
1309 bool isVecListThreeDWordIndexed() const {
1310 if (!isSingleSpacedVectorIndexed()) return false;
1311 return VectorList
.Count
== 3 && VectorList
.LaneIndex
<= 1;
1314 bool isVecListFourDByteIndexed() const {
1315 if (!isSingleSpacedVectorIndexed()) return false;
1316 return VectorList
.Count
== 4 && VectorList
.LaneIndex
<= 7;
1319 bool isVecListFourDHWordIndexed() const {
1320 if (!isSingleSpacedVectorIndexed()) return false;
1321 return VectorList
.Count
== 4 && VectorList
.LaneIndex
<= 3;
1324 bool isVecListFourQWordIndexed() const {
1325 if (!isDoubleSpacedVectorIndexed()) return false;
1326 return VectorList
.Count
== 4 && VectorList
.LaneIndex
<= 1;
1329 bool isVecListFourQHWordIndexed() const {
1330 if (!isDoubleSpacedVectorIndexed()) return false;
1331 return VectorList
.Count
== 4 && VectorList
.LaneIndex
<= 3;
1334 bool isVecListFourDWordIndexed() const {
1335 if (!isSingleSpacedVectorIndexed()) return false;
1336 return VectorList
.Count
== 4 && VectorList
.LaneIndex
<= 1;
1339 bool isVectorIndex8() const {
1340 if (Kind
!= k_VectorIndex
) return false;
1341 return VectorIndex
.Val
< 8;
1343 bool isVectorIndex16() const {
1344 if (Kind
!= k_VectorIndex
) return false;
1345 return VectorIndex
.Val
< 4;
1347 bool isVectorIndex32() const {
1348 if (Kind
!= k_VectorIndex
) return false;
1349 return VectorIndex
.Val
< 2;
1352 bool isNEONi8splat() const {
1353 if (!isImm()) return false;
1354 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1355 // Must be a constant.
1356 if (!CE
) return false;
1357 int64_t Value
= CE
->getValue();
1358 // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1360 return Value
>= 0 && Value
< 256;
1363 bool isNEONi16splat() const {
1364 if (!isImm()) return false;
1365 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1366 // Must be a constant.
1367 if (!CE
) return false;
1368 int64_t Value
= CE
->getValue();
1369 // i16 value in the range [0,255] or [0x0100, 0xff00]
1370 return (Value
>= 0 && Value
< 256) || (Value
>= 0x0100 && Value
<= 0xff00);
1373 bool isNEONi32splat() const {
1374 if (!isImm()) return false;
1375 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1376 // Must be a constant.
1377 if (!CE
) return false;
1378 int64_t Value
= CE
->getValue();
1379 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1380 return (Value
>= 0 && Value
< 256) ||
1381 (Value
>= 0x0100 && Value
<= 0xff00) ||
1382 (Value
>= 0x010000 && Value
<= 0xff0000) ||
1383 (Value
>= 0x01000000 && Value
<= 0xff000000);
1386 bool isNEONi32vmov() const {
1387 if (!isImm()) return false;
1388 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1389 // Must be a constant.
1390 if (!CE
) return false;
1391 int64_t Value
= CE
->getValue();
1392 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1393 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1394 return (Value
>= 0 && Value
< 256) ||
1395 (Value
>= 0x0100 && Value
<= 0xff00) ||
1396 (Value
>= 0x010000 && Value
<= 0xff0000) ||
1397 (Value
>= 0x01000000 && Value
<= 0xff000000) ||
1398 (Value
>= 0x01ff && Value
<= 0xffff && (Value
& 0xff) == 0xff) ||
1399 (Value
>= 0x01ffff && Value
<= 0xffffff && (Value
& 0xffff) == 0xffff);
1401 bool isNEONi32vmovNeg() const {
1402 if (!isImm()) return false;
1403 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1404 // Must be a constant.
1405 if (!CE
) return false;
1406 int64_t Value
= ~CE
->getValue();
1407 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1408 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1409 return (Value
>= 0 && Value
< 256) ||
1410 (Value
>= 0x0100 && Value
<= 0xff00) ||
1411 (Value
>= 0x010000 && Value
<= 0xff0000) ||
1412 (Value
>= 0x01000000 && Value
<= 0xff000000) ||
1413 (Value
>= 0x01ff && Value
<= 0xffff && (Value
& 0xff) == 0xff) ||
1414 (Value
>= 0x01ffff && Value
<= 0xffffff && (Value
& 0xffff) == 0xffff);
1417 bool isNEONi64splat() const {
1418 if (!isImm()) return false;
1419 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1420 // Must be a constant.
1421 if (!CE
) return false;
1422 uint64_t Value
= CE
->getValue();
1423 // i64 value with each byte being either 0 or 0xff.
1424 for (unsigned i
= 0; i
< 8; ++i
)
1425 if ((Value
& 0xff) != 0 && (Value
& 0xff) != 0xff) return false;
1429 void addExpr(MCInst
&Inst
, const MCExpr
*Expr
) const {
1430 // Add as immediates when possible. Null MCExpr = 0.
1432 Inst
.addOperand(MCOperand::CreateImm(0));
1433 else if (const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(Expr
))
1434 Inst
.addOperand(MCOperand::CreateImm(CE
->getValue()));
1436 Inst
.addOperand(MCOperand::CreateExpr(Expr
));
1439 void addCondCodeOperands(MCInst
&Inst
, unsigned N
) const {
1440 assert(N
== 2 && "Invalid number of operands!");
1441 Inst
.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1442 unsigned RegNum
= getCondCode() == ARMCC::AL
? 0: ARM::CPSR
;
1443 Inst
.addOperand(MCOperand::CreateReg(RegNum
));
1446 void addCoprocNumOperands(MCInst
&Inst
, unsigned N
) const {
1447 assert(N
== 1 && "Invalid number of operands!");
1448 Inst
.addOperand(MCOperand::CreateImm(getCoproc()));
1451 void addCoprocRegOperands(MCInst
&Inst
, unsigned N
) const {
1452 assert(N
== 1 && "Invalid number of operands!");
1453 Inst
.addOperand(MCOperand::CreateImm(getCoproc()));
1456 void addCoprocOptionOperands(MCInst
&Inst
, unsigned N
) const {
1457 assert(N
== 1 && "Invalid number of operands!");
1458 Inst
.addOperand(MCOperand::CreateImm(CoprocOption
.Val
));
1461 void addITMaskOperands(MCInst
&Inst
, unsigned N
) const {
1462 assert(N
== 1 && "Invalid number of operands!");
1463 Inst
.addOperand(MCOperand::CreateImm(ITMask
.Mask
));
1466 void addITCondCodeOperands(MCInst
&Inst
, unsigned N
) const {
1467 assert(N
== 1 && "Invalid number of operands!");
1468 Inst
.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1471 void addCCOutOperands(MCInst
&Inst
, unsigned N
) const {
1472 assert(N
== 1 && "Invalid number of operands!");
1473 Inst
.addOperand(MCOperand::CreateReg(getReg()));
1476 void addRegOperands(MCInst
&Inst
, unsigned N
) const {
1477 assert(N
== 1 && "Invalid number of operands!");
1478 Inst
.addOperand(MCOperand::CreateReg(getReg()));
1481 void addRegShiftedRegOperands(MCInst
&Inst
, unsigned N
) const {
1482 assert(N
== 3 && "Invalid number of operands!");
1483 assert(isRegShiftedReg() &&
1484 "addRegShiftedRegOperands() on non RegShiftedReg!");
1485 Inst
.addOperand(MCOperand::CreateReg(RegShiftedReg
.SrcReg
));
1486 Inst
.addOperand(MCOperand::CreateReg(RegShiftedReg
.ShiftReg
));
1487 Inst
.addOperand(MCOperand::CreateImm(
1488 ARM_AM::getSORegOpc(RegShiftedReg
.ShiftTy
, RegShiftedReg
.ShiftImm
)));
1491 void addRegShiftedImmOperands(MCInst
&Inst
, unsigned N
) const {
1492 assert(N
== 2 && "Invalid number of operands!");
1493 assert(isRegShiftedImm() &&
1494 "addRegShiftedImmOperands() on non RegShiftedImm!");
1495 Inst
.addOperand(MCOperand::CreateReg(RegShiftedImm
.SrcReg
));
1496 // Shift of #32 is encoded as 0 where permitted
1497 unsigned Imm
= (RegShiftedImm
.ShiftImm
== 32 ? 0 : RegShiftedImm
.ShiftImm
);
1498 Inst
.addOperand(MCOperand::CreateImm(
1499 ARM_AM::getSORegOpc(RegShiftedImm
.ShiftTy
, Imm
)));
1502 void addShifterImmOperands(MCInst
&Inst
, unsigned N
) const {
1503 assert(N
== 1 && "Invalid number of operands!");
1504 Inst
.addOperand(MCOperand::CreateImm((ShifterImm
.isASR
<< 5) |
1508 void addRegListOperands(MCInst
&Inst
, unsigned N
) const {
1509 assert(N
== 1 && "Invalid number of operands!");
1510 const SmallVectorImpl
<unsigned> &RegList
= getRegList();
1511 for (SmallVectorImpl
<unsigned>::const_iterator
1512 I
= RegList
.begin(), E
= RegList
.end(); I
!= E
; ++I
)
1513 Inst
.addOperand(MCOperand::CreateReg(*I
));
1516 void addDPRRegListOperands(MCInst
&Inst
, unsigned N
) const {
1517 addRegListOperands(Inst
, N
);
1520 void addSPRRegListOperands(MCInst
&Inst
, unsigned N
) const {
1521 addRegListOperands(Inst
, N
);
1524 void addRotImmOperands(MCInst
&Inst
, unsigned N
) const {
1525 assert(N
== 1 && "Invalid number of operands!");
1526 // Encoded as val>>3. The printer handles display as 8, 16, 24.
1527 Inst
.addOperand(MCOperand::CreateImm(RotImm
.Imm
>> 3));
1530 void addBitfieldOperands(MCInst
&Inst
, unsigned N
) const {
1531 assert(N
== 1 && "Invalid number of operands!");
1532 // Munge the lsb/width into a bitfield mask.
1533 unsigned lsb
= Bitfield
.LSB
;
1534 unsigned width
= Bitfield
.Width
;
1535 // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1536 uint32_t Mask
= ~(((uint32_t)0xffffffff >> lsb
) << (32 - width
) >>
1537 (32 - (lsb
+ width
)));
1538 Inst
.addOperand(MCOperand::CreateImm(Mask
));
1541 void addImmOperands(MCInst
&Inst
, unsigned N
) const {
1542 assert(N
== 1 && "Invalid number of operands!");
1543 addExpr(Inst
, getImm());
1546 void addFBits16Operands(MCInst
&Inst
, unsigned N
) const {
1547 assert(N
== 1 && "Invalid number of operands!");
1548 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1549 Inst
.addOperand(MCOperand::CreateImm(16 - CE
->getValue()));
1552 void addFBits32Operands(MCInst
&Inst
, unsigned N
) const {
1553 assert(N
== 1 && "Invalid number of operands!");
1554 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1555 Inst
.addOperand(MCOperand::CreateImm(32 - CE
->getValue()));
1558 void addFPImmOperands(MCInst
&Inst
, unsigned N
) const {
1559 assert(N
== 1 && "Invalid number of operands!");
1560 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1561 int Val
= ARM_AM::getFP32Imm(APInt(32, CE
->getValue()));
1562 Inst
.addOperand(MCOperand::CreateImm(Val
));
1565 void addImm8s4Operands(MCInst
&Inst
, unsigned N
) const {
1566 assert(N
== 1 && "Invalid number of operands!");
1567 // FIXME: We really want to scale the value here, but the LDRD/STRD
1568 // instruction don't encode operands that way yet.
1569 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1570 Inst
.addOperand(MCOperand::CreateImm(CE
->getValue()));
1573 void addImm0_1020s4Operands(MCInst
&Inst
, unsigned N
) const {
1574 assert(N
== 1 && "Invalid number of operands!");
1575 // The immediate is scaled by four in the encoding and is stored
1576 // in the MCInst as such. Lop off the low two bits here.
1577 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1578 Inst
.addOperand(MCOperand::CreateImm(CE
->getValue() / 4));
1581 void addImm0_508s4NegOperands(MCInst
&Inst
, unsigned N
) const {
1582 assert(N
== 1 && "Invalid number of operands!");
1583 // The immediate is scaled by four in the encoding and is stored
1584 // in the MCInst as such. Lop off the low two bits here.
1585 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1586 Inst
.addOperand(MCOperand::CreateImm(-(CE
->getValue() / 4)));
1589 void addImm0_508s4Operands(MCInst
&Inst
, unsigned N
) const {
1590 assert(N
== 1 && "Invalid number of operands!");
1591 // The immediate is scaled by four in the encoding and is stored
1592 // in the MCInst as such. Lop off the low two bits here.
1593 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1594 Inst
.addOperand(MCOperand::CreateImm(CE
->getValue() / 4));
1597 void addImm1_16Operands(MCInst
&Inst
, unsigned N
) const {
1598 assert(N
== 1 && "Invalid number of operands!");
1599 // The constant encodes as the immediate-1, and we store in the instruction
1600 // the bits as encoded, so subtract off one here.
1601 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1602 Inst
.addOperand(MCOperand::CreateImm(CE
->getValue() - 1));
1605 void addImm1_32Operands(MCInst
&Inst
, unsigned N
) const {
1606 assert(N
== 1 && "Invalid number of operands!");
1607 // The constant encodes as the immediate-1, and we store in the instruction
1608 // the bits as encoded, so subtract off one here.
1609 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1610 Inst
.addOperand(MCOperand::CreateImm(CE
->getValue() - 1));
1613 void addImmThumbSROperands(MCInst
&Inst
, unsigned N
) const {
1614 assert(N
== 1 && "Invalid number of operands!");
1615 // The constant encodes as the immediate, except for 32, which encodes as
1617 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1618 unsigned Imm
= CE
->getValue();
1619 Inst
.addOperand(MCOperand::CreateImm((Imm
== 32 ? 0 : Imm
)));
1622 void addPKHASRImmOperands(MCInst
&Inst
, unsigned N
) const {
1623 assert(N
== 1 && "Invalid number of operands!");
1624 // An ASR value of 32 encodes as 0, so that's how we want to add it to
1625 // the instruction as well.
1626 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1627 int Val
= CE
->getValue();
1628 Inst
.addOperand(MCOperand::CreateImm(Val
== 32 ? 0 : Val
));
1631 void addT2SOImmNotOperands(MCInst
&Inst
, unsigned N
) const {
1632 assert(N
== 1 && "Invalid number of operands!");
1633 // The operand is actually a t2_so_imm, but we have its bitwise
1634 // negation in the assembly source, so twiddle it here.
1635 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1636 Inst
.addOperand(MCOperand::CreateImm(~CE
->getValue()));
1639 void addT2SOImmNegOperands(MCInst
&Inst
, unsigned N
) const {
1640 assert(N
== 1 && "Invalid number of operands!");
1641 // The operand is actually a t2_so_imm, but we have its
1642 // negation in the assembly source, so twiddle it here.
1643 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1644 Inst
.addOperand(MCOperand::CreateImm(-CE
->getValue()));
1647 void addImm0_4095NegOperands(MCInst
&Inst
, unsigned N
) const {
1648 assert(N
== 1 && "Invalid number of operands!");
1649 // The operand is actually an imm0_4095, but we have its
1650 // negation in the assembly source, so twiddle it here.
1651 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1652 Inst
.addOperand(MCOperand::CreateImm(-CE
->getValue()));
1655 void addARMSOImmNotOperands(MCInst
&Inst
, unsigned N
) const {
1656 assert(N
== 1 && "Invalid number of operands!");
1657 // The operand is actually a so_imm, but we have its bitwise
1658 // negation in the assembly source, so twiddle it here.
1659 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1660 Inst
.addOperand(MCOperand::CreateImm(~CE
->getValue()));
1663 void addARMSOImmNegOperands(MCInst
&Inst
, unsigned N
) const {
1664 assert(N
== 1 && "Invalid number of operands!");
1665 // The operand is actually a so_imm, but we have its
1666 // negation in the assembly source, so twiddle it here.
1667 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1668 Inst
.addOperand(MCOperand::CreateImm(-CE
->getValue()));
1671 void addMemBarrierOptOperands(MCInst
&Inst
, unsigned N
) const {
1672 assert(N
== 1 && "Invalid number of operands!");
1673 Inst
.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1676 void addMemNoOffsetOperands(MCInst
&Inst
, unsigned N
) const {
1677 assert(N
== 1 && "Invalid number of operands!");
1678 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1681 void addMemPCRelImm12Operands(MCInst
&Inst
, unsigned N
) const {
1682 assert(N
== 1 && "Invalid number of operands!");
1683 int32_t Imm
= Memory
.OffsetImm
->getValue();
1684 // FIXME: Handle #-0
1685 if (Imm
== INT32_MIN
) Imm
= 0;
1686 Inst
.addOperand(MCOperand::CreateImm(Imm
));
1689 void addAdrLabelOperands(MCInst
&Inst
, unsigned N
) const {
1690 assert(N
== 1 && "Invalid number of operands!");
1691 assert(isImm() && "Not an immediate!");
1693 // If we have an immediate that's not a constant, treat it as a label
1694 // reference needing a fixup.
1695 if (!isa
<MCConstantExpr
>(getImm())) {
1696 Inst
.addOperand(MCOperand::CreateExpr(getImm()));
1700 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1701 int Val
= CE
->getValue();
1702 Inst
.addOperand(MCOperand::CreateImm(Val
));
1705 void addAlignedMemoryOperands(MCInst
&Inst
, unsigned N
) const {
1706 assert(N
== 2 && "Invalid number of operands!");
1707 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1708 Inst
.addOperand(MCOperand::CreateImm(Memory
.Alignment
));
1711 void addAddrMode2Operands(MCInst
&Inst
, unsigned N
) const {
1712 assert(N
== 3 && "Invalid number of operands!");
1713 int32_t Val
= Memory
.OffsetImm
? Memory
.OffsetImm
->getValue() : 0;
1714 if (!Memory
.OffsetRegNum
) {
1715 ARM_AM::AddrOpc AddSub
= Val
< 0 ? ARM_AM::sub
: ARM_AM::add
;
1716 // Special case for #-0
1717 if (Val
== INT32_MIN
) Val
= 0;
1718 if (Val
< 0) Val
= -Val
;
1719 Val
= ARM_AM::getAM2Opc(AddSub
, Val
, ARM_AM::no_shift
);
1721 // For register offset, we encode the shift type and negation flag
1723 Val
= ARM_AM::getAM2Opc(Memory
.isNegative
? ARM_AM::sub
: ARM_AM::add
,
1724 Memory
.ShiftImm
, Memory
.ShiftType
);
1726 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1727 Inst
.addOperand(MCOperand::CreateReg(Memory
.OffsetRegNum
));
1728 Inst
.addOperand(MCOperand::CreateImm(Val
));
1731 void addAM2OffsetImmOperands(MCInst
&Inst
, unsigned N
) const {
1732 assert(N
== 2 && "Invalid number of operands!");
1733 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1734 assert(CE
&& "non-constant AM2OffsetImm operand!");
1735 int32_t Val
= CE
->getValue();
1736 ARM_AM::AddrOpc AddSub
= Val
< 0 ? ARM_AM::sub
: ARM_AM::add
;
1737 // Special case for #-0
1738 if (Val
== INT32_MIN
) Val
= 0;
1739 if (Val
< 0) Val
= -Val
;
1740 Val
= ARM_AM::getAM2Opc(AddSub
, Val
, ARM_AM::no_shift
);
1741 Inst
.addOperand(MCOperand::CreateReg(0));
1742 Inst
.addOperand(MCOperand::CreateImm(Val
));
1745 void addAddrMode3Operands(MCInst
&Inst
, unsigned N
) const {
1746 assert(N
== 3 && "Invalid number of operands!");
1747 // If we have an immediate that's not a constant, treat it as a label
1748 // reference needing a fixup. If it is a constant, it's something else
1749 // and we reject it.
1751 Inst
.addOperand(MCOperand::CreateExpr(getImm()));
1752 Inst
.addOperand(MCOperand::CreateReg(0));
1753 Inst
.addOperand(MCOperand::CreateImm(0));
1757 int32_t Val
= Memory
.OffsetImm
? Memory
.OffsetImm
->getValue() : 0;
1758 if (!Memory
.OffsetRegNum
) {
1759 ARM_AM::AddrOpc AddSub
= Val
< 0 ? ARM_AM::sub
: ARM_AM::add
;
1760 // Special case for #-0
1761 if (Val
== INT32_MIN
) Val
= 0;
1762 if (Val
< 0) Val
= -Val
;
1763 Val
= ARM_AM::getAM3Opc(AddSub
, Val
);
1765 // For register offset, we encode the shift type and negation flag
1767 Val
= ARM_AM::getAM3Opc(Memory
.isNegative
? ARM_AM::sub
: ARM_AM::add
, 0);
1769 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1770 Inst
.addOperand(MCOperand::CreateReg(Memory
.OffsetRegNum
));
1771 Inst
.addOperand(MCOperand::CreateImm(Val
));
1774 void addAM3OffsetOperands(MCInst
&Inst
, unsigned N
) const {
1775 assert(N
== 2 && "Invalid number of operands!");
1776 if (Kind
== k_PostIndexRegister
) {
1778 ARM_AM::getAM3Opc(PostIdxReg
.isAdd
? ARM_AM::add
: ARM_AM::sub
, 0);
1779 Inst
.addOperand(MCOperand::CreateReg(PostIdxReg
.RegNum
));
1780 Inst
.addOperand(MCOperand::CreateImm(Val
));
1785 const MCConstantExpr
*CE
= static_cast<const MCConstantExpr
*>(getImm());
1786 int32_t Val
= CE
->getValue();
1787 ARM_AM::AddrOpc AddSub
= Val
< 0 ? ARM_AM::sub
: ARM_AM::add
;
1788 // Special case for #-0
1789 if (Val
== INT32_MIN
) Val
= 0;
1790 if (Val
< 0) Val
= -Val
;
1791 Val
= ARM_AM::getAM3Opc(AddSub
, Val
);
1792 Inst
.addOperand(MCOperand::CreateReg(0));
1793 Inst
.addOperand(MCOperand::CreateImm(Val
));
1796 void addAddrMode5Operands(MCInst
&Inst
, unsigned N
) const {
1797 assert(N
== 2 && "Invalid number of operands!");
1798 // If we have an immediate that's not a constant, treat it as a label
1799 // reference needing a fixup. If it is a constant, it's something else
1800 // and we reject it.
1802 Inst
.addOperand(MCOperand::CreateExpr(getImm()));
1803 Inst
.addOperand(MCOperand::CreateImm(0));
1807 // The lower two bits are always zero and as such are not encoded.
1808 int32_t Val
= Memory
.OffsetImm
? Memory
.OffsetImm
->getValue() / 4 : 0;
1809 ARM_AM::AddrOpc AddSub
= Val
< 0 ? ARM_AM::sub
: ARM_AM::add
;
1810 // Special case for #-0
1811 if (Val
== INT32_MIN
) Val
= 0;
1812 if (Val
< 0) Val
= -Val
;
1813 Val
= ARM_AM::getAM5Opc(AddSub
, Val
);
1814 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1815 Inst
.addOperand(MCOperand::CreateImm(Val
));
1818 void addMemImm8s4OffsetOperands(MCInst
&Inst
, unsigned N
) const {
1819 assert(N
== 2 && "Invalid number of operands!");
1820 // If we have an immediate that's not a constant, treat it as a label
1821 // reference needing a fixup. If it is a constant, it's something else
1822 // and we reject it.
1824 Inst
.addOperand(MCOperand::CreateExpr(getImm()));
1825 Inst
.addOperand(MCOperand::CreateImm(0));
1829 int64_t Val
= Memory
.OffsetImm
? Memory
.OffsetImm
->getValue() : 0;
1830 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1831 Inst
.addOperand(MCOperand::CreateImm(Val
));
1834 void addMemImm0_1020s4OffsetOperands(MCInst
&Inst
, unsigned N
) const {
1835 assert(N
== 2 && "Invalid number of operands!");
1836 // The lower two bits are always zero and as such are not encoded.
1837 int32_t Val
= Memory
.OffsetImm
? Memory
.OffsetImm
->getValue() / 4 : 0;
1838 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1839 Inst
.addOperand(MCOperand::CreateImm(Val
));
1842 void addMemImm8OffsetOperands(MCInst
&Inst
, unsigned N
) const {
1843 assert(N
== 2 && "Invalid number of operands!");
1844 int64_t Val
= Memory
.OffsetImm
? Memory
.OffsetImm
->getValue() : 0;
1845 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1846 Inst
.addOperand(MCOperand::CreateImm(Val
));
1849 void addMemPosImm8OffsetOperands(MCInst
&Inst
, unsigned N
) const {
1850 addMemImm8OffsetOperands(Inst
, N
);
1853 void addMemNegImm8OffsetOperands(MCInst
&Inst
, unsigned N
) const {
1854 addMemImm8OffsetOperands(Inst
, N
);
1857 void addMemUImm12OffsetOperands(MCInst
&Inst
, unsigned N
) const {
1858 assert(N
== 2 && "Invalid number of operands!");
1859 // If this is an immediate, it's a label reference.
1861 addExpr(Inst
, getImm());
1862 Inst
.addOperand(MCOperand::CreateImm(0));
1866 // Otherwise, it's a normal memory reg+offset.
1867 int64_t Val
= Memory
.OffsetImm
? Memory
.OffsetImm
->getValue() : 0;
1868 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1869 Inst
.addOperand(MCOperand::CreateImm(Val
));
1872 void addMemImm12OffsetOperands(MCInst
&Inst
, unsigned N
) const {
1873 assert(N
== 2 && "Invalid number of operands!");
1874 // If this is an immediate, it's a label reference.
1876 addExpr(Inst
, getImm());
1877 Inst
.addOperand(MCOperand::CreateImm(0));
1881 // Otherwise, it's a normal memory reg+offset.
1882 int64_t Val
= Memory
.OffsetImm
? Memory
.OffsetImm
->getValue() : 0;
1883 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1884 Inst
.addOperand(MCOperand::CreateImm(Val
));
1887 void addMemTBBOperands(MCInst
&Inst
, unsigned N
) const {
1888 assert(N
== 2 && "Invalid number of operands!");
1889 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1890 Inst
.addOperand(MCOperand::CreateReg(Memory
.OffsetRegNum
));
1893 void addMemTBHOperands(MCInst
&Inst
, unsigned N
) const {
1894 assert(N
== 2 && "Invalid number of operands!");
1895 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1896 Inst
.addOperand(MCOperand::CreateReg(Memory
.OffsetRegNum
));
1899 void addMemRegOffsetOperands(MCInst
&Inst
, unsigned N
) const {
1900 assert(N
== 3 && "Invalid number of operands!");
1902 ARM_AM::getAM2Opc(Memory
.isNegative
? ARM_AM::sub
: ARM_AM::add
,
1903 Memory
.ShiftImm
, Memory
.ShiftType
);
1904 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1905 Inst
.addOperand(MCOperand::CreateReg(Memory
.OffsetRegNum
));
1906 Inst
.addOperand(MCOperand::CreateImm(Val
));
1909 void addT2MemRegOffsetOperands(MCInst
&Inst
, unsigned N
) const {
1910 assert(N
== 3 && "Invalid number of operands!");
1911 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1912 Inst
.addOperand(MCOperand::CreateReg(Memory
.OffsetRegNum
));
1913 Inst
.addOperand(MCOperand::CreateImm(Memory
.ShiftImm
));
1916 void addMemThumbRROperands(MCInst
&Inst
, unsigned N
) const {
1917 assert(N
== 2 && "Invalid number of operands!");
1918 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1919 Inst
.addOperand(MCOperand::CreateReg(Memory
.OffsetRegNum
));
1922 void addMemThumbRIs4Operands(MCInst
&Inst
, unsigned N
) const {
1923 assert(N
== 2 && "Invalid number of operands!");
1924 int64_t Val
= Memory
.OffsetImm
? (Memory
.OffsetImm
->getValue() / 4) : 0;
1925 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1926 Inst
.addOperand(MCOperand::CreateImm(Val
));
1929 void addMemThumbRIs2Operands(MCInst
&Inst
, unsigned N
) const {
1930 assert(N
== 2 && "Invalid number of operands!");
1931 int64_t Val
= Memory
.OffsetImm
? (Memory
.OffsetImm
->getValue() / 2) : 0;
1932 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1933 Inst
.addOperand(MCOperand::CreateImm(Val
));
1936 void addMemThumbRIs1Operands(MCInst
&Inst
, unsigned N
) const {
1937 assert(N
== 2 && "Invalid number of operands!");
1938 int64_t Val
= Memory
.OffsetImm
? (Memory
.OffsetImm
->getValue()) : 0;
1939 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1940 Inst
.addOperand(MCOperand::CreateImm(Val
));
1943 void addMemThumbSPIOperands(MCInst
&Inst
, unsigned N
) const {
1944 assert(N
== 2 && "Invalid number of operands!");
1945 int64_t Val
= Memory
.OffsetImm
? (Memory
.OffsetImm
->getValue() / 4) : 0;
1946 Inst
.addOperand(MCOperand::CreateReg(Memory
.BaseRegNum
));
1947 Inst
.addOperand(MCOperand::CreateImm(Val
));
1950 void addPostIdxImm8Operands(MCInst
&Inst
, unsigned N
) const {
1951 assert(N
== 1 && "Invalid number of operands!");
1952 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1953 assert(CE
&& "non-constant post-idx-imm8 operand!");
1954 int Imm
= CE
->getValue();
1955 bool isAdd
= Imm
>= 0;
1956 if (Imm
== INT32_MIN
) Imm
= 0;
1957 Imm
= (Imm
< 0 ? -Imm
: Imm
) | (int)isAdd
<< 8;
1958 Inst
.addOperand(MCOperand::CreateImm(Imm
));
1961 void addPostIdxImm8s4Operands(MCInst
&Inst
, unsigned N
) const {
1962 assert(N
== 1 && "Invalid number of operands!");
1963 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
1964 assert(CE
&& "non-constant post-idx-imm8s4 operand!");
1965 int Imm
= CE
->getValue();
1966 bool isAdd
= Imm
>= 0;
1967 if (Imm
== INT32_MIN
) Imm
= 0;
1968 // Immediate is scaled by 4.
1969 Imm
= ((Imm
< 0 ? -Imm
: Imm
) / 4) | (int)isAdd
<< 8;
1970 Inst
.addOperand(MCOperand::CreateImm(Imm
));
1973 void addPostIdxRegOperands(MCInst
&Inst
, unsigned N
) const {
1974 assert(N
== 2 && "Invalid number of operands!");
1975 Inst
.addOperand(MCOperand::CreateReg(PostIdxReg
.RegNum
));
1976 Inst
.addOperand(MCOperand::CreateImm(PostIdxReg
.isAdd
));
1979 void addPostIdxRegShiftedOperands(MCInst
&Inst
, unsigned N
) const {
1980 assert(N
== 2 && "Invalid number of operands!");
1981 Inst
.addOperand(MCOperand::CreateReg(PostIdxReg
.RegNum
));
1982 // The sign, shift type, and shift amount are encoded in a single operand
1983 // using the AM2 encoding helpers.
1984 ARM_AM::AddrOpc opc
= PostIdxReg
.isAdd
? ARM_AM::add
: ARM_AM::sub
;
1985 unsigned Imm
= ARM_AM::getAM2Opc(opc
, PostIdxReg
.ShiftImm
,
1986 PostIdxReg
.ShiftTy
);
1987 Inst
.addOperand(MCOperand::CreateImm(Imm
));
1990 void addMSRMaskOperands(MCInst
&Inst
, unsigned N
) const {
1991 assert(N
== 1 && "Invalid number of operands!");
1992 Inst
.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1995 void addProcIFlagsOperands(MCInst
&Inst
, unsigned N
) const {
1996 assert(N
== 1 && "Invalid number of operands!");
1997 Inst
.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
2000 void addVecListOperands(MCInst
&Inst
, unsigned N
) const {
2001 assert(N
== 1 && "Invalid number of operands!");
2002 Inst
.addOperand(MCOperand::CreateReg(VectorList
.RegNum
));
2005 void addVecListIndexedOperands(MCInst
&Inst
, unsigned N
) const {
2006 assert(N
== 2 && "Invalid number of operands!");
2007 Inst
.addOperand(MCOperand::CreateReg(VectorList
.RegNum
));
2008 Inst
.addOperand(MCOperand::CreateImm(VectorList
.LaneIndex
));
2011 void addVectorIndex8Operands(MCInst
&Inst
, unsigned N
) const {
2012 assert(N
== 1 && "Invalid number of operands!");
2013 Inst
.addOperand(MCOperand::CreateImm(getVectorIndex()));
2016 void addVectorIndex16Operands(MCInst
&Inst
, unsigned N
) const {
2017 assert(N
== 1 && "Invalid number of operands!");
2018 Inst
.addOperand(MCOperand::CreateImm(getVectorIndex()));
2021 void addVectorIndex32Operands(MCInst
&Inst
, unsigned N
) const {
2022 assert(N
== 1 && "Invalid number of operands!");
2023 Inst
.addOperand(MCOperand::CreateImm(getVectorIndex()));
2026 void addNEONi8splatOperands(MCInst
&Inst
, unsigned N
) const {
2027 assert(N
== 1 && "Invalid number of operands!");
2028 // The immediate encodes the type of constant as well as the value.
2029 // Mask in that this is an i8 splat.
2030 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
2031 Inst
.addOperand(MCOperand::CreateImm(CE
->getValue() | 0xe00));
2034 void addNEONi16splatOperands(MCInst
&Inst
, unsigned N
) const {
2035 assert(N
== 1 && "Invalid number of operands!");
2036 // The immediate encodes the type of constant as well as the value.
2037 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
2038 unsigned Value
= CE
->getValue();
2040 Value
= (Value
>> 8) | 0xa00;
2043 Inst
.addOperand(MCOperand::CreateImm(Value
));
2046 void addNEONi32splatOperands(MCInst
&Inst
, unsigned N
) const {
2047 assert(N
== 1 && "Invalid number of operands!");
2048 // The immediate encodes the type of constant as well as the value.
2049 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
2050 unsigned Value
= CE
->getValue();
2051 if (Value
>= 256 && Value
<= 0xff00)
2052 Value
= (Value
>> 8) | 0x200;
2053 else if (Value
> 0xffff && Value
<= 0xff0000)
2054 Value
= (Value
>> 16) | 0x400;
2055 else if (Value
> 0xffffff)
2056 Value
= (Value
>> 24) | 0x600;
2057 Inst
.addOperand(MCOperand::CreateImm(Value
));
2060 void addNEONi32vmovOperands(MCInst
&Inst
, unsigned N
) const {
2061 assert(N
== 1 && "Invalid number of operands!");
2062 // The immediate encodes the type of constant as well as the value.
2063 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
2064 unsigned Value
= CE
->getValue();
2065 if (Value
>= 256 && Value
<= 0xffff)
2066 Value
= (Value
>> 8) | ((Value
& 0xff) ? 0xc00 : 0x200);
2067 else if (Value
> 0xffff && Value
<= 0xffffff)
2068 Value
= (Value
>> 16) | ((Value
& 0xff) ? 0xd00 : 0x400);
2069 else if (Value
> 0xffffff)
2070 Value
= (Value
>> 24) | 0x600;
2071 Inst
.addOperand(MCOperand::CreateImm(Value
));
2074 void addNEONi32vmovNegOperands(MCInst
&Inst
, unsigned N
) const {
2075 assert(N
== 1 && "Invalid number of operands!");
2076 // The immediate encodes the type of constant as well as the value.
2077 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
2078 unsigned Value
= ~CE
->getValue();
2079 if (Value
>= 256 && Value
<= 0xffff)
2080 Value
= (Value
>> 8) | ((Value
& 0xff) ? 0xc00 : 0x200);
2081 else if (Value
> 0xffff && Value
<= 0xffffff)
2082 Value
= (Value
>> 16) | ((Value
& 0xff) ? 0xd00 : 0x400);
2083 else if (Value
> 0xffffff)
2084 Value
= (Value
>> 24) | 0x600;
2085 Inst
.addOperand(MCOperand::CreateImm(Value
));
2088 void addNEONi64splatOperands(MCInst
&Inst
, unsigned N
) const {
2089 assert(N
== 1 && "Invalid number of operands!");
2090 // The immediate encodes the type of constant as well as the value.
2091 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(getImm());
2092 uint64_t Value
= CE
->getValue();
2094 for (unsigned i
= 0; i
< 8; ++i
, Value
>>= 8) {
2095 Imm
|= (Value
& 1) << i
;
2097 Inst
.addOperand(MCOperand::CreateImm(Imm
| 0x1e00));
2100 virtual void print(raw_ostream
&OS
) const;
2102 static ARMOperand
*CreateITMask(unsigned Mask
, SMLoc S
) {
2103 ARMOperand
*Op
= new ARMOperand(k_ITCondMask
);
2104 Op
->ITMask
.Mask
= Mask
;
2110 static ARMOperand
*CreateCondCode(ARMCC::CondCodes CC
, SMLoc S
) {
2111 ARMOperand
*Op
= new ARMOperand(k_CondCode
);
2118 static ARMOperand
*CreateCoprocNum(unsigned CopVal
, SMLoc S
) {
2119 ARMOperand
*Op
= new ARMOperand(k_CoprocNum
);
2120 Op
->Cop
.Val
= CopVal
;
2126 static ARMOperand
*CreateCoprocReg(unsigned CopVal
, SMLoc S
) {
2127 ARMOperand
*Op
= new ARMOperand(k_CoprocReg
);
2128 Op
->Cop
.Val
= CopVal
;
2134 static ARMOperand
*CreateCoprocOption(unsigned Val
, SMLoc S
, SMLoc E
) {
2135 ARMOperand
*Op
= new ARMOperand(k_CoprocOption
);
2142 static ARMOperand
*CreateCCOut(unsigned RegNum
, SMLoc S
) {
2143 ARMOperand
*Op
= new ARMOperand(k_CCOut
);
2144 Op
->Reg
.RegNum
= RegNum
;
2150 static ARMOperand
*CreateToken(StringRef Str
, SMLoc S
) {
2151 ARMOperand
*Op
= new ARMOperand(k_Token
);
2152 Op
->Tok
.Data
= Str
.data();
2153 Op
->Tok
.Length
= Str
.size();
2159 static ARMOperand
*CreateReg(unsigned RegNum
, SMLoc S
, SMLoc E
) {
2160 ARMOperand
*Op
= new ARMOperand(k_Register
);
2161 Op
->Reg
.RegNum
= RegNum
;
2167 static ARMOperand
*CreateShiftedRegister(ARM_AM::ShiftOpc ShTy
,
2172 ARMOperand
*Op
= new ARMOperand(k_ShiftedRegister
);
2173 Op
->RegShiftedReg
.ShiftTy
= ShTy
;
2174 Op
->RegShiftedReg
.SrcReg
= SrcReg
;
2175 Op
->RegShiftedReg
.ShiftReg
= ShiftReg
;
2176 Op
->RegShiftedReg
.ShiftImm
= ShiftImm
;
2182 static ARMOperand
*CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy
,
2186 ARMOperand
*Op
= new ARMOperand(k_ShiftedImmediate
);
2187 Op
->RegShiftedImm
.ShiftTy
= ShTy
;
2188 Op
->RegShiftedImm
.SrcReg
= SrcReg
;
2189 Op
->RegShiftedImm
.ShiftImm
= ShiftImm
;
2195 static ARMOperand
*CreateShifterImm(bool isASR
, unsigned Imm
,
2197 ARMOperand
*Op
= new ARMOperand(k_ShifterImmediate
);
2198 Op
->ShifterImm
.isASR
= isASR
;
2199 Op
->ShifterImm
.Imm
= Imm
;
2205 static ARMOperand
*CreateRotImm(unsigned Imm
, SMLoc S
, SMLoc E
) {
2206 ARMOperand
*Op
= new ARMOperand(k_RotateImmediate
);
2207 Op
->RotImm
.Imm
= Imm
;
2213 static ARMOperand
*CreateBitfield(unsigned LSB
, unsigned Width
,
2215 ARMOperand
*Op
= new ARMOperand(k_BitfieldDescriptor
);
2216 Op
->Bitfield
.LSB
= LSB
;
2217 Op
->Bitfield
.Width
= Width
;
2224 CreateRegList(const SmallVectorImpl
<std::pair
<unsigned, SMLoc
> > &Regs
,
2225 SMLoc StartLoc
, SMLoc EndLoc
) {
2226 KindTy Kind
= k_RegisterList
;
2228 if (ARMMCRegisterClasses
[ARM::DPRRegClassID
].contains(Regs
.front().first
))
2229 Kind
= k_DPRRegisterList
;
2230 else if (ARMMCRegisterClasses
[ARM::SPRRegClassID
].
2231 contains(Regs
.front().first
))
2232 Kind
= k_SPRRegisterList
;
2234 ARMOperand
*Op
= new ARMOperand(Kind
);
2235 for (SmallVectorImpl
<std::pair
<unsigned, SMLoc
> >::const_iterator
2236 I
= Regs
.begin(), E
= Regs
.end(); I
!= E
; ++I
)
2237 Op
->Registers
.push_back(I
->first
);
2238 array_pod_sort(Op
->Registers
.begin(), Op
->Registers
.end());
2239 Op
->StartLoc
= StartLoc
;
2240 Op
->EndLoc
= EndLoc
;
2244 static ARMOperand
*CreateVectorList(unsigned RegNum
, unsigned Count
,
2245 bool isDoubleSpaced
, SMLoc S
, SMLoc E
) {
2246 ARMOperand
*Op
= new ARMOperand(k_VectorList
);
2247 Op
->VectorList
.RegNum
= RegNum
;
2248 Op
->VectorList
.Count
= Count
;
2249 Op
->VectorList
.isDoubleSpaced
= isDoubleSpaced
;
2255 static ARMOperand
*CreateVectorListAllLanes(unsigned RegNum
, unsigned Count
,
2256 bool isDoubleSpaced
,
2258 ARMOperand
*Op
= new ARMOperand(k_VectorListAllLanes
);
2259 Op
->VectorList
.RegNum
= RegNum
;
2260 Op
->VectorList
.Count
= Count
;
2261 Op
->VectorList
.isDoubleSpaced
= isDoubleSpaced
;
2267 static ARMOperand
*CreateVectorListIndexed(unsigned RegNum
, unsigned Count
,
2269 bool isDoubleSpaced
,
2271 ARMOperand
*Op
= new ARMOperand(k_VectorListIndexed
);
2272 Op
->VectorList
.RegNum
= RegNum
;
2273 Op
->VectorList
.Count
= Count
;
2274 Op
->VectorList
.LaneIndex
= Index
;
2275 Op
->VectorList
.isDoubleSpaced
= isDoubleSpaced
;
2281 static ARMOperand
*CreateVectorIndex(unsigned Idx
, SMLoc S
, SMLoc E
,
2283 ARMOperand
*Op
= new ARMOperand(k_VectorIndex
);
2284 Op
->VectorIndex
.Val
= Idx
;
2290 static ARMOperand
*CreateImm(const MCExpr
*Val
, SMLoc S
, SMLoc E
) {
2291 ARMOperand
*Op
= new ARMOperand(k_Immediate
);
2298 static ARMOperand
*CreateMem(unsigned BaseRegNum
,
2299 const MCConstantExpr
*OffsetImm
,
2300 unsigned OffsetRegNum
,
2301 ARM_AM::ShiftOpc ShiftType
,
2306 ARMOperand
*Op
= new ARMOperand(k_Memory
);
2307 Op
->Memory
.BaseRegNum
= BaseRegNum
;
2308 Op
->Memory
.OffsetImm
= OffsetImm
;
2309 Op
->Memory
.OffsetRegNum
= OffsetRegNum
;
2310 Op
->Memory
.ShiftType
= ShiftType
;
2311 Op
->Memory
.ShiftImm
= ShiftImm
;
2312 Op
->Memory
.Alignment
= Alignment
;
2313 Op
->Memory
.isNegative
= isNegative
;
2319 static ARMOperand
*CreatePostIdxReg(unsigned RegNum
, bool isAdd
,
2320 ARM_AM::ShiftOpc ShiftTy
,
2323 ARMOperand
*Op
= new ARMOperand(k_PostIndexRegister
);
2324 Op
->PostIdxReg
.RegNum
= RegNum
;
2325 Op
->PostIdxReg
.isAdd
= isAdd
;
2326 Op
->PostIdxReg
.ShiftTy
= ShiftTy
;
2327 Op
->PostIdxReg
.ShiftImm
= ShiftImm
;
2333 static ARMOperand
*CreateMemBarrierOpt(ARM_MB::MemBOpt Opt
, SMLoc S
) {
2334 ARMOperand
*Op
= new ARMOperand(k_MemBarrierOpt
);
2335 Op
->MBOpt
.Val
= Opt
;
2341 static ARMOperand
*CreateProcIFlags(ARM_PROC::IFlags IFlags
, SMLoc S
) {
2342 ARMOperand
*Op
= new ARMOperand(k_ProcIFlags
);
2343 Op
->IFlags
.Val
= IFlags
;
2349 static ARMOperand
*CreateMSRMask(unsigned MMask
, SMLoc S
) {
2350 ARMOperand
*Op
= new ARMOperand(k_MSRMask
);
2351 Op
->MMask
.Val
= MMask
;
2358 } // end anonymous namespace.
2360 void ARMOperand::print(raw_ostream
&OS
) const {
2363 OS
<< "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2366 OS
<< "<ccout " << getReg() << ">";
2368 case k_ITCondMask
: {
2369 static const char *const MaskStr
[] = {
2370 "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2371 "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2373 assert((ITMask
.Mask
& 0xf) == ITMask
.Mask
);
2374 OS
<< "<it-mask " << MaskStr
[ITMask
.Mask
] << ">";
2378 OS
<< "<coprocessor number: " << getCoproc() << ">";
2381 OS
<< "<coprocessor register: " << getCoproc() << ">";
2383 case k_CoprocOption
:
2384 OS
<< "<coprocessor option: " << CoprocOption
.Val
<< ">";
2387 OS
<< "<mask: " << getMSRMask() << ">";
2390 getImm()->print(OS
);
2392 case k_MemBarrierOpt
:
2393 OS
<< "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2397 << " base:" << Memory
.BaseRegNum
;
2400 case k_PostIndexRegister
:
2401 OS
<< "post-idx register " << (PostIdxReg
.isAdd
? "" : "-")
2402 << PostIdxReg
.RegNum
;
2403 if (PostIdxReg
.ShiftTy
!= ARM_AM::no_shift
)
2404 OS
<< ARM_AM::getShiftOpcStr(PostIdxReg
.ShiftTy
) << " "
2405 << PostIdxReg
.ShiftImm
;
2408 case k_ProcIFlags
: {
2409 OS
<< "<ARM_PROC::";
2410 unsigned IFlags
= getProcIFlags();
2411 for (int i
=2; i
>= 0; --i
)
2412 if (IFlags
& (1 << i
))
2413 OS
<< ARM_PROC::IFlagsToString(1 << i
);
2418 OS
<< "<register " << getReg() << ">";
2420 case k_ShifterImmediate
:
2421 OS
<< "<shift " << (ShifterImm
.isASR
? "asr" : "lsl")
2422 << " #" << ShifterImm
.Imm
<< ">";
2424 case k_ShiftedRegister
:
2425 OS
<< "<so_reg_reg "
2426 << RegShiftedReg
.SrcReg
<< " "
2427 << ARM_AM::getShiftOpcStr(RegShiftedReg
.ShiftTy
)
2428 << " " << RegShiftedReg
.ShiftReg
<< ">";
2430 case k_ShiftedImmediate
:
2431 OS
<< "<so_reg_imm "
2432 << RegShiftedImm
.SrcReg
<< " "
2433 << ARM_AM::getShiftOpcStr(RegShiftedImm
.ShiftTy
)
2434 << " #" << RegShiftedImm
.ShiftImm
<< ">";
2436 case k_RotateImmediate
:
2437 OS
<< "<ror " << " #" << (RotImm
.Imm
* 8) << ">";
2439 case k_BitfieldDescriptor
:
2440 OS
<< "<bitfield " << "lsb: " << Bitfield
.LSB
2441 << ", width: " << Bitfield
.Width
<< ">";
2443 case k_RegisterList
:
2444 case k_DPRRegisterList
:
2445 case k_SPRRegisterList
: {
2446 OS
<< "<register_list ";
2448 const SmallVectorImpl
<unsigned> &RegList
= getRegList();
2449 for (SmallVectorImpl
<unsigned>::const_iterator
2450 I
= RegList
.begin(), E
= RegList
.end(); I
!= E
; ) {
2452 if (++I
< E
) OS
<< ", ";
2459 OS
<< "<vector_list " << VectorList
.Count
<< " * "
2460 << VectorList
.RegNum
<< ">";
2462 case k_VectorListAllLanes
:
2463 OS
<< "<vector_list(all lanes) " << VectorList
.Count
<< " * "
2464 << VectorList
.RegNum
<< ">";
2466 case k_VectorListIndexed
:
2467 OS
<< "<vector_list(lane " << VectorList
.LaneIndex
<< ") "
2468 << VectorList
.Count
<< " * " << VectorList
.RegNum
<< ">";
2471 OS
<< "'" << getToken() << "'";
2474 OS
<< "<vectorindex " << getVectorIndex() << ">";
2479 /// @name Auto-generated Match Functions
2482 static unsigned MatchRegisterName(StringRef Name
);
2486 bool ARMAsmParser::ParseRegister(unsigned &RegNo
,
2487 SMLoc
&StartLoc
, SMLoc
&EndLoc
) {
2488 StartLoc
= Parser
.getTok().getLoc();
2489 EndLoc
= Parser
.getTok().getEndLoc();
2490 RegNo
= tryParseRegister();
2492 return (RegNo
== (unsigned)-1);
2495 /// Try to parse a register name. The token must be an Identifier when called,
2496 /// and if it is a register name the token is eaten and the register number is
2497 /// returned. Otherwise return -1.
2499 int ARMAsmParser::tryParseRegister() {
2500 const AsmToken
&Tok
= Parser
.getTok();
2501 if (Tok
.isNot(AsmToken::Identifier
)) return -1;
2503 std::string lowerCase
= Tok
.getString().lower();
2504 unsigned RegNum
= MatchRegisterName(lowerCase
);
2506 RegNum
= StringSwitch
<unsigned>(lowerCase
)
2507 .Case("r13", ARM::SP
)
2508 .Case("r14", ARM::LR
)
2509 .Case("r15", ARM::PC
)
2510 .Case("ip", ARM::R12
)
2511 // Additional register name aliases for 'gas' compatibility.
2512 .Case("a1", ARM::R0
)
2513 .Case("a2", ARM::R1
)
2514 .Case("a3", ARM::R2
)
2515 .Case("a4", ARM::R3
)
2516 .Case("v1", ARM::R4
)
2517 .Case("v2", ARM::R5
)
2518 .Case("v3", ARM::R6
)
2519 .Case("v4", ARM::R7
)
2520 .Case("v5", ARM::R8
)
2521 .Case("v6", ARM::R9
)
2522 .Case("v7", ARM::R10
)
2523 .Case("v8", ARM::R11
)
2524 .Case("sb", ARM::R9
)
2525 .Case("sl", ARM::R10
)
2526 .Case("fp", ARM::R11
)
2530 // Check for aliases registered via .req. Canonicalize to lower case.
2531 // That's more consistent since register names are case insensitive, and
2532 // it's how the original entry was passed in from MC/MCParser/AsmParser.
2533 StringMap
<unsigned>::const_iterator Entry
= RegisterReqs
.find(lowerCase
);
2534 // If no match, return failure.
2535 if (Entry
== RegisterReqs
.end())
2537 Parser
.Lex(); // Eat identifier token.
2538 return Entry
->getValue();
2541 Parser
.Lex(); // Eat identifier token.
2546 // Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2547 // If a recoverable error occurs, return 1. If an irrecoverable error
2548 // occurs, return -1. An irrecoverable error is one where tokens have been
2549 // consumed in the process of trying to parse the shifter (i.e., when it is
2550 // indeed a shifter operand, but malformed).
2551 int ARMAsmParser::tryParseShiftRegister(
2552 SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
2553 SMLoc S
= Parser
.getTok().getLoc();
2554 const AsmToken
&Tok
= Parser
.getTok();
2555 assert(Tok
.is(AsmToken::Identifier
) && "Token is not an Identifier");
2557 std::string lowerCase
= Tok
.getString().lower();
2558 ARM_AM::ShiftOpc ShiftTy
= StringSwitch
<ARM_AM::ShiftOpc
>(lowerCase
)
2559 .Case("asl", ARM_AM::lsl
)
2560 .Case("lsl", ARM_AM::lsl
)
2561 .Case("lsr", ARM_AM::lsr
)
2562 .Case("asr", ARM_AM::asr
)
2563 .Case("ror", ARM_AM::ror
)
2564 .Case("rrx", ARM_AM::rrx
)
2565 .Default(ARM_AM::no_shift
);
2567 if (ShiftTy
== ARM_AM::no_shift
)
2570 Parser
.Lex(); // Eat the operator.
2572 // The source register for the shift has already been added to the
2573 // operand list, so we need to pop it off and combine it into the shifted
2574 // register operand instead.
2575 OwningPtr
<ARMOperand
> PrevOp((ARMOperand
*)Operands
.pop_back_val());
2576 if (!PrevOp
->isReg())
2577 return Error(PrevOp
->getStartLoc(), "shift must be of a register");
2578 int SrcReg
= PrevOp
->getReg();
2583 if (ShiftTy
== ARM_AM::rrx
) {
2584 // RRX Doesn't have an explicit shift amount. The encoder expects
2585 // the shift register to be the same as the source register. Seems odd,
2589 // Figure out if this is shifted by a constant or a register (for non-RRX).
2590 if (Parser
.getTok().is(AsmToken::Hash
) ||
2591 Parser
.getTok().is(AsmToken::Dollar
)) {
2592 Parser
.Lex(); // Eat hash.
2593 SMLoc ImmLoc
= Parser
.getTok().getLoc();
2594 const MCExpr
*ShiftExpr
= 0;
2595 if (getParser().parseExpression(ShiftExpr
, EndLoc
)) {
2596 Error(ImmLoc
, "invalid immediate shift value");
2599 // The expression must be evaluatable as an immediate.
2600 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(ShiftExpr
);
2602 Error(ImmLoc
, "invalid immediate shift value");
2605 // Range check the immediate.
2606 // lsl, ror: 0 <= imm <= 31
2607 // lsr, asr: 0 <= imm <= 32
2608 Imm
= CE
->getValue();
2610 ((ShiftTy
== ARM_AM::lsl
|| ShiftTy
== ARM_AM::ror
) && Imm
> 31) ||
2611 ((ShiftTy
== ARM_AM::lsr
|| ShiftTy
== ARM_AM::asr
) && Imm
> 32)) {
2612 Error(ImmLoc
, "immediate shift value out of range");
2615 // shift by zero is a nop. Always send it through as lsl.
2616 // ('as' compatibility)
2618 ShiftTy
= ARM_AM::lsl
;
2619 } else if (Parser
.getTok().is(AsmToken::Identifier
)) {
2620 SMLoc L
= Parser
.getTok().getLoc();
2621 EndLoc
= Parser
.getTok().getEndLoc();
2622 ShiftReg
= tryParseRegister();
2623 if (ShiftReg
== -1) {
2624 Error (L
, "expected immediate or register in shift operand");
2628 Error (Parser
.getTok().getLoc(),
2629 "expected immediate or register in shift operand");
2634 if (ShiftReg
&& ShiftTy
!= ARM_AM::rrx
)
2635 Operands
.push_back(ARMOperand::CreateShiftedRegister(ShiftTy
, SrcReg
,
2639 Operands
.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy
, SrcReg
, Imm
,
2646 /// Try to parse a register name. The token must be an Identifier when called.
2647 /// If it's a register, an AsmOperand is created. Another AsmOperand is created
2648 /// if there is a "writeback". 'true' if it's not a register.
2650 /// TODO this is likely to change to allow different register types and or to
2651 /// parse for a specific register type.
2653 tryParseRegisterWithWriteBack(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
2654 const AsmToken
&RegTok
= Parser
.getTok();
2655 int RegNo
= tryParseRegister();
2659 Operands
.push_back(ARMOperand::CreateReg(RegNo
, RegTok
.getLoc(),
2660 RegTok
.getEndLoc()));
2662 const AsmToken
&ExclaimTok
= Parser
.getTok();
2663 if (ExclaimTok
.is(AsmToken::Exclaim
)) {
2664 Operands
.push_back(ARMOperand::CreateToken(ExclaimTok
.getString(),
2665 ExclaimTok
.getLoc()));
2666 Parser
.Lex(); // Eat exclaim token
2670 // Also check for an index operand. This is only legal for vector registers,
2671 // but that'll get caught OK in operand matching, so we don't need to
2672 // explicitly filter everything else out here.
2673 if (Parser
.getTok().is(AsmToken::LBrac
)) {
2674 SMLoc SIdx
= Parser
.getTok().getLoc();
2675 Parser
.Lex(); // Eat left bracket token.
2677 const MCExpr
*ImmVal
;
2678 if (getParser().parseExpression(ImmVal
))
2680 const MCConstantExpr
*MCE
= dyn_cast
<MCConstantExpr
>(ImmVal
);
2682 return TokError("immediate value expected for vector index");
2684 if (Parser
.getTok().isNot(AsmToken::RBrac
))
2685 return Error(Parser
.getTok().getLoc(), "']' expected");
2687 SMLoc E
= Parser
.getTok().getEndLoc();
2688 Parser
.Lex(); // Eat right bracket token.
2690 Operands
.push_back(ARMOperand::CreateVectorIndex(MCE
->getValue(),
2698 /// MatchCoprocessorOperandName - Try to parse an coprocessor related
2699 /// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2701 static int MatchCoprocessorOperandName(StringRef Name
, char CoprocOp
) {
2702 // Use the same layout as the tablegen'erated register name matcher. Ugly,
2704 switch (Name
.size()) {
2707 if (Name
[0] != CoprocOp
)
2723 if (Name
[0] != CoprocOp
|| Name
[1] != '1')
2727 case '0': return 10;
2728 case '1': return 11;
2729 case '2': return 12;
2730 case '3': return 13;
2731 case '4': return 14;
2732 case '5': return 15;
2737 /// parseITCondCode - Try to parse a condition code for an IT instruction.
2738 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
2739 parseITCondCode(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
2740 SMLoc S
= Parser
.getTok().getLoc();
2741 const AsmToken
&Tok
= Parser
.getTok();
2742 if (!Tok
.is(AsmToken::Identifier
))
2743 return MatchOperand_NoMatch
;
2744 unsigned CC
= StringSwitch
<unsigned>(Tok
.getString().lower())
2745 .Case("eq", ARMCC::EQ
)
2746 .Case("ne", ARMCC::NE
)
2747 .Case("hs", ARMCC::HS
)
2748 .Case("cs", ARMCC::HS
)
2749 .Case("lo", ARMCC::LO
)
2750 .Case("cc", ARMCC::LO
)
2751 .Case("mi", ARMCC::MI
)
2752 .Case("pl", ARMCC::PL
)
2753 .Case("vs", ARMCC::VS
)
2754 .Case("vc", ARMCC::VC
)
2755 .Case("hi", ARMCC::HI
)
2756 .Case("ls", ARMCC::LS
)
2757 .Case("ge", ARMCC::GE
)
2758 .Case("lt", ARMCC::LT
)
2759 .Case("gt", ARMCC::GT
)
2760 .Case("le", ARMCC::LE
)
2761 .Case("al", ARMCC::AL
)
2764 return MatchOperand_NoMatch
;
2765 Parser
.Lex(); // Eat the token.
2767 Operands
.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC
), S
));
2769 return MatchOperand_Success
;
2772 /// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
2773 /// token must be an Identifier when called, and if it is a coprocessor
2774 /// number, the token is eaten and the operand is added to the operand list.
2775 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
2776 parseCoprocNumOperand(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
2777 SMLoc S
= Parser
.getTok().getLoc();
2778 const AsmToken
&Tok
= Parser
.getTok();
2779 if (Tok
.isNot(AsmToken::Identifier
))
2780 return MatchOperand_NoMatch
;
2782 int Num
= MatchCoprocessorOperandName(Tok
.getString(), 'p');
2784 return MatchOperand_NoMatch
;
2786 Parser
.Lex(); // Eat identifier token.
2787 Operands
.push_back(ARMOperand::CreateCoprocNum(Num
, S
));
2788 return MatchOperand_Success
;
2791 /// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
2792 /// token must be an Identifier when called, and if it is a coprocessor
2793 /// number, the token is eaten and the operand is added to the operand list.
2794 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
2795 parseCoprocRegOperand(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
2796 SMLoc S
= Parser
.getTok().getLoc();
2797 const AsmToken
&Tok
= Parser
.getTok();
2798 if (Tok
.isNot(AsmToken::Identifier
))
2799 return MatchOperand_NoMatch
;
2801 int Reg
= MatchCoprocessorOperandName(Tok
.getString(), 'c');
2803 return MatchOperand_NoMatch
;
2805 Parser
.Lex(); // Eat identifier token.
2806 Operands
.push_back(ARMOperand::CreateCoprocReg(Reg
, S
));
2807 return MatchOperand_Success
;
2810 /// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
2811 /// coproc_option : '{' imm0_255 '}'
2812 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
2813 parseCoprocOptionOperand(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
2814 SMLoc S
= Parser
.getTok().getLoc();
2816 // If this isn't a '{', this isn't a coprocessor immediate operand.
2817 if (Parser
.getTok().isNot(AsmToken::LCurly
))
2818 return MatchOperand_NoMatch
;
2819 Parser
.Lex(); // Eat the '{'
2822 SMLoc Loc
= Parser
.getTok().getLoc();
2823 if (getParser().parseExpression(Expr
)) {
2824 Error(Loc
, "illegal expression");
2825 return MatchOperand_ParseFail
;
2827 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(Expr
);
2828 if (!CE
|| CE
->getValue() < 0 || CE
->getValue() > 255) {
2829 Error(Loc
, "coprocessor option must be an immediate in range [0, 255]");
2830 return MatchOperand_ParseFail
;
2832 int Val
= CE
->getValue();
2834 // Check for and consume the closing '}'
2835 if (Parser
.getTok().isNot(AsmToken::RCurly
))
2836 return MatchOperand_ParseFail
;
2837 SMLoc E
= Parser
.getTok().getEndLoc();
2838 Parser
.Lex(); // Eat the '}'
2840 Operands
.push_back(ARMOperand::CreateCoprocOption(Val
, S
, E
));
2841 return MatchOperand_Success
;
2844 // For register list parsing, we need to map from raw GPR register numbering
2845 // to the enumeration values. The enumeration values aren't sorted by
2846 // register number due to our using "sp", "lr" and "pc" as canonical names.
2847 static unsigned getNextRegister(unsigned Reg
) {
2848 // If this is a GPR, we need to do it manually, otherwise we can rely
2849 // on the sort ordering of the enumeration since the other reg-classes
2851 if (!ARMMCRegisterClasses
[ARM::GPRRegClassID
].contains(Reg
))
2854 default: llvm_unreachable("Invalid GPR number!");
2855 case ARM::R0
: return ARM::R1
; case ARM::R1
: return ARM::R2
;
2856 case ARM::R2
: return ARM::R3
; case ARM::R3
: return ARM::R4
;
2857 case ARM::R4
: return ARM::R5
; case ARM::R5
: return ARM::R6
;
2858 case ARM::R6
: return ARM::R7
; case ARM::R7
: return ARM::R8
;
2859 case ARM::R8
: return ARM::R9
; case ARM::R9
: return ARM::R10
;
2860 case ARM::R10
: return ARM::R11
; case ARM::R11
: return ARM::R12
;
2861 case ARM::R12
: return ARM::SP
; case ARM::SP
: return ARM::LR
;
2862 case ARM::LR
: return ARM::PC
; case ARM::PC
: return ARM::R0
;
2866 // Return the low-subreg of a given Q register.
2867 static unsigned getDRegFromQReg(unsigned QReg
) {
2869 default: llvm_unreachable("expected a Q register!");
2870 case ARM::Q0
: return ARM::D0
;
2871 case ARM::Q1
: return ARM::D2
;
2872 case ARM::Q2
: return ARM::D4
;
2873 case ARM::Q3
: return ARM::D6
;
2874 case ARM::Q4
: return ARM::D8
;
2875 case ARM::Q5
: return ARM::D10
;
2876 case ARM::Q6
: return ARM::D12
;
2877 case ARM::Q7
: return ARM::D14
;
2878 case ARM::Q8
: return ARM::D16
;
2879 case ARM::Q9
: return ARM::D18
;
2880 case ARM::Q10
: return ARM::D20
;
2881 case ARM::Q11
: return ARM::D22
;
2882 case ARM::Q12
: return ARM::D24
;
2883 case ARM::Q13
: return ARM::D26
;
2884 case ARM::Q14
: return ARM::D28
;
2885 case ARM::Q15
: return ARM::D30
;
2889 /// Parse a register list.
2891 parseRegisterList(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
2892 assert(Parser
.getTok().is(AsmToken::LCurly
) &&
2893 "Token is not a Left Curly Brace");
2894 SMLoc S
= Parser
.getTok().getLoc();
2895 Parser
.Lex(); // Eat '{' token.
2896 SMLoc RegLoc
= Parser
.getTok().getLoc();
2898 // Check the first register in the list to see what register class
2899 // this is a list of.
2900 int Reg
= tryParseRegister();
2902 return Error(RegLoc
, "register expected");
2904 // The reglist instructions have at most 16 registers, so reserve
2905 // space for that many.
2906 SmallVector
<std::pair
<unsigned, SMLoc
>, 16> Registers
;
2908 // Allow Q regs and just interpret them as the two D sub-registers.
2909 if (ARMMCRegisterClasses
[ARM::QPRRegClassID
].contains(Reg
)) {
2910 Reg
= getDRegFromQReg(Reg
);
2911 Registers
.push_back(std::pair
<unsigned, SMLoc
>(Reg
, RegLoc
));
2914 const MCRegisterClass
*RC
;
2915 if (ARMMCRegisterClasses
[ARM::GPRRegClassID
].contains(Reg
))
2916 RC
= &ARMMCRegisterClasses
[ARM::GPRRegClassID
];
2917 else if (ARMMCRegisterClasses
[ARM::DPRRegClassID
].contains(Reg
))
2918 RC
= &ARMMCRegisterClasses
[ARM::DPRRegClassID
];
2919 else if (ARMMCRegisterClasses
[ARM::SPRRegClassID
].contains(Reg
))
2920 RC
= &ARMMCRegisterClasses
[ARM::SPRRegClassID
];
2922 return Error(RegLoc
, "invalid register in register list");
2924 // Store the register.
2925 Registers
.push_back(std::pair
<unsigned, SMLoc
>(Reg
, RegLoc
));
2927 // This starts immediately after the first register token in the list,
2928 // so we can see either a comma or a minus (range separator) as a legal
2930 while (Parser
.getTok().is(AsmToken::Comma
) ||
2931 Parser
.getTok().is(AsmToken::Minus
)) {
2932 if (Parser
.getTok().is(AsmToken::Minus
)) {
2933 Parser
.Lex(); // Eat the minus.
2934 SMLoc AfterMinusLoc
= Parser
.getTok().getLoc();
2935 int EndReg
= tryParseRegister();
2937 return Error(AfterMinusLoc
, "register expected");
2938 // Allow Q regs and just interpret them as the two D sub-registers.
2939 if (ARMMCRegisterClasses
[ARM::QPRRegClassID
].contains(EndReg
))
2940 EndReg
= getDRegFromQReg(EndReg
) + 1;
2941 // If the register is the same as the start reg, there's nothing
2945 // The register must be in the same register class as the first.
2946 if (!RC
->contains(EndReg
))
2947 return Error(AfterMinusLoc
, "invalid register in register list");
2948 // Ranges must go from low to high.
2949 if (MRI
->getEncodingValue(Reg
) > MRI
->getEncodingValue(EndReg
))
2950 return Error(AfterMinusLoc
, "bad range in register list");
2952 // Add all the registers in the range to the register list.
2953 while (Reg
!= EndReg
) {
2954 Reg
= getNextRegister(Reg
);
2955 Registers
.push_back(std::pair
<unsigned, SMLoc
>(Reg
, RegLoc
));
2959 Parser
.Lex(); // Eat the comma.
2960 RegLoc
= Parser
.getTok().getLoc();
2962 const AsmToken RegTok
= Parser
.getTok();
2963 Reg
= tryParseRegister();
2965 return Error(RegLoc
, "register expected");
2966 // Allow Q regs and just interpret them as the two D sub-registers.
2967 bool isQReg
= false;
2968 if (ARMMCRegisterClasses
[ARM::QPRRegClassID
].contains(Reg
)) {
2969 Reg
= getDRegFromQReg(Reg
);
2972 // The register must be in the same register class as the first.
2973 if (!RC
->contains(Reg
))
2974 return Error(RegLoc
, "invalid register in register list");
2975 // List must be monotonically increasing.
2976 if (MRI
->getEncodingValue(Reg
) < MRI
->getEncodingValue(OldReg
)) {
2977 if (ARMMCRegisterClasses
[ARM::GPRRegClassID
].contains(Reg
))
2978 Warning(RegLoc
, "register list not in ascending order");
2980 return Error(RegLoc
, "register list not in ascending order");
2982 if (MRI
->getEncodingValue(Reg
) == MRI
->getEncodingValue(OldReg
)) {
2983 Warning(RegLoc
, "duplicated register (" + RegTok
.getString() +
2984 ") in register list");
2987 // VFP register lists must also be contiguous.
2988 // It's OK to use the enumeration values directly here rather, as the
2989 // VFP register classes have the enum sorted properly.
2990 if (RC
!= &ARMMCRegisterClasses
[ARM::GPRRegClassID
] &&
2992 return Error(RegLoc
, "non-contiguous register range");
2993 Registers
.push_back(std::pair
<unsigned, SMLoc
>(Reg
, RegLoc
));
2995 Registers
.push_back(std::pair
<unsigned, SMLoc
>(++Reg
, RegLoc
));
2998 if (Parser
.getTok().isNot(AsmToken::RCurly
))
2999 return Error(Parser
.getTok().getLoc(), "'}' expected");
3000 SMLoc E
= Parser
.getTok().getEndLoc();
3001 Parser
.Lex(); // Eat '}' token.
3003 // Push the register list operand.
3004 Operands
.push_back(ARMOperand::CreateRegList(Registers
, S
, E
));
3006 // The ARM system instruction variants for LDM/STM have a '^' token here.
3007 if (Parser
.getTok().is(AsmToken::Caret
)) {
3008 Operands
.push_back(ARMOperand::CreateToken("^",Parser
.getTok().getLoc()));
3009 Parser
.Lex(); // Eat '^' token.
3015 // Helper function to parse the lane index for vector lists.
3016 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
3017 parseVectorLane(VectorLaneTy
&LaneKind
, unsigned &Index
, SMLoc
&EndLoc
) {
3018 Index
= 0; // Always return a defined index value.
3019 if (Parser
.getTok().is(AsmToken::LBrac
)) {
3020 Parser
.Lex(); // Eat the '['.
3021 if (Parser
.getTok().is(AsmToken::RBrac
)) {
3022 // "Dn[]" is the 'all lanes' syntax.
3023 LaneKind
= AllLanes
;
3024 EndLoc
= Parser
.getTok().getEndLoc();
3025 Parser
.Lex(); // Eat the ']'.
3026 return MatchOperand_Success
;
3029 // There's an optional '#' token here. Normally there wouldn't be, but
3030 // inline assemble puts one in, and it's friendly to accept that.
3031 if (Parser
.getTok().is(AsmToken::Hash
))
3032 Parser
.Lex(); // Eat the '#'
3034 const MCExpr
*LaneIndex
;
3035 SMLoc Loc
= Parser
.getTok().getLoc();
3036 if (getParser().parseExpression(LaneIndex
)) {
3037 Error(Loc
, "illegal expression");
3038 return MatchOperand_ParseFail
;
3040 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(LaneIndex
);
3042 Error(Loc
, "lane index must be empty or an integer");
3043 return MatchOperand_ParseFail
;
3045 if (Parser
.getTok().isNot(AsmToken::RBrac
)) {
3046 Error(Parser
.getTok().getLoc(), "']' expected");
3047 return MatchOperand_ParseFail
;
3049 EndLoc
= Parser
.getTok().getEndLoc();
3050 Parser
.Lex(); // Eat the ']'.
3051 int64_t Val
= CE
->getValue();
3053 // FIXME: Make this range check context sensitive for .8, .16, .32.
3054 if (Val
< 0 || Val
> 7) {
3055 Error(Parser
.getTok().getLoc(), "lane index out of range");
3056 return MatchOperand_ParseFail
;
3059 LaneKind
= IndexedLane
;
3060 return MatchOperand_Success
;
3063 return MatchOperand_Success
;
3066 // parse a vector register list
3067 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
3068 parseVectorList(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3069 VectorLaneTy LaneKind
;
3071 SMLoc S
= Parser
.getTok().getLoc();
3072 // As an extension (to match gas), support a plain D register or Q register
3073 // (without encosing curly braces) as a single or double entry list,
3075 if (Parser
.getTok().is(AsmToken::Identifier
)) {
3076 SMLoc E
= Parser
.getTok().getEndLoc();
3077 int Reg
= tryParseRegister();
3079 return MatchOperand_NoMatch
;
3080 if (ARMMCRegisterClasses
[ARM::DPRRegClassID
].contains(Reg
)) {
3081 OperandMatchResultTy Res
= parseVectorLane(LaneKind
, LaneIndex
, E
);
3082 if (Res
!= MatchOperand_Success
)
3086 Operands
.push_back(ARMOperand::CreateVectorList(Reg
, 1, false, S
, E
));
3089 Operands
.push_back(ARMOperand::CreateVectorListAllLanes(Reg
, 1, false,
3093 Operands
.push_back(ARMOperand::CreateVectorListIndexed(Reg
, 1,
3098 return MatchOperand_Success
;
3100 if (ARMMCRegisterClasses
[ARM::QPRRegClassID
].contains(Reg
)) {
3101 Reg
= getDRegFromQReg(Reg
);
3102 OperandMatchResultTy Res
= parseVectorLane(LaneKind
, LaneIndex
, E
);
3103 if (Res
!= MatchOperand_Success
)
3107 Reg
= MRI
->getMatchingSuperReg(Reg
, ARM::dsub_0
,
3108 &ARMMCRegisterClasses
[ARM::DPairRegClassID
]);
3109 Operands
.push_back(ARMOperand::CreateVectorList(Reg
, 2, false, S
, E
));
3112 Reg
= MRI
->getMatchingSuperReg(Reg
, ARM::dsub_0
,
3113 &ARMMCRegisterClasses
[ARM::DPairRegClassID
]);
3114 Operands
.push_back(ARMOperand::CreateVectorListAllLanes(Reg
, 2, false,
3118 Operands
.push_back(ARMOperand::CreateVectorListIndexed(Reg
, 2,
3123 return MatchOperand_Success
;
3125 Error(S
, "vector register expected");
3126 return MatchOperand_ParseFail
;
3129 if (Parser
.getTok().isNot(AsmToken::LCurly
))
3130 return MatchOperand_NoMatch
;
3132 Parser
.Lex(); // Eat '{' token.
3133 SMLoc RegLoc
= Parser
.getTok().getLoc();
3135 int Reg
= tryParseRegister();
3137 Error(RegLoc
, "register expected");
3138 return MatchOperand_ParseFail
;
3142 unsigned FirstReg
= Reg
;
3143 // The list is of D registers, but we also allow Q regs and just interpret
3144 // them as the two D sub-registers.
3145 if (ARMMCRegisterClasses
[ARM::QPRRegClassID
].contains(Reg
)) {
3146 FirstReg
= Reg
= getDRegFromQReg(Reg
);
3147 Spacing
= 1; // double-spacing requires explicit D registers, otherwise
3148 // it's ambiguous with four-register single spaced.
3154 if (parseVectorLane(LaneKind
, LaneIndex
, E
) != MatchOperand_Success
)
3155 return MatchOperand_ParseFail
;
3157 while (Parser
.getTok().is(AsmToken::Comma
) ||
3158 Parser
.getTok().is(AsmToken::Minus
)) {
3159 if (Parser
.getTok().is(AsmToken::Minus
)) {
3161 Spacing
= 1; // Register range implies a single spaced list.
3162 else if (Spacing
== 2) {
3163 Error(Parser
.getTok().getLoc(),
3164 "sequential registers in double spaced list");
3165 return MatchOperand_ParseFail
;
3167 Parser
.Lex(); // Eat the minus.
3168 SMLoc AfterMinusLoc
= Parser
.getTok().getLoc();
3169 int EndReg
= tryParseRegister();
3171 Error(AfterMinusLoc
, "register expected");
3172 return MatchOperand_ParseFail
;
3174 // Allow Q regs and just interpret them as the two D sub-registers.
3175 if (ARMMCRegisterClasses
[ARM::QPRRegClassID
].contains(EndReg
))
3176 EndReg
= getDRegFromQReg(EndReg
) + 1;
3177 // If the register is the same as the start reg, there's nothing
3181 // The register must be in the same register class as the first.
3182 if (!ARMMCRegisterClasses
[ARM::DPRRegClassID
].contains(EndReg
)) {
3183 Error(AfterMinusLoc
, "invalid register in register list");
3184 return MatchOperand_ParseFail
;
3186 // Ranges must go from low to high.
3188 Error(AfterMinusLoc
, "bad range in register list");
3189 return MatchOperand_ParseFail
;
3191 // Parse the lane specifier if present.
3192 VectorLaneTy NextLaneKind
;
3193 unsigned NextLaneIndex
;
3194 if (parseVectorLane(NextLaneKind
, NextLaneIndex
, E
) !=
3195 MatchOperand_Success
)
3196 return MatchOperand_ParseFail
;
3197 if (NextLaneKind
!= LaneKind
|| LaneIndex
!= NextLaneIndex
) {
3198 Error(AfterMinusLoc
, "mismatched lane index in register list");
3199 return MatchOperand_ParseFail
;
3202 // Add all the registers in the range to the register list.
3203 Count
+= EndReg
- Reg
;
3207 Parser
.Lex(); // Eat the comma.
3208 RegLoc
= Parser
.getTok().getLoc();
3210 Reg
= tryParseRegister();
3212 Error(RegLoc
, "register expected");
3213 return MatchOperand_ParseFail
;
3215 // vector register lists must be contiguous.
3216 // It's OK to use the enumeration values directly here rather, as the
3217 // VFP register classes have the enum sorted properly.
3219 // The list is of D registers, but we also allow Q regs and just interpret
3220 // them as the two D sub-registers.
3221 if (ARMMCRegisterClasses
[ARM::QPRRegClassID
].contains(Reg
)) {
3223 Spacing
= 1; // Register range implies a single spaced list.
3224 else if (Spacing
== 2) {
3226 "invalid register in double-spaced list (must be 'D' register')");
3227 return MatchOperand_ParseFail
;
3229 Reg
= getDRegFromQReg(Reg
);
3230 if (Reg
!= OldReg
+ 1) {
3231 Error(RegLoc
, "non-contiguous register range");
3232 return MatchOperand_ParseFail
;
3236 // Parse the lane specifier if present.
3237 VectorLaneTy NextLaneKind
;
3238 unsigned NextLaneIndex
;
3239 SMLoc LaneLoc
= Parser
.getTok().getLoc();
3240 if (parseVectorLane(NextLaneKind
, NextLaneIndex
, E
) !=
3241 MatchOperand_Success
)
3242 return MatchOperand_ParseFail
;
3243 if (NextLaneKind
!= LaneKind
|| LaneIndex
!= NextLaneIndex
) {
3244 Error(LaneLoc
, "mismatched lane index in register list");
3245 return MatchOperand_ParseFail
;
3249 // Normal D register.
3250 // Figure out the register spacing (single or double) of the list if
3251 // we don't know it already.
3253 Spacing
= 1 + (Reg
== OldReg
+ 2);
3255 // Just check that it's contiguous and keep going.
3256 if (Reg
!= OldReg
+ Spacing
) {
3257 Error(RegLoc
, "non-contiguous register range");
3258 return MatchOperand_ParseFail
;
3261 // Parse the lane specifier if present.
3262 VectorLaneTy NextLaneKind
;
3263 unsigned NextLaneIndex
;
3264 SMLoc EndLoc
= Parser
.getTok().getLoc();
3265 if (parseVectorLane(NextLaneKind
, NextLaneIndex
, E
) != MatchOperand_Success
)
3266 return MatchOperand_ParseFail
;
3267 if (NextLaneKind
!= LaneKind
|| LaneIndex
!= NextLaneIndex
) {
3268 Error(EndLoc
, "mismatched lane index in register list");
3269 return MatchOperand_ParseFail
;
3273 if (Parser
.getTok().isNot(AsmToken::RCurly
)) {
3274 Error(Parser
.getTok().getLoc(), "'}' expected");
3275 return MatchOperand_ParseFail
;
3277 E
= Parser
.getTok().getEndLoc();
3278 Parser
.Lex(); // Eat '}' token.
3282 // Two-register operands have been converted to the
3283 // composite register classes.
3285 const MCRegisterClass
*RC
= (Spacing
== 1) ?
3286 &ARMMCRegisterClasses
[ARM::DPairRegClassID
] :
3287 &ARMMCRegisterClasses
[ARM::DPairSpcRegClassID
];
3288 FirstReg
= MRI
->getMatchingSuperReg(FirstReg
, ARM::dsub_0
, RC
);
3291 Operands
.push_back(ARMOperand::CreateVectorList(FirstReg
, Count
,
3292 (Spacing
== 2), S
, E
));
3295 // Two-register operands have been converted to the
3296 // composite register classes.
3298 const MCRegisterClass
*RC
= (Spacing
== 1) ?
3299 &ARMMCRegisterClasses
[ARM::DPairRegClassID
] :
3300 &ARMMCRegisterClasses
[ARM::DPairSpcRegClassID
];
3301 FirstReg
= MRI
->getMatchingSuperReg(FirstReg
, ARM::dsub_0
, RC
);
3303 Operands
.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg
, Count
,
3308 Operands
.push_back(ARMOperand::CreateVectorListIndexed(FirstReg
, Count
,
3314 return MatchOperand_Success
;
3317 /// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
3318 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
3319 parseMemBarrierOptOperand(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3320 SMLoc S
= Parser
.getTok().getLoc();
3321 const AsmToken
&Tok
= Parser
.getTok();
3324 if (Tok
.is(AsmToken::Identifier
)) {
3325 StringRef OptStr
= Tok
.getString();
3327 Opt
= StringSwitch
<unsigned>(OptStr
.slice(0, OptStr
.size()).lower())
3328 .Case("sy", ARM_MB::SY
)
3329 .Case("st", ARM_MB::ST
)
3330 .Case("sh", ARM_MB::ISH
)
3331 .Case("ish", ARM_MB::ISH
)
3332 .Case("shst", ARM_MB::ISHST
)
3333 .Case("ishst", ARM_MB::ISHST
)
3334 .Case("nsh", ARM_MB::NSH
)
3335 .Case("un", ARM_MB::NSH
)
3336 .Case("nshst", ARM_MB::NSHST
)
3337 .Case("unst", ARM_MB::NSHST
)
3338 .Case("osh", ARM_MB::OSH
)
3339 .Case("oshst", ARM_MB::OSHST
)
3343 return MatchOperand_NoMatch
;
3345 Parser
.Lex(); // Eat identifier token.
3346 } else if (Tok
.is(AsmToken::Hash
) ||
3347 Tok
.is(AsmToken::Dollar
) ||
3348 Tok
.is(AsmToken::Integer
)) {
3349 if (Parser
.getTok().isNot(AsmToken::Integer
))
3350 Parser
.Lex(); // Eat the '#'.
3351 SMLoc Loc
= Parser
.getTok().getLoc();
3353 const MCExpr
*MemBarrierID
;
3354 if (getParser().parseExpression(MemBarrierID
)) {
3355 Error(Loc
, "illegal expression");
3356 return MatchOperand_ParseFail
;
3359 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(MemBarrierID
);
3361 Error(Loc
, "constant expression expected");
3362 return MatchOperand_ParseFail
;
3365 int Val
= CE
->getValue();
3367 Error(Loc
, "immediate value out of range");
3368 return MatchOperand_ParseFail
;
3371 Opt
= ARM_MB::RESERVED_0
+ Val
;
3373 return MatchOperand_ParseFail
;
3375 Operands
.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt
)Opt
, S
));
3376 return MatchOperand_Success
;
3379 /// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
3380 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
3381 parseProcIFlagsOperand(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3382 SMLoc S
= Parser
.getTok().getLoc();
3383 const AsmToken
&Tok
= Parser
.getTok();
3384 if (!Tok
.is(AsmToken::Identifier
))
3385 return MatchOperand_NoMatch
;
3386 StringRef IFlagsStr
= Tok
.getString();
3388 // An iflags string of "none" is interpreted to mean that none of the AIF
3389 // bits are set. Not a terribly useful instruction, but a valid encoding.
3390 unsigned IFlags
= 0;
3391 if (IFlagsStr
!= "none") {
3392 for (int i
= 0, e
= IFlagsStr
.size(); i
!= e
; ++i
) {
3393 unsigned Flag
= StringSwitch
<unsigned>(IFlagsStr
.substr(i
, 1))
3394 .Case("a", ARM_PROC::A
)
3395 .Case("i", ARM_PROC::I
)
3396 .Case("f", ARM_PROC::F
)
3399 // If some specific iflag is already set, it means that some letter is
3400 // present more than once, this is not acceptable.
3401 if (Flag
== ~0U || (IFlags
& Flag
))
3402 return MatchOperand_NoMatch
;
3408 Parser
.Lex(); // Eat identifier token.
3409 Operands
.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags
)IFlags
, S
));
3410 return MatchOperand_Success
;
3413 /// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
3414 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
3415 parseMSRMaskOperand(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3416 SMLoc S
= Parser
.getTok().getLoc();
3417 const AsmToken
&Tok
= Parser
.getTok();
3418 if (!Tok
.is(AsmToken::Identifier
))
3419 return MatchOperand_NoMatch
;
3420 StringRef Mask
= Tok
.getString();
3423 // See ARMv6-M 10.1.1
3424 std::string Name
= Mask
.lower();
3425 unsigned FlagsVal
= StringSwitch
<unsigned>(Name
)
3426 // Note: in the documentation:
3427 // ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
3428 // for MSR APSR_nzcvq.
3429 // but we do make it an alias here. This is so to get the "mask encoding"
3430 // bits correct on MSR APSR writes.
3432 // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
3433 // should really only be allowed when writing a special register. Note
3434 // they get dropped in the MRS instruction reading a special register as
3435 // the SYSm field is only 8 bits.
3437 // FIXME: the _g and _nzcvqg versions are only allowed if the processor
3438 // includes the DSP extension but that is not checked.
3439 .Case("apsr", 0x800)
3440 .Case("apsr_nzcvq", 0x800)
3441 .Case("apsr_g", 0x400)
3442 .Case("apsr_nzcvqg", 0xc00)
3443 .Case("iapsr", 0x801)
3444 .Case("iapsr_nzcvq", 0x801)
3445 .Case("iapsr_g", 0x401)
3446 .Case("iapsr_nzcvqg", 0xc01)
3447 .Case("eapsr", 0x802)
3448 .Case("eapsr_nzcvq", 0x802)
3449 .Case("eapsr_g", 0x402)
3450 .Case("eapsr_nzcvqg", 0xc02)
3451 .Case("xpsr", 0x803)
3452 .Case("xpsr_nzcvq", 0x803)
3453 .Case("xpsr_g", 0x403)
3454 .Case("xpsr_nzcvqg", 0xc03)
3455 .Case("ipsr", 0x805)
3456 .Case("epsr", 0x806)
3457 .Case("iepsr", 0x807)
3460 .Case("primask", 0x810)
3461 .Case("basepri", 0x811)
3462 .Case("basepri_max", 0x812)
3463 .Case("faultmask", 0x813)
3464 .Case("control", 0x814)
3467 if (FlagsVal
== ~0U)
3468 return MatchOperand_NoMatch
;
3470 if (!hasV7Ops() && FlagsVal
>= 0x811 && FlagsVal
<= 0x813)
3471 // basepri, basepri_max and faultmask only valid for V7m.
3472 return MatchOperand_NoMatch
;
3474 Parser
.Lex(); // Eat identifier token.
3475 Operands
.push_back(ARMOperand::CreateMSRMask(FlagsVal
, S
));
3476 return MatchOperand_Success
;
3479 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3480 size_t Start
= 0, Next
= Mask
.find('_');
3481 StringRef Flags
= "";
3482 std::string SpecReg
= Mask
.slice(Start
, Next
).lower();
3483 if (Next
!= StringRef::npos
)
3484 Flags
= Mask
.slice(Next
+1, Mask
.size());
3486 // FlagsVal contains the complete mask:
3488 // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3489 unsigned FlagsVal
= 0;
3491 if (SpecReg
== "apsr") {
3492 FlagsVal
= StringSwitch
<unsigned>(Flags
)
3493 .Case("nzcvq", 0x8) // same as CPSR_f
3494 .Case("g", 0x4) // same as CPSR_s
3495 .Case("nzcvqg", 0xc) // same as CPSR_fs
3498 if (FlagsVal
== ~0U) {
3500 return MatchOperand_NoMatch
;
3502 FlagsVal
= 8; // No flag
3504 } else if (SpecReg
== "cpsr" || SpecReg
== "spsr") {
3505 // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3506 if (Flags
== "all" || Flags
== "")
3508 for (int i
= 0, e
= Flags
.size(); i
!= e
; ++i
) {
3509 unsigned Flag
= StringSwitch
<unsigned>(Flags
.substr(i
, 1))
3516 // If some specific flag is already set, it means that some letter is
3517 // present more than once, this is not acceptable.
3518 if (FlagsVal
== ~0U || (FlagsVal
& Flag
))
3519 return MatchOperand_NoMatch
;
3522 } else // No match for special register.
3523 return MatchOperand_NoMatch
;
3525 // Special register without flags is NOT equivalent to "fc" flags.
3526 // NOTE: This is a divergence from gas' behavior. Uncommenting the following
3527 // two lines would enable gas compatibility at the expense of breaking
3533 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3534 if (SpecReg
== "spsr")
3537 Parser
.Lex(); // Eat identifier token.
3538 Operands
.push_back(ARMOperand::CreateMSRMask(FlagsVal
, S
));
3539 return MatchOperand_Success
;
3542 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
3543 parsePKHImm(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
, StringRef Op
,
3544 int Low
, int High
) {
3545 const AsmToken
&Tok
= Parser
.getTok();
3546 if (Tok
.isNot(AsmToken::Identifier
)) {
3547 Error(Parser
.getTok().getLoc(), Op
+ " operand expected.");
3548 return MatchOperand_ParseFail
;
3550 StringRef ShiftName
= Tok
.getString();
3551 std::string LowerOp
= Op
.lower();
3552 std::string UpperOp
= Op
.upper();
3553 if (ShiftName
!= LowerOp
&& ShiftName
!= UpperOp
) {
3554 Error(Parser
.getTok().getLoc(), Op
+ " operand expected.");
3555 return MatchOperand_ParseFail
;
3557 Parser
.Lex(); // Eat shift type token.
3559 // There must be a '#' and a shift amount.
3560 if (Parser
.getTok().isNot(AsmToken::Hash
) &&
3561 Parser
.getTok().isNot(AsmToken::Dollar
)) {
3562 Error(Parser
.getTok().getLoc(), "'#' expected");
3563 return MatchOperand_ParseFail
;
3565 Parser
.Lex(); // Eat hash token.
3567 const MCExpr
*ShiftAmount
;
3568 SMLoc Loc
= Parser
.getTok().getLoc();
3570 if (getParser().parseExpression(ShiftAmount
, EndLoc
)) {
3571 Error(Loc
, "illegal expression");
3572 return MatchOperand_ParseFail
;
3574 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(ShiftAmount
);
3576 Error(Loc
, "constant expression expected");
3577 return MatchOperand_ParseFail
;
3579 int Val
= CE
->getValue();
3580 if (Val
< Low
|| Val
> High
) {
3581 Error(Loc
, "immediate value out of range");
3582 return MatchOperand_ParseFail
;
3585 Operands
.push_back(ARMOperand::CreateImm(CE
, Loc
, EndLoc
));
3587 return MatchOperand_Success
;
3590 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
3591 parseSetEndImm(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3592 const AsmToken
&Tok
= Parser
.getTok();
3593 SMLoc S
= Tok
.getLoc();
3594 if (Tok
.isNot(AsmToken::Identifier
)) {
3595 Error(S
, "'be' or 'le' operand expected");
3596 return MatchOperand_ParseFail
;
3598 int Val
= StringSwitch
<int>(Tok
.getString())
3602 Parser
.Lex(); // Eat the token.
3605 Error(S
, "'be' or 'le' operand expected");
3606 return MatchOperand_ParseFail
;
3608 Operands
.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val
,
3610 S
, Tok
.getEndLoc()));
3611 return MatchOperand_Success
;
3614 /// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3615 /// instructions. Legal values are:
3616 /// lsl #n 'n' in [0,31]
3617 /// asr #n 'n' in [1,32]
3618 /// n == 32 encoded as n == 0.
3619 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
3620 parseShifterImm(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3621 const AsmToken
&Tok
= Parser
.getTok();
3622 SMLoc S
= Tok
.getLoc();
3623 if (Tok
.isNot(AsmToken::Identifier
)) {
3624 Error(S
, "shift operator 'asr' or 'lsl' expected");
3625 return MatchOperand_ParseFail
;
3627 StringRef ShiftName
= Tok
.getString();
3629 if (ShiftName
== "lsl" || ShiftName
== "LSL")
3631 else if (ShiftName
== "asr" || ShiftName
== "ASR")
3634 Error(S
, "shift operator 'asr' or 'lsl' expected");
3635 return MatchOperand_ParseFail
;
3637 Parser
.Lex(); // Eat the operator.
3639 // A '#' and a shift amount.
3640 if (Parser
.getTok().isNot(AsmToken::Hash
) &&
3641 Parser
.getTok().isNot(AsmToken::Dollar
)) {
3642 Error(Parser
.getTok().getLoc(), "'#' expected");
3643 return MatchOperand_ParseFail
;
3645 Parser
.Lex(); // Eat hash token.
3646 SMLoc ExLoc
= Parser
.getTok().getLoc();
3648 const MCExpr
*ShiftAmount
;
3650 if (getParser().parseExpression(ShiftAmount
, EndLoc
)) {
3651 Error(ExLoc
, "malformed shift expression");
3652 return MatchOperand_ParseFail
;
3654 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(ShiftAmount
);
3656 Error(ExLoc
, "shift amount must be an immediate");
3657 return MatchOperand_ParseFail
;
3660 int64_t Val
= CE
->getValue();
3662 // Shift amount must be in [1,32]
3663 if (Val
< 1 || Val
> 32) {
3664 Error(ExLoc
, "'asr' shift amount must be in range [1,32]");
3665 return MatchOperand_ParseFail
;
3667 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3668 if (isThumb() && Val
== 32) {
3669 Error(ExLoc
, "'asr #32' shift amount not allowed in Thumb mode");
3670 return MatchOperand_ParseFail
;
3672 if (Val
== 32) Val
= 0;
3674 // Shift amount must be in [1,32]
3675 if (Val
< 0 || Val
> 31) {
3676 Error(ExLoc
, "'lsr' shift amount must be in range [0,31]");
3677 return MatchOperand_ParseFail
;
3681 Operands
.push_back(ARMOperand::CreateShifterImm(isASR
, Val
, S
, EndLoc
));
3683 return MatchOperand_Success
;
3686 /// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3687 /// of instructions. Legal values are:
3688 /// ror #n 'n' in {0, 8, 16, 24}
3689 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
3690 parseRotImm(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3691 const AsmToken
&Tok
= Parser
.getTok();
3692 SMLoc S
= Tok
.getLoc();
3693 if (Tok
.isNot(AsmToken::Identifier
))
3694 return MatchOperand_NoMatch
;
3695 StringRef ShiftName
= Tok
.getString();
3696 if (ShiftName
!= "ror" && ShiftName
!= "ROR")
3697 return MatchOperand_NoMatch
;
3698 Parser
.Lex(); // Eat the operator.
3700 // A '#' and a rotate amount.
3701 if (Parser
.getTok().isNot(AsmToken::Hash
) &&
3702 Parser
.getTok().isNot(AsmToken::Dollar
)) {
3703 Error(Parser
.getTok().getLoc(), "'#' expected");
3704 return MatchOperand_ParseFail
;
3706 Parser
.Lex(); // Eat hash token.
3707 SMLoc ExLoc
= Parser
.getTok().getLoc();
3709 const MCExpr
*ShiftAmount
;
3711 if (getParser().parseExpression(ShiftAmount
, EndLoc
)) {
3712 Error(ExLoc
, "malformed rotate expression");
3713 return MatchOperand_ParseFail
;
3715 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(ShiftAmount
);
3717 Error(ExLoc
, "rotate amount must be an immediate");
3718 return MatchOperand_ParseFail
;
3721 int64_t Val
= CE
->getValue();
3722 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
3723 // normally, zero is represented in asm by omitting the rotate operand
3725 if (Val
!= 8 && Val
!= 16 && Val
!= 24 && Val
!= 0) {
3726 Error(ExLoc
, "'ror' rotate amount must be 8, 16, or 24");
3727 return MatchOperand_ParseFail
;
3730 Operands
.push_back(ARMOperand::CreateRotImm(Val
, S
, EndLoc
));
3732 return MatchOperand_Success
;
3735 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
3736 parseBitfield(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3737 SMLoc S
= Parser
.getTok().getLoc();
3738 // The bitfield descriptor is really two operands, the LSB and the width.
3739 if (Parser
.getTok().isNot(AsmToken::Hash
) &&
3740 Parser
.getTok().isNot(AsmToken::Dollar
)) {
3741 Error(Parser
.getTok().getLoc(), "'#' expected");
3742 return MatchOperand_ParseFail
;
3744 Parser
.Lex(); // Eat hash token.
3746 const MCExpr
*LSBExpr
;
3747 SMLoc E
= Parser
.getTok().getLoc();
3748 if (getParser().parseExpression(LSBExpr
)) {
3749 Error(E
, "malformed immediate expression");
3750 return MatchOperand_ParseFail
;
3752 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(LSBExpr
);
3754 Error(E
, "'lsb' operand must be an immediate");
3755 return MatchOperand_ParseFail
;
3758 int64_t LSB
= CE
->getValue();
3759 // The LSB must be in the range [0,31]
3760 if (LSB
< 0 || LSB
> 31) {
3761 Error(E
, "'lsb' operand must be in the range [0,31]");
3762 return MatchOperand_ParseFail
;
3764 E
= Parser
.getTok().getLoc();
3766 // Expect another immediate operand.
3767 if (Parser
.getTok().isNot(AsmToken::Comma
)) {
3768 Error(Parser
.getTok().getLoc(), "too few operands");
3769 return MatchOperand_ParseFail
;
3771 Parser
.Lex(); // Eat hash token.
3772 if (Parser
.getTok().isNot(AsmToken::Hash
) &&
3773 Parser
.getTok().isNot(AsmToken::Dollar
)) {
3774 Error(Parser
.getTok().getLoc(), "'#' expected");
3775 return MatchOperand_ParseFail
;
3777 Parser
.Lex(); // Eat hash token.
3779 const MCExpr
*WidthExpr
;
3781 if (getParser().parseExpression(WidthExpr
, EndLoc
)) {
3782 Error(E
, "malformed immediate expression");
3783 return MatchOperand_ParseFail
;
3785 CE
= dyn_cast
<MCConstantExpr
>(WidthExpr
);
3787 Error(E
, "'width' operand must be an immediate");
3788 return MatchOperand_ParseFail
;
3791 int64_t Width
= CE
->getValue();
3792 // The LSB must be in the range [1,32-lsb]
3793 if (Width
< 1 || Width
> 32 - LSB
) {
3794 Error(E
, "'width' operand must be in the range [1,32-lsb]");
3795 return MatchOperand_ParseFail
;
3798 Operands
.push_back(ARMOperand::CreateBitfield(LSB
, Width
, S
, EndLoc
));
3800 return MatchOperand_Success
;
3803 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
3804 parsePostIdxReg(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3805 // Check for a post-index addressing register operand. Specifically:
3806 // postidx_reg := '+' register {, shift}
3807 // | '-' register {, shift}
3808 // | register {, shift}
3810 // This method must return MatchOperand_NoMatch without consuming any tokens
3811 // in the case where there is no match, as other alternatives take other
3813 AsmToken Tok
= Parser
.getTok();
3814 SMLoc S
= Tok
.getLoc();
3815 bool haveEaten
= false;
3817 if (Tok
.is(AsmToken::Plus
)) {
3818 Parser
.Lex(); // Eat the '+' token.
3820 } else if (Tok
.is(AsmToken::Minus
)) {
3821 Parser
.Lex(); // Eat the '-' token.
3826 SMLoc E
= Parser
.getTok().getEndLoc();
3827 int Reg
= tryParseRegister();
3830 return MatchOperand_NoMatch
;
3831 Error(Parser
.getTok().getLoc(), "register expected");
3832 return MatchOperand_ParseFail
;
3835 ARM_AM::ShiftOpc ShiftTy
= ARM_AM::no_shift
;
3836 unsigned ShiftImm
= 0;
3837 if (Parser
.getTok().is(AsmToken::Comma
)) {
3838 Parser
.Lex(); // Eat the ','.
3839 if (parseMemRegOffsetShift(ShiftTy
, ShiftImm
))
3840 return MatchOperand_ParseFail
;
3842 // FIXME: Only approximates end...may include intervening whitespace.
3843 E
= Parser
.getTok().getLoc();
3846 Operands
.push_back(ARMOperand::CreatePostIdxReg(Reg
, isAdd
, ShiftTy
,
3849 return MatchOperand_Success
;
3852 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
3853 parseAM3Offset(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3854 // Check for a post-index addressing register operand. Specifically:
3855 // am3offset := '+' register
3862 // This method must return MatchOperand_NoMatch without consuming any tokens
3863 // in the case where there is no match, as other alternatives take other
3865 AsmToken Tok
= Parser
.getTok();
3866 SMLoc S
= Tok
.getLoc();
3868 // Do immediates first, as we always parse those if we have a '#'.
3869 if (Parser
.getTok().is(AsmToken::Hash
) ||
3870 Parser
.getTok().is(AsmToken::Dollar
)) {
3871 Parser
.Lex(); // Eat the '#'.
3872 // Explicitly look for a '-', as we need to encode negative zero
3874 bool isNegative
= Parser
.getTok().is(AsmToken::Minus
);
3875 const MCExpr
*Offset
;
3877 if (getParser().parseExpression(Offset
, E
))
3878 return MatchOperand_ParseFail
;
3879 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(Offset
);
3881 Error(S
, "constant expression expected");
3882 return MatchOperand_ParseFail
;
3884 // Negative zero is encoded as the flag value INT32_MIN.
3885 int32_t Val
= CE
->getValue();
3886 if (isNegative
&& Val
== 0)
3890 ARMOperand::CreateImm(MCConstantExpr::Create(Val
, getContext()), S
, E
));
3892 return MatchOperand_Success
;
3896 bool haveEaten
= false;
3898 if (Tok
.is(AsmToken::Plus
)) {
3899 Parser
.Lex(); // Eat the '+' token.
3901 } else if (Tok
.is(AsmToken::Minus
)) {
3902 Parser
.Lex(); // Eat the '-' token.
3907 Tok
= Parser
.getTok();
3908 int Reg
= tryParseRegister();
3911 return MatchOperand_NoMatch
;
3912 Error(Tok
.getLoc(), "register expected");
3913 return MatchOperand_ParseFail
;
3916 Operands
.push_back(ARMOperand::CreatePostIdxReg(Reg
, isAdd
, ARM_AM::no_shift
,
3917 0, S
, Tok
.getEndLoc()));
3919 return MatchOperand_Success
;
3922 /// cvtT2LdrdPre - Convert parsed operands to MCInst.
3923 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3924 /// when they refer multiple MIOperands inside a single one.
3926 cvtT2LdrdPre(MCInst
&Inst
,
3927 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3929 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
3930 ((ARMOperand
*)Operands
[3])->addRegOperands(Inst
, 1);
3931 // Create a writeback register dummy placeholder.
3932 Inst
.addOperand(MCOperand::CreateReg(0));
3934 ((ARMOperand
*)Operands
[4])->addMemImm8s4OffsetOperands(Inst
, 2);
3936 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
3939 /// cvtT2StrdPre - Convert parsed operands to MCInst.
3940 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3941 /// when they refer multiple MIOperands inside a single one.
3943 cvtT2StrdPre(MCInst
&Inst
,
3944 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3945 // Create a writeback register dummy placeholder.
3946 Inst
.addOperand(MCOperand::CreateReg(0));
3948 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
3949 ((ARMOperand
*)Operands
[3])->addRegOperands(Inst
, 1);
3951 ((ARMOperand
*)Operands
[4])->addMemImm8s4OffsetOperands(Inst
, 2);
3953 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
3956 /// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3957 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3958 /// when they refer multiple MIOperands inside a single one.
3960 cvtLdWriteBackRegT2AddrModeImm8(MCInst
&Inst
,
3961 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3962 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
3964 // Create a writeback register dummy placeholder.
3965 Inst
.addOperand(MCOperand::CreateImm(0));
3967 ((ARMOperand
*)Operands
[3])->addMemImm8OffsetOperands(Inst
, 2);
3968 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
3971 /// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3972 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3973 /// when they refer multiple MIOperands inside a single one.
3975 cvtStWriteBackRegT2AddrModeImm8(MCInst
&Inst
,
3976 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3977 // Create a writeback register dummy placeholder.
3978 Inst
.addOperand(MCOperand::CreateImm(0));
3979 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
3980 ((ARMOperand
*)Operands
[3])->addMemImm8OffsetOperands(Inst
, 2);
3981 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
3984 /// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3985 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
3986 /// when they refer multiple MIOperands inside a single one.
3988 cvtLdWriteBackRegAddrMode2(MCInst
&Inst
,
3989 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
3990 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
3992 // Create a writeback register dummy placeholder.
3993 Inst
.addOperand(MCOperand::CreateImm(0));
3995 ((ARMOperand
*)Operands
[3])->addAddrMode2Operands(Inst
, 3);
3996 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
3999 /// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
4000 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4001 /// when they refer multiple MIOperands inside a single one.
4003 cvtLdWriteBackRegAddrModeImm12(MCInst
&Inst
,
4004 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4005 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
4007 // Create a writeback register dummy placeholder.
4008 Inst
.addOperand(MCOperand::CreateImm(0));
4010 ((ARMOperand
*)Operands
[3])->addMemImm12OffsetOperands(Inst
, 2);
4011 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4015 /// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
4016 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4017 /// when they refer multiple MIOperands inside a single one.
4019 cvtStWriteBackRegAddrModeImm12(MCInst
&Inst
,
4020 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4021 // Create a writeback register dummy placeholder.
4022 Inst
.addOperand(MCOperand::CreateImm(0));
4023 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
4024 ((ARMOperand
*)Operands
[3])->addMemImm12OffsetOperands(Inst
, 2);
4025 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4028 /// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
4029 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4030 /// when they refer multiple MIOperands inside a single one.
4032 cvtStWriteBackRegAddrMode2(MCInst
&Inst
,
4033 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4034 // Create a writeback register dummy placeholder.
4035 Inst
.addOperand(MCOperand::CreateImm(0));
4036 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
4037 ((ARMOperand
*)Operands
[3])->addAddrMode2Operands(Inst
, 3);
4038 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4041 /// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4042 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4043 /// when they refer multiple MIOperands inside a single one.
4045 cvtStWriteBackRegAddrMode3(MCInst
&Inst
,
4046 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4047 // Create a writeback register dummy placeholder.
4048 Inst
.addOperand(MCOperand::CreateImm(0));
4049 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
4050 ((ARMOperand
*)Operands
[3])->addAddrMode3Operands(Inst
, 3);
4051 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4054 /// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
4055 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4056 /// when they refer multiple MIOperands inside a single one.
4058 cvtLdExtTWriteBackImm(MCInst
&Inst
,
4059 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4061 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
4062 // Create a writeback register dummy placeholder.
4063 Inst
.addOperand(MCOperand::CreateImm(0));
4065 ((ARMOperand
*)Operands
[3])->addMemNoOffsetOperands(Inst
, 1);
4067 ((ARMOperand
*)Operands
[4])->addPostIdxImm8Operands(Inst
, 1);
4069 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4072 /// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
4073 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4074 /// when they refer multiple MIOperands inside a single one.
4076 cvtLdExtTWriteBackReg(MCInst
&Inst
,
4077 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4079 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
4080 // Create a writeback register dummy placeholder.
4081 Inst
.addOperand(MCOperand::CreateImm(0));
4083 ((ARMOperand
*)Operands
[3])->addMemNoOffsetOperands(Inst
, 1);
4085 ((ARMOperand
*)Operands
[4])->addPostIdxRegOperands(Inst
, 2);
4087 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4090 /// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
4091 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4092 /// when they refer multiple MIOperands inside a single one.
4094 cvtStExtTWriteBackImm(MCInst
&Inst
,
4095 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4096 // Create a writeback register dummy placeholder.
4097 Inst
.addOperand(MCOperand::CreateImm(0));
4099 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
4101 ((ARMOperand
*)Operands
[3])->addMemNoOffsetOperands(Inst
, 1);
4103 ((ARMOperand
*)Operands
[4])->addPostIdxImm8Operands(Inst
, 1);
4105 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4108 /// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
4109 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4110 /// when they refer multiple MIOperands inside a single one.
4112 cvtStExtTWriteBackReg(MCInst
&Inst
,
4113 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4114 // Create a writeback register dummy placeholder.
4115 Inst
.addOperand(MCOperand::CreateImm(0));
4117 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
4119 ((ARMOperand
*)Operands
[3])->addMemNoOffsetOperands(Inst
, 1);
4121 ((ARMOperand
*)Operands
[4])->addPostIdxRegOperands(Inst
, 2);
4123 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4126 /// cvtLdrdPre - Convert parsed operands to MCInst.
4127 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4128 /// when they refer multiple MIOperands inside a single one.
4130 cvtLdrdPre(MCInst
&Inst
,
4131 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4133 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
4134 ((ARMOperand
*)Operands
[3])->addRegOperands(Inst
, 1);
4135 // Create a writeback register dummy placeholder.
4136 Inst
.addOperand(MCOperand::CreateImm(0));
4138 ((ARMOperand
*)Operands
[4])->addAddrMode3Operands(Inst
, 3);
4140 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4143 /// cvtStrdPre - Convert parsed operands to MCInst.
4144 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4145 /// when they refer multiple MIOperands inside a single one.
4147 cvtStrdPre(MCInst
&Inst
,
4148 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4149 // Create a writeback register dummy placeholder.
4150 Inst
.addOperand(MCOperand::CreateImm(0));
4152 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
4153 ((ARMOperand
*)Operands
[3])->addRegOperands(Inst
, 1);
4155 ((ARMOperand
*)Operands
[4])->addAddrMode3Operands(Inst
, 3);
4157 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4160 /// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4161 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4162 /// when they refer multiple MIOperands inside a single one.
4164 cvtLdWriteBackRegAddrMode3(MCInst
&Inst
,
4165 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4166 ((ARMOperand
*)Operands
[2])->addRegOperands(Inst
, 1);
4167 // Create a writeback register dummy placeholder.
4168 Inst
.addOperand(MCOperand::CreateImm(0));
4169 ((ARMOperand
*)Operands
[3])->addAddrMode3Operands(Inst
, 3);
4170 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4173 /// cvtThumbMultiply - Convert parsed operands to MCInst.
4174 /// Needed here because the Asm Gen Matcher can't handle properly tied operands
4175 /// when they refer multiple MIOperands inside a single one.
4177 cvtThumbMultiply(MCInst
&Inst
,
4178 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4179 ((ARMOperand
*)Operands
[3])->addRegOperands(Inst
, 1);
4180 ((ARMOperand
*)Operands
[1])->addCCOutOperands(Inst
, 1);
4181 // If we have a three-operand form, make sure to set Rn to be the operand
4182 // that isn't the same as Rd.
4184 if (Operands
.size() == 6 &&
4185 ((ARMOperand
*)Operands
[4])->getReg() ==
4186 ((ARMOperand
*)Operands
[3])->getReg())
4188 ((ARMOperand
*)Operands
[RegOp
])->addRegOperands(Inst
, 1);
4189 Inst
.addOperand(Inst
.getOperand(0));
4190 ((ARMOperand
*)Operands
[2])->addCondCodeOperands(Inst
, 2);
4194 cvtVLDwbFixed(MCInst
&Inst
,
4195 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4197 ((ARMOperand
*)Operands
[3])->addVecListOperands(Inst
, 1);
4198 // Create a writeback register dummy placeholder.
4199 Inst
.addOperand(MCOperand::CreateImm(0));
4201 ((ARMOperand
*)Operands
[4])->addAlignedMemoryOperands(Inst
, 2);
4203 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4207 cvtVLDwbRegister(MCInst
&Inst
,
4208 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4210 ((ARMOperand
*)Operands
[3])->addVecListOperands(Inst
, 1);
4211 // Create a writeback register dummy placeholder.
4212 Inst
.addOperand(MCOperand::CreateImm(0));
4214 ((ARMOperand
*)Operands
[4])->addAlignedMemoryOperands(Inst
, 2);
4216 ((ARMOperand
*)Operands
[5])->addRegOperands(Inst
, 1);
4218 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4222 cvtVSTwbFixed(MCInst
&Inst
,
4223 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4224 // Create a writeback register dummy placeholder.
4225 Inst
.addOperand(MCOperand::CreateImm(0));
4227 ((ARMOperand
*)Operands
[4])->addAlignedMemoryOperands(Inst
, 2);
4229 ((ARMOperand
*)Operands
[3])->addVecListOperands(Inst
, 1);
4231 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4235 cvtVSTwbRegister(MCInst
&Inst
,
4236 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4237 // Create a writeback register dummy placeholder.
4238 Inst
.addOperand(MCOperand::CreateImm(0));
4240 ((ARMOperand
*)Operands
[4])->addAlignedMemoryOperands(Inst
, 2);
4242 ((ARMOperand
*)Operands
[5])->addRegOperands(Inst
, 1);
4244 ((ARMOperand
*)Operands
[3])->addVecListOperands(Inst
, 1);
4246 ((ARMOperand
*)Operands
[1])->addCondCodeOperands(Inst
, 2);
4249 /// Parse an ARM memory expression, return false if successful else return true
4250 /// or an error. The first token must be a '[' when called.
4252 parseMemory(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4254 assert(Parser
.getTok().is(AsmToken::LBrac
) &&
4255 "Token is not a Left Bracket");
4256 S
= Parser
.getTok().getLoc();
4257 Parser
.Lex(); // Eat left bracket token.
4259 const AsmToken
&BaseRegTok
= Parser
.getTok();
4260 int BaseRegNum
= tryParseRegister();
4261 if (BaseRegNum
== -1)
4262 return Error(BaseRegTok
.getLoc(), "register expected");
4264 // The next token must either be a comma, a colon or a closing bracket.
4265 const AsmToken
&Tok
= Parser
.getTok();
4266 if (!Tok
.is(AsmToken::Colon
) && !Tok
.is(AsmToken::Comma
) &&
4267 !Tok
.is(AsmToken::RBrac
))
4268 return Error(Tok
.getLoc(), "malformed memory operand");
4270 if (Tok
.is(AsmToken::RBrac
)) {
4271 E
= Tok
.getEndLoc();
4272 Parser
.Lex(); // Eat right bracket token.
4274 Operands
.push_back(ARMOperand::CreateMem(BaseRegNum
, 0, 0, ARM_AM::no_shift
,
4275 0, 0, false, S
, E
));
4277 // If there's a pre-indexing writeback marker, '!', just add it as a token
4278 // operand. It's rather odd, but syntactically valid.
4279 if (Parser
.getTok().is(AsmToken::Exclaim
)) {
4280 Operands
.push_back(ARMOperand::CreateToken("!",Parser
.getTok().getLoc()));
4281 Parser
.Lex(); // Eat the '!'.
4287 assert((Tok
.is(AsmToken::Colon
) || Tok
.is(AsmToken::Comma
)) &&
4288 "Lost colon or comma in memory operand?!");
4289 if (Tok
.is(AsmToken::Comma
)) {
4290 Parser
.Lex(); // Eat the comma.
4293 // If we have a ':', it's an alignment specifier.
4294 if (Parser
.getTok().is(AsmToken::Colon
)) {
4295 Parser
.Lex(); // Eat the ':'.
4296 E
= Parser
.getTok().getLoc();
4299 if (getParser().parseExpression(Expr
))
4302 // The expression has to be a constant. Memory references with relocations
4303 // don't come through here, as they use the <label> forms of the relevant
4305 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(Expr
);
4307 return Error (E
, "constant expression expected");
4310 switch (CE
->getValue()) {
4313 "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4314 case 16: Align
= 2; break;
4315 case 32: Align
= 4; break;
4316 case 64: Align
= 8; break;
4317 case 128: Align
= 16; break;
4318 case 256: Align
= 32; break;
4321 // Now we should have the closing ']'
4322 if (Parser
.getTok().isNot(AsmToken::RBrac
))
4323 return Error(Parser
.getTok().getLoc(), "']' expected");
4324 E
= Parser
.getTok().getEndLoc();
4325 Parser
.Lex(); // Eat right bracket token.
4327 // Don't worry about range checking the value here. That's handled by
4328 // the is*() predicates.
4329 Operands
.push_back(ARMOperand::CreateMem(BaseRegNum
, 0, 0,
4330 ARM_AM::no_shift
, 0, Align
,
4333 // If there's a pre-indexing writeback marker, '!', just add it as a token
4335 if (Parser
.getTok().is(AsmToken::Exclaim
)) {
4336 Operands
.push_back(ARMOperand::CreateToken("!",Parser
.getTok().getLoc()));
4337 Parser
.Lex(); // Eat the '!'.
4343 // If we have a '#', it's an immediate offset, else assume it's a register
4344 // offset. Be friendly and also accept a plain integer (without a leading
4345 // hash) for gas compatibility.
4346 if (Parser
.getTok().is(AsmToken::Hash
) ||
4347 Parser
.getTok().is(AsmToken::Dollar
) ||
4348 Parser
.getTok().is(AsmToken::Integer
)) {
4349 if (Parser
.getTok().isNot(AsmToken::Integer
))
4350 Parser
.Lex(); // Eat the '#'.
4351 E
= Parser
.getTok().getLoc();
4353 bool isNegative
= getParser().getTok().is(AsmToken::Minus
);
4354 const MCExpr
*Offset
;
4355 if (getParser().parseExpression(Offset
))
4358 // The expression has to be a constant. Memory references with relocations
4359 // don't come through here, as they use the <label> forms of the relevant
4361 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(Offset
);
4363 return Error (E
, "constant expression expected");
4365 // If the constant was #-0, represent it as INT32_MIN.
4366 int32_t Val
= CE
->getValue();
4367 if (isNegative
&& Val
== 0)
4368 CE
= MCConstantExpr::Create(INT32_MIN
, getContext());
4370 // Now we should have the closing ']'
4371 if (Parser
.getTok().isNot(AsmToken::RBrac
))
4372 return Error(Parser
.getTok().getLoc(), "']' expected");
4373 E
= Parser
.getTok().getEndLoc();
4374 Parser
.Lex(); // Eat right bracket token.
4376 // Don't worry about range checking the value here. That's handled by
4377 // the is*() predicates.
4378 Operands
.push_back(ARMOperand::CreateMem(BaseRegNum
, CE
, 0,
4379 ARM_AM::no_shift
, 0, 0,
4382 // If there's a pre-indexing writeback marker, '!', just add it as a token
4384 if (Parser
.getTok().is(AsmToken::Exclaim
)) {
4385 Operands
.push_back(ARMOperand::CreateToken("!",Parser
.getTok().getLoc()));
4386 Parser
.Lex(); // Eat the '!'.
4392 // The register offset is optionally preceded by a '+' or '-'
4393 bool isNegative
= false;
4394 if (Parser
.getTok().is(AsmToken::Minus
)) {
4396 Parser
.Lex(); // Eat the '-'.
4397 } else if (Parser
.getTok().is(AsmToken::Plus
)) {
4399 Parser
.Lex(); // Eat the '+'.
4402 E
= Parser
.getTok().getLoc();
4403 int OffsetRegNum
= tryParseRegister();
4404 if (OffsetRegNum
== -1)
4405 return Error(E
, "register expected");
4407 // If there's a shift operator, handle it.
4408 ARM_AM::ShiftOpc ShiftType
= ARM_AM::no_shift
;
4409 unsigned ShiftImm
= 0;
4410 if (Parser
.getTok().is(AsmToken::Comma
)) {
4411 Parser
.Lex(); // Eat the ','.
4412 if (parseMemRegOffsetShift(ShiftType
, ShiftImm
))
4416 // Now we should have the closing ']'
4417 if (Parser
.getTok().isNot(AsmToken::RBrac
))
4418 return Error(Parser
.getTok().getLoc(), "']' expected");
4419 E
= Parser
.getTok().getEndLoc();
4420 Parser
.Lex(); // Eat right bracket token.
4422 Operands
.push_back(ARMOperand::CreateMem(BaseRegNum
, 0, OffsetRegNum
,
4423 ShiftType
, ShiftImm
, 0, isNegative
,
4426 // If there's a pre-indexing writeback marker, '!', just add it as a token
4428 if (Parser
.getTok().is(AsmToken::Exclaim
)) {
4429 Operands
.push_back(ARMOperand::CreateToken("!",Parser
.getTok().getLoc()));
4430 Parser
.Lex(); // Eat the '!'.
4436 /// parseMemRegOffsetShift - one of these two:
4437 /// ( lsl | lsr | asr | ror ) , # shift_amount
4439 /// return true if it parses a shift otherwise it returns false.
4440 bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc
&St
,
4442 SMLoc Loc
= Parser
.getTok().getLoc();
4443 const AsmToken
&Tok
= Parser
.getTok();
4444 if (Tok
.isNot(AsmToken::Identifier
))
4446 StringRef ShiftName
= Tok
.getString();
4447 if (ShiftName
== "lsl" || ShiftName
== "LSL" ||
4448 ShiftName
== "asl" || ShiftName
== "ASL")
4450 else if (ShiftName
== "lsr" || ShiftName
== "LSR")
4452 else if (ShiftName
== "asr" || ShiftName
== "ASR")
4454 else if (ShiftName
== "ror" || ShiftName
== "ROR")
4456 else if (ShiftName
== "rrx" || ShiftName
== "RRX")
4459 return Error(Loc
, "illegal shift operator");
4460 Parser
.Lex(); // Eat shift type token.
4462 // rrx stands alone.
4464 if (St
!= ARM_AM::rrx
) {
4465 Loc
= Parser
.getTok().getLoc();
4466 // A '#' and a shift amount.
4467 const AsmToken
&HashTok
= Parser
.getTok();
4468 if (HashTok
.isNot(AsmToken::Hash
) &&
4469 HashTok
.isNot(AsmToken::Dollar
))
4470 return Error(HashTok
.getLoc(), "'#' expected");
4471 Parser
.Lex(); // Eat hash token.
4474 if (getParser().parseExpression(Expr
))
4476 // Range check the immediate.
4477 // lsl, ror: 0 <= imm <= 31
4478 // lsr, asr: 0 <= imm <= 32
4479 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(Expr
);
4481 return Error(Loc
, "shift amount must be an immediate");
4482 int64_t Imm
= CE
->getValue();
4484 ((St
== ARM_AM::lsl
|| St
== ARM_AM::ror
) && Imm
> 31) ||
4485 ((St
== ARM_AM::lsr
|| St
== ARM_AM::asr
) && Imm
> 32))
4486 return Error(Loc
, "immediate shift value out of range");
4487 // If <ShiftTy> #0, turn it into a no_shift.
4490 // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
4499 /// parseFPImm - A floating point immediate expression operand.
4500 ARMAsmParser::OperandMatchResultTy
ARMAsmParser::
4501 parseFPImm(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4502 // Anything that can accept a floating point constant as an operand
4503 // needs to go through here, as the regular parseExpression is
4506 // This routine still creates a generic Immediate operand, containing
4507 // a bitcast of the 64-bit floating point value. The various operands
4508 // that accept floats can check whether the value is valid for them
4509 // via the standard is*() predicates.
4511 SMLoc S
= Parser
.getTok().getLoc();
4513 if (Parser
.getTok().isNot(AsmToken::Hash
) &&
4514 Parser
.getTok().isNot(AsmToken::Dollar
))
4515 return MatchOperand_NoMatch
;
4517 // Disambiguate the VMOV forms that can accept an FP immediate.
4518 // vmov.f32 <sreg>, #imm
4519 // vmov.f64 <dreg>, #imm
4520 // vmov.f32 <dreg>, #imm @ vector f32x2
4521 // vmov.f32 <qreg>, #imm @ vector f32x4
4523 // There are also the NEON VMOV instructions which expect an
4524 // integer constant. Make sure we don't try to parse an FPImm
4526 // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4527 ARMOperand
*TyOp
= static_cast<ARMOperand
*>(Operands
[2]);
4528 if (!TyOp
->isToken() || (TyOp
->getToken() != ".f32" &&
4529 TyOp
->getToken() != ".f64"))
4530 return MatchOperand_NoMatch
;
4532 Parser
.Lex(); // Eat the '#'.
4534 // Handle negation, as that still comes through as a separate token.
4535 bool isNegative
= false;
4536 if (Parser
.getTok().is(AsmToken::Minus
)) {
4540 const AsmToken
&Tok
= Parser
.getTok();
4541 SMLoc Loc
= Tok
.getLoc();
4542 if (Tok
.is(AsmToken::Real
)) {
4543 APFloat
RealVal(APFloat::IEEEsingle
, Tok
.getString());
4544 uint64_t IntVal
= RealVal
.bitcastToAPInt().getZExtValue();
4545 // If we had a '-' in front, toggle the sign bit.
4546 IntVal
^= (uint64_t)isNegative
<< 31;
4547 Parser
.Lex(); // Eat the token.
4548 Operands
.push_back(ARMOperand::CreateImm(
4549 MCConstantExpr::Create(IntVal
, getContext()),
4550 S
, Parser
.getTok().getLoc()));
4551 return MatchOperand_Success
;
4553 // Also handle plain integers. Instructions which allow floating point
4554 // immediates also allow a raw encoded 8-bit value.
4555 if (Tok
.is(AsmToken::Integer
)) {
4556 int64_t Val
= Tok
.getIntVal();
4557 Parser
.Lex(); // Eat the token.
4558 if (Val
> 255 || Val
< 0) {
4559 Error(Loc
, "encoded floating point value out of range");
4560 return MatchOperand_ParseFail
;
4562 double RealVal
= ARM_AM::getFPImmFloat(Val
);
4563 Val
= APFloat(APFloat::IEEEdouble
, RealVal
).bitcastToAPInt().getZExtValue();
4564 Operands
.push_back(ARMOperand::CreateImm(
4565 MCConstantExpr::Create(Val
, getContext()), S
,
4566 Parser
.getTok().getLoc()));
4567 return MatchOperand_Success
;
4570 Error(Loc
, "invalid floating point immediate");
4571 return MatchOperand_ParseFail
;
4574 /// Parse a arm instruction operand. For now this parses the operand regardless
4575 /// of the mnemonic.
4576 bool ARMAsmParser::parseOperand(SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
,
4577 StringRef Mnemonic
) {
4580 // Check if the current operand has a custom associated parser, if so, try to
4581 // custom parse the operand, or fallback to the general approach.
4582 OperandMatchResultTy ResTy
= MatchOperandParserImpl(Operands
, Mnemonic
);
4583 if (ResTy
== MatchOperand_Success
)
4585 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4586 // there was a match, but an error occurred, in which case, just return that
4587 // the operand parsing failed.
4588 if (ResTy
== MatchOperand_ParseFail
)
4591 switch (getLexer().getKind()) {
4593 Error(Parser
.getTok().getLoc(), "unexpected token in operand");
4595 case AsmToken::Identifier
: {
4596 if (!tryParseRegisterWithWriteBack(Operands
))
4598 int Res
= tryParseShiftRegister(Operands
);
4599 if (Res
== 0) // success
4601 else if (Res
== -1) // irrecoverable error
4603 // If this is VMRS, check for the apsr_nzcv operand.
4604 if (Mnemonic
== "vmrs" &&
4605 Parser
.getTok().getString().equals_lower("apsr_nzcv")) {
4606 S
= Parser
.getTok().getLoc();
4608 Operands
.push_back(ARMOperand::CreateToken("APSR_nzcv", S
));
4612 // Fall though for the Identifier case that is not a register or a
4615 case AsmToken::LParen
: // parenthesized expressions like (_strcmp-4)
4616 case AsmToken::Integer
: // things like 1f and 2b as a branch targets
4617 case AsmToken::String
: // quoted label names.
4618 case AsmToken::Dot
: { // . as a branch target
4619 // This was not a register so parse other operands that start with an
4620 // identifier (like labels) as expressions and create them as immediates.
4621 const MCExpr
*IdVal
;
4622 S
= Parser
.getTok().getLoc();
4623 if (getParser().parseExpression(IdVal
))
4625 E
= SMLoc::getFromPointer(Parser
.getTok().getLoc().getPointer() - 1);
4626 Operands
.push_back(ARMOperand::CreateImm(IdVal
, S
, E
));
4629 case AsmToken::LBrac
:
4630 return parseMemory(Operands
);
4631 case AsmToken::LCurly
:
4632 return parseRegisterList(Operands
);
4633 case AsmToken::Dollar
:
4634 case AsmToken::Hash
: {
4635 // #42 -> immediate.
4636 S
= Parser
.getTok().getLoc();
4639 if (Parser
.getTok().isNot(AsmToken::Colon
)) {
4640 bool isNegative
= Parser
.getTok().is(AsmToken::Minus
);
4641 const MCExpr
*ImmVal
;
4642 if (getParser().parseExpression(ImmVal
))
4644 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(ImmVal
);
4646 int32_t Val
= CE
->getValue();
4647 if (isNegative
&& Val
== 0)
4648 ImmVal
= MCConstantExpr::Create(INT32_MIN
, getContext());
4650 E
= SMLoc::getFromPointer(Parser
.getTok().getLoc().getPointer() - 1);
4651 Operands
.push_back(ARMOperand::CreateImm(ImmVal
, S
, E
));
4653 // There can be a trailing '!' on operands that we want as a separate
4654 // '!' Token operand. Handle that here. For example, the compatibilty
4655 // alias for 'srsdb sp!, #imm' is 'srsdb #imm!'.
4656 if (Parser
.getTok().is(AsmToken::Exclaim
)) {
4657 Operands
.push_back(ARMOperand::CreateToken(Parser
.getTok().getString(),
4658 Parser
.getTok().getLoc()));
4659 Parser
.Lex(); // Eat exclaim token
4663 // w/ a ':' after the '#', it's just like a plain ':'.
4666 case AsmToken::Colon
: {
4667 // ":lower16:" and ":upper16:" expression prefixes
4668 // FIXME: Check it's an expression prefix,
4669 // e.g. (FOO - :lower16:BAR) isn't legal.
4670 ARMMCExpr::VariantKind RefKind
;
4671 if (parsePrefix(RefKind
))
4674 const MCExpr
*SubExprVal
;
4675 if (getParser().parseExpression(SubExprVal
))
4678 const MCExpr
*ExprVal
= ARMMCExpr::Create(RefKind
, SubExprVal
,
4680 E
= SMLoc::getFromPointer(Parser
.getTok().getLoc().getPointer() - 1);
4681 Operands
.push_back(ARMOperand::CreateImm(ExprVal
, S
, E
));
4687 // parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
4688 // :lower16: and :upper16:.
4689 bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind
&RefKind
) {
4690 RefKind
= ARMMCExpr::VK_ARM_None
;
4692 // :lower16: and :upper16: modifiers
4693 assert(getLexer().is(AsmToken::Colon
) && "expected a :");
4694 Parser
.Lex(); // Eat ':'
4696 if (getLexer().isNot(AsmToken::Identifier
)) {
4697 Error(Parser
.getTok().getLoc(), "expected prefix identifier in operand");
4701 StringRef IDVal
= Parser
.getTok().getIdentifier();
4702 if (IDVal
== "lower16") {
4703 RefKind
= ARMMCExpr::VK_ARM_LO16
;
4704 } else if (IDVal
== "upper16") {
4705 RefKind
= ARMMCExpr::VK_ARM_HI16
;
4707 Error(Parser
.getTok().getLoc(), "unexpected prefix in operand");
4712 if (getLexer().isNot(AsmToken::Colon
)) {
4713 Error(Parser
.getTok().getLoc(), "unexpected token after prefix");
4716 Parser
.Lex(); // Eat the last ':'
4720 /// \brief Given a mnemonic, split out possible predication code and carry
4721 /// setting letters to form a canonical mnemonic and flags.
4723 // FIXME: Would be nice to autogen this.
4724 // FIXME: This is a bit of a maze of special cases.
4725 StringRef
ARMAsmParser::splitMnemonic(StringRef Mnemonic
,
4726 unsigned &PredicationCode
,
4728 unsigned &ProcessorIMod
,
4729 StringRef
&ITMask
) {
4730 PredicationCode
= ARMCC::AL
;
4731 CarrySetting
= false;
4734 // Ignore some mnemonics we know aren't predicated forms.
4736 // FIXME: Would be nice to autogen this.
4737 if ((Mnemonic
== "movs" && isThumb()) ||
4738 Mnemonic
== "teq" || Mnemonic
== "vceq" || Mnemonic
== "svc" ||
4739 Mnemonic
== "mls" || Mnemonic
== "smmls" || Mnemonic
== "vcls" ||
4740 Mnemonic
== "vmls" || Mnemonic
== "vnmls" || Mnemonic
== "vacge" ||
4741 Mnemonic
== "vcge" || Mnemonic
== "vclt" || Mnemonic
== "vacgt" ||
4742 Mnemonic
== "vcgt" || Mnemonic
== "vcle" || Mnemonic
== "smlal" ||
4743 Mnemonic
== "umaal" || Mnemonic
== "umlal" || Mnemonic
== "vabal" ||
4744 Mnemonic
== "vmlal" || Mnemonic
== "vpadal" || Mnemonic
== "vqdmlal" ||
4745 Mnemonic
== "fmuls")
4748 // First, split out any predication code. Ignore mnemonics we know aren't
4749 // predicated but do have a carry-set and so weren't caught above.
4750 if (Mnemonic
!= "adcs" && Mnemonic
!= "bics" && Mnemonic
!= "movs" &&
4751 Mnemonic
!= "muls" && Mnemonic
!= "smlals" && Mnemonic
!= "smulls" &&
4752 Mnemonic
!= "umlals" && Mnemonic
!= "umulls" && Mnemonic
!= "lsls" &&
4753 Mnemonic
!= "sbcs" && Mnemonic
!= "rscs") {
4754 unsigned CC
= StringSwitch
<unsigned>(Mnemonic
.substr(Mnemonic
.size()-2))
4755 .Case("eq", ARMCC::EQ
)
4756 .Case("ne", ARMCC::NE
)
4757 .Case("hs", ARMCC::HS
)
4758 .Case("cs", ARMCC::HS
)
4759 .Case("lo", ARMCC::LO
)
4760 .Case("cc", ARMCC::LO
)
4761 .Case("mi", ARMCC::MI
)
4762 .Case("pl", ARMCC::PL
)
4763 .Case("vs", ARMCC::VS
)
4764 .Case("vc", ARMCC::VC
)
4765 .Case("hi", ARMCC::HI
)
4766 .Case("ls", ARMCC::LS
)
4767 .Case("ge", ARMCC::GE
)
4768 .Case("lt", ARMCC::LT
)
4769 .Case("gt", ARMCC::GT
)
4770 .Case("le", ARMCC::LE
)
4771 .Case("al", ARMCC::AL
)
4774 Mnemonic
= Mnemonic
.slice(0, Mnemonic
.size() - 2);
4775 PredicationCode
= CC
;
4779 // Next, determine if we have a carry setting bit. We explicitly ignore all
4780 // the instructions we know end in 's'.
4781 if (Mnemonic
.endswith("s") &&
4782 !(Mnemonic
== "cps" || Mnemonic
== "mls" ||
4783 Mnemonic
== "mrs" || Mnemonic
== "smmls" || Mnemonic
== "vabs" ||
4784 Mnemonic
== "vcls" || Mnemonic
== "vmls" || Mnemonic
== "vmrs" ||
4785 Mnemonic
== "vnmls" || Mnemonic
== "vqabs" || Mnemonic
== "vrecps" ||
4786 Mnemonic
== "vrsqrts" || Mnemonic
== "srs" || Mnemonic
== "flds" ||
4787 Mnemonic
== "fmrs" || Mnemonic
== "fsqrts" || Mnemonic
== "fsubs" ||
4788 Mnemonic
== "fsts" || Mnemonic
== "fcpys" || Mnemonic
== "fdivs" ||
4789 Mnemonic
== "fmuls" || Mnemonic
== "fcmps" || Mnemonic
== "fcmpzs" ||
4790 Mnemonic
== "vfms" || Mnemonic
== "vfnms" ||
4791 (Mnemonic
== "movs" && isThumb()))) {
4792 Mnemonic
= Mnemonic
.slice(0, Mnemonic
.size() - 1);
4793 CarrySetting
= true;
4796 // The "cps" instruction can have a interrupt mode operand which is glued into
4797 // the mnemonic. Check if this is the case, split it and parse the imod op
4798 if (Mnemonic
.startswith("cps")) {
4799 // Split out any imod code.
4801 StringSwitch
<unsigned>(Mnemonic
.substr(Mnemonic
.size()-2, 2))
4802 .Case("ie", ARM_PROC::IE
)
4803 .Case("id", ARM_PROC::ID
)
4806 Mnemonic
= Mnemonic
.slice(0, Mnemonic
.size()-2);
4807 ProcessorIMod
= IMod
;
4811 // The "it" instruction has the condition mask on the end of the mnemonic.
4812 if (Mnemonic
.startswith("it")) {
4813 ITMask
= Mnemonic
.slice(2, Mnemonic
.size());
4814 Mnemonic
= Mnemonic
.slice(0, 2);
4820 /// \brief Given a canonical mnemonic, determine if the instruction ever allows
4821 /// inclusion of carry set or predication code operands.
4823 // FIXME: It would be nice to autogen this.
4825 getMnemonicAcceptInfo(StringRef Mnemonic
, bool &CanAcceptCarrySet
,
4826 bool &CanAcceptPredicationCode
) {
4827 if (Mnemonic
== "and" || Mnemonic
== "lsl" || Mnemonic
== "lsr" ||
4828 Mnemonic
== "rrx" || Mnemonic
== "ror" || Mnemonic
== "sub" ||
4829 Mnemonic
== "add" || Mnemonic
== "adc" ||
4830 Mnemonic
== "mul" || Mnemonic
== "bic" || Mnemonic
== "asr" ||
4831 Mnemonic
== "orr" || Mnemonic
== "mvn" ||
4832 Mnemonic
== "rsb" || Mnemonic
== "rsc" || Mnemonic
== "orn" ||
4833 Mnemonic
== "sbc" || Mnemonic
== "eor" || Mnemonic
== "neg" ||
4834 Mnemonic
== "vfm" || Mnemonic
== "vfnm" ||
4835 (!isThumb() && (Mnemonic
== "smull" || Mnemonic
== "mov" ||
4836 Mnemonic
== "mla" || Mnemonic
== "smlal" ||
4837 Mnemonic
== "umlal" || Mnemonic
== "umull"))) {
4838 CanAcceptCarrySet
= true;
4840 CanAcceptCarrySet
= false;
4842 if (Mnemonic
== "cbnz" || Mnemonic
== "setend" || Mnemonic
== "dmb" ||
4843 Mnemonic
== "cps" || Mnemonic
== "mcr2" || Mnemonic
== "it" ||
4844 Mnemonic
== "mcrr2" || Mnemonic
== "cbz" || Mnemonic
== "cdp2" ||
4845 Mnemonic
== "trap" || Mnemonic
== "mrc2" || Mnemonic
== "mrrc2" ||
4846 Mnemonic
== "dsb" || Mnemonic
== "isb" || Mnemonic
== "setend" ||
4847 (Mnemonic
== "clrex" && !isThumb()) ||
4848 (Mnemonic
== "nop" && isThumbOne()) ||
4849 ((Mnemonic
== "pld" || Mnemonic
== "pli" || Mnemonic
== "pldw" ||
4850 Mnemonic
== "ldc2" || Mnemonic
== "ldc2l" ||
4851 Mnemonic
== "stc2" || Mnemonic
== "stc2l") && !isThumb()) ||
4852 ((Mnemonic
.startswith("rfe") || Mnemonic
.startswith("srs")) &&
4854 Mnemonic
.startswith("cps") || (Mnemonic
== "movs" && isThumbOne())) {
4855 CanAcceptPredicationCode
= false;
4857 CanAcceptPredicationCode
= true;
4860 if (Mnemonic
== "bkpt" || Mnemonic
== "mcr" || Mnemonic
== "mcrr" ||
4861 Mnemonic
== "mrc" || Mnemonic
== "mrrc" || Mnemonic
== "cdp")
4862 CanAcceptPredicationCode
= false;
4866 bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic
,
4867 SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
4868 // FIXME: This is all horribly hacky. We really need a better way to deal
4869 // with optional operands like this in the matcher table.
4871 // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4872 // another does not. Specifically, the MOVW instruction does not. So we
4873 // special case it here and remove the defaulted (non-setting) cc_out
4874 // operand if that's the instruction we're trying to match.
4876 // We do this as post-processing of the explicit operands rather than just
4877 // conditionally adding the cc_out in the first place because we need
4878 // to check the type of the parsed immediate operand.
4879 if (Mnemonic
== "mov" && Operands
.size() > 4 && !isThumb() &&
4880 !static_cast<ARMOperand
*>(Operands
[4])->isARMSOImm() &&
4881 static_cast<ARMOperand
*>(Operands
[4])->isImm0_65535Expr() &&
4882 static_cast<ARMOperand
*>(Operands
[1])->getReg() == 0)
4885 // Register-register 'add' for thumb does not have a cc_out operand
4886 // when there are only two register operands.
4887 if (isThumb() && Mnemonic
== "add" && Operands
.size() == 5 &&
4888 static_cast<ARMOperand
*>(Operands
[3])->isReg() &&
4889 static_cast<ARMOperand
*>(Operands
[4])->isReg() &&
4890 static_cast<ARMOperand
*>(Operands
[1])->getReg() == 0)
4892 // Register-register 'add' for thumb does not have a cc_out operand
4893 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4894 // have to check the immediate range here since Thumb2 has a variant
4895 // that can handle a different range and has a cc_out operand.
4896 if (((isThumb() && Mnemonic
== "add") ||
4897 (isThumbTwo() && Mnemonic
== "sub")) &&
4898 Operands
.size() == 6 &&
4899 static_cast<ARMOperand
*>(Operands
[3])->isReg() &&
4900 static_cast<ARMOperand
*>(Operands
[4])->isReg() &&
4901 static_cast<ARMOperand
*>(Operands
[4])->getReg() == ARM::SP
&&
4902 static_cast<ARMOperand
*>(Operands
[1])->getReg() == 0 &&
4903 ((Mnemonic
== "add" &&static_cast<ARMOperand
*>(Operands
[5])->isReg()) ||
4904 static_cast<ARMOperand
*>(Operands
[5])->isImm0_1020s4()))
4906 // For Thumb2, add/sub immediate does not have a cc_out operand for the
4907 // imm0_4095 variant. That's the least-preferred variant when
4908 // selecting via the generic "add" mnemonic, so to know that we
4909 // should remove the cc_out operand, we have to explicitly check that
4910 // it's not one of the other variants. Ugh.
4911 if (isThumbTwo() && (Mnemonic
== "add" || Mnemonic
== "sub") &&
4912 Operands
.size() == 6 &&
4913 static_cast<ARMOperand
*>(Operands
[3])->isReg() &&
4914 static_cast<ARMOperand
*>(Operands
[4])->isReg() &&
4915 static_cast<ARMOperand
*>(Operands
[5])->isImm()) {
4916 // Nest conditions rather than one big 'if' statement for readability.
4918 // If either register is a high reg, it's either one of the SP
4919 // variants (handled above) or a 32-bit encoding, so we just
4920 // check against T3. If the second register is the PC, this is an
4921 // alternate form of ADR, which uses encoding T4, so check for that too.
4922 if ((!isARMLowRegister(static_cast<ARMOperand
*>(Operands
[3])->getReg()) ||
4923 !isARMLowRegister(static_cast<ARMOperand
*>(Operands
[4])->getReg())) &&
4924 static_cast<ARMOperand
*>(Operands
[4])->getReg() != ARM::PC
&&
4925 static_cast<ARMOperand
*>(Operands
[5])->isT2SOImm())
4927 // If both registers are low, we're in an IT block, and the immediate is
4928 // in range, we should use encoding T1 instead, which has a cc_out.
4930 isARMLowRegister(static_cast<ARMOperand
*>(Operands
[3])->getReg()) &&
4931 isARMLowRegister(static_cast<ARMOperand
*>(Operands
[4])->getReg()) &&
4932 static_cast<ARMOperand
*>(Operands
[5])->isImm0_7())
4935 // Otherwise, we use encoding T4, which does not have a cc_out
4940 // The thumb2 multiply instruction doesn't have a CCOut register, so
4941 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4942 // use the 16-bit encoding or not.
4943 if (isThumbTwo() && Mnemonic
== "mul" && Operands
.size() == 6 &&
4944 static_cast<ARMOperand
*>(Operands
[1])->getReg() == 0 &&
4945 static_cast<ARMOperand
*>(Operands
[3])->isReg() &&
4946 static_cast<ARMOperand
*>(Operands
[4])->isReg() &&
4947 static_cast<ARMOperand
*>(Operands
[5])->isReg() &&
4948 // If the registers aren't low regs, the destination reg isn't the
4949 // same as one of the source regs, or the cc_out operand is zero
4950 // outside of an IT block, we have to use the 32-bit encoding, so
4951 // remove the cc_out operand.
4952 (!isARMLowRegister(static_cast<ARMOperand
*>(Operands
[3])->getReg()) ||
4953 !isARMLowRegister(static_cast<ARMOperand
*>(Operands
[4])->getReg()) ||
4954 !isARMLowRegister(static_cast<ARMOperand
*>(Operands
[5])->getReg()) ||
4956 (static_cast<ARMOperand
*>(Operands
[3])->getReg() !=
4957 static_cast<ARMOperand
*>(Operands
[5])->getReg() &&
4958 static_cast<ARMOperand
*>(Operands
[3])->getReg() !=
4959 static_cast<ARMOperand
*>(Operands
[4])->getReg())))
4962 // Also check the 'mul' syntax variant that doesn't specify an explicit
4963 // destination register.
4964 if (isThumbTwo() && Mnemonic
== "mul" && Operands
.size() == 5 &&
4965 static_cast<ARMOperand
*>(Operands
[1])->getReg() == 0 &&
4966 static_cast<ARMOperand
*>(Operands
[3])->isReg() &&
4967 static_cast<ARMOperand
*>(Operands
[4])->isReg() &&
4968 // If the registers aren't low regs or the cc_out operand is zero
4969 // outside of an IT block, we have to use the 32-bit encoding, so
4970 // remove the cc_out operand.
4971 (!isARMLowRegister(static_cast<ARMOperand
*>(Operands
[3])->getReg()) ||
4972 !isARMLowRegister(static_cast<ARMOperand
*>(Operands
[4])->getReg()) ||
4978 // Register-register 'add/sub' for thumb does not have a cc_out operand
4979 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4980 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4981 // right, this will result in better diagnostics (which operand is off)
4983 if (isThumb() && (Mnemonic
== "add" || Mnemonic
== "sub") &&
4984 (Operands
.size() == 5 || Operands
.size() == 6) &&
4985 static_cast<ARMOperand
*>(Operands
[3])->isReg() &&
4986 static_cast<ARMOperand
*>(Operands
[3])->getReg() == ARM::SP
&&
4987 static_cast<ARMOperand
*>(Operands
[1])->getReg() == 0 &&
4988 (static_cast<ARMOperand
*>(Operands
[4])->isImm() ||
4989 (Operands
.size() == 6 &&
4990 static_cast<ARMOperand
*>(Operands
[5])->isImm())))
4996 static bool isDataTypeToken(StringRef Tok
) {
4997 return Tok
== ".8" || Tok
== ".16" || Tok
== ".32" || Tok
== ".64" ||
4998 Tok
== ".i8" || Tok
== ".i16" || Tok
== ".i32" || Tok
== ".i64" ||
4999 Tok
== ".u8" || Tok
== ".u16" || Tok
== ".u32" || Tok
== ".u64" ||
5000 Tok
== ".s8" || Tok
== ".s16" || Tok
== ".s32" || Tok
== ".s64" ||
5001 Tok
== ".p8" || Tok
== ".p16" || Tok
== ".f32" || Tok
== ".f64" ||
5002 Tok
== ".f" || Tok
== ".d";
5005 // FIXME: This bit should probably be handled via an explicit match class
5006 // in the .td files that matches the suffix instead of having it be
5007 // a literal string token the way it is now.
5008 static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic
, StringRef DT
) {
5009 return Mnemonic
.startswith("vldm") || Mnemonic
.startswith("vstm");
5012 static void applyMnemonicAliases(StringRef
&Mnemonic
, unsigned Features
);
5013 /// Parse an arm instruction mnemonic followed by its operands.
5014 bool ARMAsmParser::ParseInstruction(ParseInstructionInfo
&Info
, StringRef Name
,
5016 SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
5017 // Apply mnemonic aliases before doing anything else, as the destination
5018 // mnemnonic may include suffices and we want to handle them normally.
5019 // The generic tblgen'erated code does this later, at the start of
5020 // MatchInstructionImpl(), but that's too late for aliases that include
5021 // any sort of suffix.
5022 unsigned AvailableFeatures
= getAvailableFeatures();
5023 applyMnemonicAliases(Name
, AvailableFeatures
);
5025 // First check for the ARM-specific .req directive.
5026 if (Parser
.getTok().is(AsmToken::Identifier
) &&
5027 Parser
.getTok().getIdentifier() == ".req") {
5028 parseDirectiveReq(Name
, NameLoc
);
5029 // We always return 'error' for this, as we're done with this
5030 // statement and don't need to match the 'instruction."
5034 // Create the leading tokens for the mnemonic, split by '.' characters.
5035 size_t Start
= 0, Next
= Name
.find('.');
5036 StringRef Mnemonic
= Name
.slice(Start
, Next
);
5038 // Split out the predication code and carry setting flag from the mnemonic.
5039 unsigned PredicationCode
;
5040 unsigned ProcessorIMod
;
5043 Mnemonic
= splitMnemonic(Mnemonic
, PredicationCode
, CarrySetting
,
5044 ProcessorIMod
, ITMask
);
5046 // In Thumb1, only the branch (B) instruction can be predicated.
5047 if (isThumbOne() && PredicationCode
!= ARMCC::AL
&& Mnemonic
!= "b") {
5048 Parser
.eatToEndOfStatement();
5049 return Error(NameLoc
, "conditional execution not supported in Thumb1");
5052 Operands
.push_back(ARMOperand::CreateToken(Mnemonic
, NameLoc
));
5054 // Handle the IT instruction ITMask. Convert it to a bitmask. This
5055 // is the mask as it will be for the IT encoding if the conditional
5056 // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case
5057 // where the conditional bit0 is zero, the instruction post-processing
5058 // will adjust the mask accordingly.
5059 if (Mnemonic
== "it") {
5060 SMLoc Loc
= SMLoc::getFromPointer(NameLoc
.getPointer() + 2);
5061 if (ITMask
.size() > 3) {
5062 Parser
.eatToEndOfStatement();
5063 return Error(Loc
, "too many conditions on IT instruction");
5066 for (unsigned i
= ITMask
.size(); i
!= 0; --i
) {
5067 char pos
= ITMask
[i
- 1];
5068 if (pos
!= 't' && pos
!= 'e') {
5069 Parser
.eatToEndOfStatement();
5070 return Error(Loc
, "illegal IT block condition mask '" + ITMask
+ "'");
5073 if (ITMask
[i
- 1] == 't')
5076 Operands
.push_back(ARMOperand::CreateITMask(Mask
, Loc
));
5079 // FIXME: This is all a pretty gross hack. We should automatically handle
5080 // optional operands like this via tblgen.
5082 // Next, add the CCOut and ConditionCode operands, if needed.
5084 // For mnemonics which can ever incorporate a carry setting bit or predication
5085 // code, our matching model involves us always generating CCOut and
5086 // ConditionCode operands to match the mnemonic "as written" and then we let
5087 // the matcher deal with finding the right instruction or generating an
5088 // appropriate error.
5089 bool CanAcceptCarrySet
, CanAcceptPredicationCode
;
5090 getMnemonicAcceptInfo(Mnemonic
, CanAcceptCarrySet
, CanAcceptPredicationCode
);
5092 // If we had a carry-set on an instruction that can't do that, issue an
5094 if (!CanAcceptCarrySet
&& CarrySetting
) {
5095 Parser
.eatToEndOfStatement();
5096 return Error(NameLoc
, "instruction '" + Mnemonic
+
5097 "' can not set flags, but 's' suffix specified");
5099 // If we had a predication code on an instruction that can't do that, issue an
5101 if (!CanAcceptPredicationCode
&& PredicationCode
!= ARMCC::AL
) {
5102 Parser
.eatToEndOfStatement();
5103 return Error(NameLoc
, "instruction '" + Mnemonic
+
5104 "' is not predicable, but condition code specified");
5107 // Add the carry setting operand, if necessary.
5108 if (CanAcceptCarrySet
) {
5109 SMLoc Loc
= SMLoc::getFromPointer(NameLoc
.getPointer() + Mnemonic
.size());
5110 Operands
.push_back(ARMOperand::CreateCCOut(CarrySetting
? ARM::CPSR
: 0,
5114 // Add the predication code operand, if necessary.
5115 if (CanAcceptPredicationCode
) {
5116 SMLoc Loc
= SMLoc::getFromPointer(NameLoc
.getPointer() + Mnemonic
.size() +
5118 Operands
.push_back(ARMOperand::CreateCondCode(
5119 ARMCC::CondCodes(PredicationCode
), Loc
));
5122 // Add the processor imod operand, if necessary.
5123 if (ProcessorIMod
) {
5124 Operands
.push_back(ARMOperand::CreateImm(
5125 MCConstantExpr::Create(ProcessorIMod
, getContext()),
5129 // Add the remaining tokens in the mnemonic.
5130 while (Next
!= StringRef::npos
) {
5132 Next
= Name
.find('.', Start
+ 1);
5133 StringRef ExtraToken
= Name
.slice(Start
, Next
);
5135 // Some NEON instructions have an optional datatype suffix that is
5136 // completely ignored. Check for that.
5137 if (isDataTypeToken(ExtraToken
) &&
5138 doesIgnoreDataTypeSuffix(Mnemonic
, ExtraToken
))
5141 if (ExtraToken
!= ".n") {
5142 SMLoc Loc
= SMLoc::getFromPointer(NameLoc
.getPointer() + Start
);
5143 Operands
.push_back(ARMOperand::CreateToken(ExtraToken
, Loc
));
5147 // Read the remaining operands.
5148 if (getLexer().isNot(AsmToken::EndOfStatement
)) {
5149 // Read the first operand.
5150 if (parseOperand(Operands
, Mnemonic
)) {
5151 Parser
.eatToEndOfStatement();
5155 while (getLexer().is(AsmToken::Comma
)) {
5156 Parser
.Lex(); // Eat the comma.
5158 // Parse and remember the operand.
5159 if (parseOperand(Operands
, Mnemonic
)) {
5160 Parser
.eatToEndOfStatement();
5166 if (getLexer().isNot(AsmToken::EndOfStatement
)) {
5167 SMLoc Loc
= getLexer().getLoc();
5168 Parser
.eatToEndOfStatement();
5169 return Error(Loc
, "unexpected token in argument list");
5172 Parser
.Lex(); // Consume the EndOfStatement
5174 // Some instructions, mostly Thumb, have forms for the same mnemonic that
5175 // do and don't have a cc_out optional-def operand. With some spot-checks
5176 // of the operand list, we can figure out which variant we're trying to
5177 // parse and adjust accordingly before actually matching. We shouldn't ever
5178 // try to remove a cc_out operand that was explicitly set on the the
5179 // mnemonic, of course (CarrySetting == true). Reason number #317 the
5180 // table driven matcher doesn't fit well with the ARM instruction set.
5181 if (!CarrySetting
&& shouldOmitCCOutOperand(Mnemonic
, Operands
)) {
5182 ARMOperand
*Op
= static_cast<ARMOperand
*>(Operands
[1]);
5183 Operands
.erase(Operands
.begin() + 1);
5187 // ARM mode 'blx' need special handling, as the register operand version
5188 // is predicable, but the label operand version is not. So, we can't rely
5189 // on the Mnemonic based checking to correctly figure out when to put
5190 // a k_CondCode operand in the list. If we're trying to match the label
5191 // version, remove the k_CondCode operand here.
5192 if (!isThumb() && Mnemonic
== "blx" && Operands
.size() == 3 &&
5193 static_cast<ARMOperand
*>(Operands
[2])->isImm()) {
5194 ARMOperand
*Op
= static_cast<ARMOperand
*>(Operands
[1]);
5195 Operands
.erase(Operands
.begin() + 1);
5199 // Adjust operands of ldrexd/strexd to MCK_GPRPair.
5200 // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
5201 // a single GPRPair reg operand is used in the .td file to replace the two
5202 // GPRs. However, when parsing from asm, the two GRPs cannot be automatically
5203 // expressed as a GPRPair, so we have to manually merge them.
5204 // FIXME: We would really like to be able to tablegen'erate this.
5205 if (!isThumb() && Operands
.size() > 4 &&
5206 (Mnemonic
== "ldrexd" || Mnemonic
== "strexd")) {
5207 bool isLoad
= (Mnemonic
== "ldrexd");
5208 unsigned Idx
= isLoad
? 2 : 3;
5209 ARMOperand
* Op1
= static_cast<ARMOperand
*>(Operands
[Idx
]);
5210 ARMOperand
* Op2
= static_cast<ARMOperand
*>(Operands
[Idx
+1]);
5212 const MCRegisterClass
& MRC
= MRI
->getRegClass(ARM::GPRRegClassID
);
5213 // Adjust only if Op1 and Op2 are GPRs.
5214 if (Op1
->isReg() && Op2
->isReg() && MRC
.contains(Op1
->getReg()) &&
5215 MRC
.contains(Op2
->getReg())) {
5216 unsigned Reg1
= Op1
->getReg();
5217 unsigned Reg2
= Op2
->getReg();
5218 unsigned Rt
= MRI
->getEncodingValue(Reg1
);
5219 unsigned Rt2
= MRI
->getEncodingValue(Reg2
);
5221 // Rt2 must be Rt + 1 and Rt must be even.
5222 if (Rt
+ 1 != Rt2
|| (Rt
& 1)) {
5223 Error(Op2
->getStartLoc(), isLoad
?
5224 "destination operands must be sequential" :
5225 "source operands must be sequential");
5228 unsigned NewReg
= MRI
->getMatchingSuperReg(Reg1
, ARM::gsub_0
,
5229 &(MRI
->getRegClass(ARM::GPRPairRegClassID
)));
5230 Operands
.erase(Operands
.begin() + Idx
, Operands
.begin() + Idx
+ 2);
5231 Operands
.insert(Operands
.begin() + Idx
, ARMOperand::CreateReg(
5232 NewReg
, Op1
->getStartLoc(), Op2
->getEndLoc()));
5241 // Validate context-sensitive operand constraints.
5243 // return 'true' if register list contains non-low GPR registers,
5244 // 'false' otherwise. If Reg is in the register list or is HiReg, set
5245 // 'containsReg' to true.
5246 static bool checkLowRegisterList(MCInst Inst
, unsigned OpNo
, unsigned Reg
,
5247 unsigned HiReg
, bool &containsReg
) {
5248 containsReg
= false;
5249 for (unsigned i
= OpNo
; i
< Inst
.getNumOperands(); ++i
) {
5250 unsigned OpReg
= Inst
.getOperand(i
).getReg();
5253 // Anything other than a low register isn't legal here.
5254 if (!isARMLowRegister(OpReg
) && (!HiReg
|| OpReg
!= HiReg
))
5260 // Check if the specified regisgter is in the register list of the inst,
5261 // starting at the indicated operand number.
5262 static bool listContainsReg(MCInst
&Inst
, unsigned OpNo
, unsigned Reg
) {
5263 for (unsigned i
= OpNo
; i
< Inst
.getNumOperands(); ++i
) {
5264 unsigned OpReg
= Inst
.getOperand(i
).getReg();
5271 // FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5272 // the ARMInsts array) instead. Getting that here requires awkward
5273 // API changes, though. Better way?
5275 extern const MCInstrDesc ARMInsts
[];
5277 static const MCInstrDesc
&getInstDesc(unsigned Opcode
) {
5278 return ARMInsts
[Opcode
];
5281 // FIXME: We would really like to be able to tablegen'erate this.
5283 validateInstruction(MCInst
&Inst
,
5284 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
5285 const MCInstrDesc
&MCID
= getInstDesc(Inst
.getOpcode());
5286 SMLoc Loc
= Operands
[0]->getStartLoc();
5287 // Check the IT block state first.
5288 // NOTE: BKPT instruction has the interesting property of being
5289 // allowed in IT blocks, but not being predicable. It just always
5291 if (inITBlock() && Inst
.getOpcode() != ARM::tBKPT
&&
5292 Inst
.getOpcode() != ARM::BKPT
) {
5294 if (ITState
.FirstCond
)
5295 ITState
.FirstCond
= false;
5297 bit
= (ITState
.Mask
>> (5 - ITState
.CurPosition
)) & 1;
5298 // The instruction must be predicable.
5299 if (!MCID
.isPredicable())
5300 return Error(Loc
, "instructions in IT block must be predicable");
5301 unsigned Cond
= Inst
.getOperand(MCID
.findFirstPredOperandIdx()).getImm();
5302 unsigned ITCond
= bit
? ITState
.Cond
:
5303 ARMCC::getOppositeCondition(ITState
.Cond
);
5304 if (Cond
!= ITCond
) {
5305 // Find the condition code Operand to get its SMLoc information.
5307 for (unsigned i
= 1; i
< Operands
.size(); ++i
)
5308 if (static_cast<ARMOperand
*>(Operands
[i
])->isCondCode())
5309 CondLoc
= Operands
[i
]->getStartLoc();
5310 return Error(CondLoc
, "incorrect condition in IT block; got '" +
5311 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond
))) +
5312 "', but expected '" +
5313 ARMCondCodeToString(ARMCC::CondCodes(ITCond
)) + "'");
5315 // Check for non-'al' condition codes outside of the IT block.
5316 } else if (isThumbTwo() && MCID
.isPredicable() &&
5317 Inst
.getOperand(MCID
.findFirstPredOperandIdx()).getImm() !=
5318 ARMCC::AL
&& Inst
.getOpcode() != ARM::tB
&&
5319 Inst
.getOpcode() != ARM::t2B
)
5320 return Error(Loc
, "predicated instructions must be in IT block");
5322 switch (Inst
.getOpcode()) {
5325 case ARM::LDRD_POST
: {
5326 // Rt2 must be Rt + 1.
5327 unsigned Rt
= MRI
->getEncodingValue(Inst
.getOperand(0).getReg());
5328 unsigned Rt2
= MRI
->getEncodingValue(Inst
.getOperand(1).getReg());
5330 return Error(Operands
[3]->getStartLoc(),
5331 "destination operands must be sequential");
5335 // Rt2 must be Rt + 1.
5336 unsigned Rt
= MRI
->getEncodingValue(Inst
.getOperand(0).getReg());
5337 unsigned Rt2
= MRI
->getEncodingValue(Inst
.getOperand(1).getReg());
5339 return Error(Operands
[3]->getStartLoc(),
5340 "source operands must be sequential");
5344 case ARM::STRD_POST
: {
5345 // Rt2 must be Rt + 1.
5346 unsigned Rt
= MRI
->getEncodingValue(Inst
.getOperand(1).getReg());
5347 unsigned Rt2
= MRI
->getEncodingValue(Inst
.getOperand(2).getReg());
5349 return Error(Operands
[3]->getStartLoc(),
5350 "source operands must be sequential");
5355 // width must be in range [1, 32-lsb]
5356 unsigned lsb
= Inst
.getOperand(2).getImm();
5357 unsigned widthm1
= Inst
.getOperand(3).getImm();
5358 if (widthm1
>= 32 - lsb
)
5359 return Error(Operands
[5]->getStartLoc(),
5360 "bitfield width must be in range [1,32-lsb]");
5364 // If we're parsing Thumb2, the .w variant is available and handles
5365 // most cases that are normally illegal for a Thumb1 LDM
5366 // instruction. We'll make the transformation in processInstruction()
5369 // Thumb LDM instructions are writeback iff the base register is not
5370 // in the register list.
5371 unsigned Rn
= Inst
.getOperand(0).getReg();
5372 bool hasWritebackToken
=
5373 (static_cast<ARMOperand
*>(Operands
[3])->isToken() &&
5374 static_cast<ARMOperand
*>(Operands
[3])->getToken() == "!");
5375 bool listContainsBase
;
5376 if (checkLowRegisterList(Inst
, 3, Rn
, 0, listContainsBase
) && !isThumbTwo())
5377 return Error(Operands
[3 + hasWritebackToken
]->getStartLoc(),
5378 "registers must be in range r0-r7");
5379 // If we should have writeback, then there should be a '!' token.
5380 if (!listContainsBase
&& !hasWritebackToken
&& !isThumbTwo())
5381 return Error(Operands
[2]->getStartLoc(),
5382 "writeback operator '!' expected");
5383 // If we should not have writeback, there must not be a '!'. This is
5384 // true even for the 32-bit wide encodings.
5385 if (listContainsBase
&& hasWritebackToken
)
5386 return Error(Operands
[3]->getStartLoc(),
5387 "writeback operator '!' not allowed when base register "
5388 "in register list");
5392 case ARM::t2LDMIA_UPD
: {
5393 if (listContainsReg(Inst
, 3, Inst
.getOperand(0).getReg()))
5394 return Error(Operands
[4]->getStartLoc(),
5395 "writeback operator '!' not allowed when base register "
5396 "in register list");
5400 // The second source operand must be the same register as the destination
5403 // In this case, we must directly check the parsed operands because the
5404 // cvtThumbMultiply() function is written in such a way that it guarantees
5405 // this first statement is always true for the new Inst. Essentially, the
5406 // destination is unconditionally copied into the second source operand
5407 // without checking to see if it matches what we actually parsed.
5408 if (Operands
.size() == 6 &&
5409 (((ARMOperand
*)Operands
[3])->getReg() !=
5410 ((ARMOperand
*)Operands
[5])->getReg()) &&
5411 (((ARMOperand
*)Operands
[3])->getReg() !=
5412 ((ARMOperand
*)Operands
[4])->getReg())) {
5413 return Error(Operands
[3]->getStartLoc(),
5414 "destination register must match source register");
5418 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
5419 // so only issue a diagnostic for thumb1. The instructions will be
5420 // switched to the t2 encodings in processInstruction() if necessary.
5422 bool listContainsBase
;
5423 if (checkLowRegisterList(Inst
, 2, 0, ARM::PC
, listContainsBase
) &&
5425 return Error(Operands
[2]->getStartLoc(),
5426 "registers must be in range r0-r7 or pc");
5430 bool listContainsBase
;
5431 if (checkLowRegisterList(Inst
, 2, 0, ARM::LR
, listContainsBase
) &&
5433 return Error(Operands
[2]->getStartLoc(),
5434 "registers must be in range r0-r7 or lr");
5437 case ARM::tSTMIA_UPD
: {
5438 bool listContainsBase
;
5439 if (checkLowRegisterList(Inst
, 4, 0, 0, listContainsBase
) && !isThumbTwo())
5440 return Error(Operands
[4]->getStartLoc(),
5441 "registers must be in range r0-r7");
5444 case ARM::tADDrSP
: {
5445 // If the non-SP source operand and the destination operand are not the
5446 // same, we need thumb2 (for the wide encoding), or we have an error.
5447 if (!isThumbTwo() &&
5448 Inst
.getOperand(0).getReg() != Inst
.getOperand(2).getReg()) {
5449 return Error(Operands
[4]->getStartLoc(),
5450 "source register must be the same as destination");
5459 static unsigned getRealVSTOpcode(unsigned Opc
, unsigned &Spacing
) {
5461 default: llvm_unreachable("unexpected opcode!");
5463 case ARM::VST1LNdWB_fixed_Asm_8
: Spacing
= 1; return ARM::VST1LNd8_UPD
;
5464 case ARM::VST1LNdWB_fixed_Asm_16
: Spacing
= 1; return ARM::VST1LNd16_UPD
;
5465 case ARM::VST1LNdWB_fixed_Asm_32
: Spacing
= 1; return ARM::VST1LNd32_UPD
;
5466 case ARM::VST1LNdWB_register_Asm_8
: Spacing
= 1; return ARM::VST1LNd8_UPD
;
5467 case ARM::VST1LNdWB_register_Asm_16
: Spacing
= 1; return ARM::VST1LNd16_UPD
;
5468 case ARM::VST1LNdWB_register_Asm_32
: Spacing
= 1; return ARM::VST1LNd32_UPD
;
5469 case ARM::VST1LNdAsm_8
: Spacing
= 1; return ARM::VST1LNd8
;
5470 case ARM::VST1LNdAsm_16
: Spacing
= 1; return ARM::VST1LNd16
;
5471 case ARM::VST1LNdAsm_32
: Spacing
= 1; return ARM::VST1LNd32
;
5474 case ARM::VST2LNdWB_fixed_Asm_8
: Spacing
= 1; return ARM::VST2LNd8_UPD
;
5475 case ARM::VST2LNdWB_fixed_Asm_16
: Spacing
= 1; return ARM::VST2LNd16_UPD
;
5476 case ARM::VST2LNdWB_fixed_Asm_32
: Spacing
= 1; return ARM::VST2LNd32_UPD
;
5477 case ARM::VST2LNqWB_fixed_Asm_16
: Spacing
= 2; return ARM::VST2LNq16_UPD
;
5478 case ARM::VST2LNqWB_fixed_Asm_32
: Spacing
= 2; return ARM::VST2LNq32_UPD
;
5480 case ARM::VST2LNdWB_register_Asm_8
: Spacing
= 1; return ARM::VST2LNd8_UPD
;
5481 case ARM::VST2LNdWB_register_Asm_16
: Spacing
= 1; return ARM::VST2LNd16_UPD
;
5482 case ARM::VST2LNdWB_register_Asm_32
: Spacing
= 1; return ARM::VST2LNd32_UPD
;
5483 case ARM::VST2LNqWB_register_Asm_16
: Spacing
= 2; return ARM::VST2LNq16_UPD
;
5484 case ARM::VST2LNqWB_register_Asm_32
: Spacing
= 2; return ARM::VST2LNq32_UPD
;
5486 case ARM::VST2LNdAsm_8
: Spacing
= 1; return ARM::VST2LNd8
;
5487 case ARM::VST2LNdAsm_16
: Spacing
= 1; return ARM::VST2LNd16
;
5488 case ARM::VST2LNdAsm_32
: Spacing
= 1; return ARM::VST2LNd32
;
5489 case ARM::VST2LNqAsm_16
: Spacing
= 2; return ARM::VST2LNq16
;
5490 case ARM::VST2LNqAsm_32
: Spacing
= 2; return ARM::VST2LNq32
;
5493 case ARM::VST3LNdWB_fixed_Asm_8
: Spacing
= 1; return ARM::VST3LNd8_UPD
;
5494 case ARM::VST3LNdWB_fixed_Asm_16
: Spacing
= 1; return ARM::VST3LNd16_UPD
;
5495 case ARM::VST3LNdWB_fixed_Asm_32
: Spacing
= 1; return ARM::VST3LNd32_UPD
;
5496 case ARM::VST3LNqWB_fixed_Asm_16
: Spacing
= 1; return ARM::VST3LNq16_UPD
;
5497 case ARM::VST3LNqWB_fixed_Asm_32
: Spacing
= 2; return ARM::VST3LNq32_UPD
;
5498 case ARM::VST3LNdWB_register_Asm_8
: Spacing
= 1; return ARM::VST3LNd8_UPD
;
5499 case ARM::VST3LNdWB_register_Asm_16
: Spacing
= 1; return ARM::VST3LNd16_UPD
;
5500 case ARM::VST3LNdWB_register_Asm_32
: Spacing
= 1; return ARM::VST3LNd32_UPD
;
5501 case ARM::VST3LNqWB_register_Asm_16
: Spacing
= 2; return ARM::VST3LNq16_UPD
;
5502 case ARM::VST3LNqWB_register_Asm_32
: Spacing
= 2; return ARM::VST3LNq32_UPD
;
5503 case ARM::VST3LNdAsm_8
: Spacing
= 1; return ARM::VST3LNd8
;
5504 case ARM::VST3LNdAsm_16
: Spacing
= 1; return ARM::VST3LNd16
;
5505 case ARM::VST3LNdAsm_32
: Spacing
= 1; return ARM::VST3LNd32
;
5506 case ARM::VST3LNqAsm_16
: Spacing
= 2; return ARM::VST3LNq16
;
5507 case ARM::VST3LNqAsm_32
: Spacing
= 2; return ARM::VST3LNq32
;
5510 case ARM::VST3dWB_fixed_Asm_8
: Spacing
= 1; return ARM::VST3d8_UPD
;
5511 case ARM::VST3dWB_fixed_Asm_16
: Spacing
= 1; return ARM::VST3d16_UPD
;
5512 case ARM::VST3dWB_fixed_Asm_32
: Spacing
= 1; return ARM::VST3d32_UPD
;
5513 case ARM::VST3qWB_fixed_Asm_8
: Spacing
= 2; return ARM::VST3q8_UPD
;
5514 case ARM::VST3qWB_fixed_Asm_16
: Spacing
= 2; return ARM::VST3q16_UPD
;
5515 case ARM::VST3qWB_fixed_Asm_32
: Spacing
= 2; return ARM::VST3q32_UPD
;
5516 case ARM::VST3dWB_register_Asm_8
: Spacing
= 1; return ARM::VST3d8_UPD
;
5517 case ARM::VST3dWB_register_Asm_16
: Spacing
= 1; return ARM::VST3d16_UPD
;
5518 case ARM::VST3dWB_register_Asm_32
: Spacing
= 1; return ARM::VST3d32_UPD
;
5519 case ARM::VST3qWB_register_Asm_8
: Spacing
= 2; return ARM::VST3q8_UPD
;
5520 case ARM::VST3qWB_register_Asm_16
: Spacing
= 2; return ARM::VST3q16_UPD
;
5521 case ARM::VST3qWB_register_Asm_32
: Spacing
= 2; return ARM::VST3q32_UPD
;
5522 case ARM::VST3dAsm_8
: Spacing
= 1; return ARM::VST3d8
;
5523 case ARM::VST3dAsm_16
: Spacing
= 1; return ARM::VST3d16
;
5524 case ARM::VST3dAsm_32
: Spacing
= 1; return ARM::VST3d32
;
5525 case ARM::VST3qAsm_8
: Spacing
= 2; return ARM::VST3q8
;
5526 case ARM::VST3qAsm_16
: Spacing
= 2; return ARM::VST3q16
;
5527 case ARM::VST3qAsm_32
: Spacing
= 2; return ARM::VST3q32
;
5530 case ARM::VST4LNdWB_fixed_Asm_8
: Spacing
= 1; return ARM::VST4LNd8_UPD
;
5531 case ARM::VST4LNdWB_fixed_Asm_16
: Spacing
= 1; return ARM::VST4LNd16_UPD
;
5532 case ARM::VST4LNdWB_fixed_Asm_32
: Spacing
= 1; return ARM::VST4LNd32_UPD
;
5533 case ARM::VST4LNqWB_fixed_Asm_16
: Spacing
= 1; return ARM::VST4LNq16_UPD
;
5534 case ARM::VST4LNqWB_fixed_Asm_32
: Spacing
= 2; return ARM::VST4LNq32_UPD
;
5535 case ARM::VST4LNdWB_register_Asm_8
: Spacing
= 1; return ARM::VST4LNd8_UPD
;
5536 case ARM::VST4LNdWB_register_Asm_16
: Spacing
= 1; return ARM::VST4LNd16_UPD
;
5537 case ARM::VST4LNdWB_register_Asm_32
: Spacing
= 1; return ARM::VST4LNd32_UPD
;
5538 case ARM::VST4LNqWB_register_Asm_16
: Spacing
= 2; return ARM::VST4LNq16_UPD
;
5539 case ARM::VST4LNqWB_register_Asm_32
: Spacing
= 2; return ARM::VST4LNq32_UPD
;
5540 case ARM::VST4LNdAsm_8
: Spacing
= 1; return ARM::VST4LNd8
;
5541 case ARM::VST4LNdAsm_16
: Spacing
= 1; return ARM::VST4LNd16
;
5542 case ARM::VST4LNdAsm_32
: Spacing
= 1; return ARM::VST4LNd32
;
5543 case ARM::VST4LNqAsm_16
: Spacing
= 2; return ARM::VST4LNq16
;
5544 case ARM::VST4LNqAsm_32
: Spacing
= 2; return ARM::VST4LNq32
;
5547 case ARM::VST4dWB_fixed_Asm_8
: Spacing
= 1; return ARM::VST4d8_UPD
;
5548 case ARM::VST4dWB_fixed_Asm_16
: Spacing
= 1; return ARM::VST4d16_UPD
;
5549 case ARM::VST4dWB_fixed_Asm_32
: Spacing
= 1; return ARM::VST4d32_UPD
;
5550 case ARM::VST4qWB_fixed_Asm_8
: Spacing
= 2; return ARM::VST4q8_UPD
;
5551 case ARM::VST4qWB_fixed_Asm_16
: Spacing
= 2; return ARM::VST4q16_UPD
;
5552 case ARM::VST4qWB_fixed_Asm_32
: Spacing
= 2; return ARM::VST4q32_UPD
;
5553 case ARM::VST4dWB_register_Asm_8
: Spacing
= 1; return ARM::VST4d8_UPD
;
5554 case ARM::VST4dWB_register_Asm_16
: Spacing
= 1; return ARM::VST4d16_UPD
;
5555 case ARM::VST4dWB_register_Asm_32
: Spacing
= 1; return ARM::VST4d32_UPD
;
5556 case ARM::VST4qWB_register_Asm_8
: Spacing
= 2; return ARM::VST4q8_UPD
;
5557 case ARM::VST4qWB_register_Asm_16
: Spacing
= 2; return ARM::VST4q16_UPD
;
5558 case ARM::VST4qWB_register_Asm_32
: Spacing
= 2; return ARM::VST4q32_UPD
;
5559 case ARM::VST4dAsm_8
: Spacing
= 1; return ARM::VST4d8
;
5560 case ARM::VST4dAsm_16
: Spacing
= 1; return ARM::VST4d16
;
5561 case ARM::VST4dAsm_32
: Spacing
= 1; return ARM::VST4d32
;
5562 case ARM::VST4qAsm_8
: Spacing
= 2; return ARM::VST4q8
;
5563 case ARM::VST4qAsm_16
: Spacing
= 2; return ARM::VST4q16
;
5564 case ARM::VST4qAsm_32
: Spacing
= 2; return ARM::VST4q32
;
5568 static unsigned getRealVLDOpcode(unsigned Opc
, unsigned &Spacing
) {
5570 default: llvm_unreachable("unexpected opcode!");
5572 case ARM::VLD1LNdWB_fixed_Asm_8
: Spacing
= 1; return ARM::VLD1LNd8_UPD
;
5573 case ARM::VLD1LNdWB_fixed_Asm_16
: Spacing
= 1; return ARM::VLD1LNd16_UPD
;
5574 case ARM::VLD1LNdWB_fixed_Asm_32
: Spacing
= 1; return ARM::VLD1LNd32_UPD
;
5575 case ARM::VLD1LNdWB_register_Asm_8
: Spacing
= 1; return ARM::VLD1LNd8_UPD
;
5576 case ARM::VLD1LNdWB_register_Asm_16
: Spacing
= 1; return ARM::VLD1LNd16_UPD
;
5577 case ARM::VLD1LNdWB_register_Asm_32
: Spacing
= 1; return ARM::VLD1LNd32_UPD
;
5578 case ARM::VLD1LNdAsm_8
: Spacing
= 1; return ARM::VLD1LNd8
;
5579 case ARM::VLD1LNdAsm_16
: Spacing
= 1; return ARM::VLD1LNd16
;
5580 case ARM::VLD1LNdAsm_32
: Spacing
= 1; return ARM::VLD1LNd32
;
5583 case ARM::VLD2LNdWB_fixed_Asm_8
: Spacing
= 1; return ARM::VLD2LNd8_UPD
;
5584 case ARM::VLD2LNdWB_fixed_Asm_16
: Spacing
= 1; return ARM::VLD2LNd16_UPD
;
5585 case ARM::VLD2LNdWB_fixed_Asm_32
: Spacing
= 1; return ARM::VLD2LNd32_UPD
;
5586 case ARM::VLD2LNqWB_fixed_Asm_16
: Spacing
= 1; return ARM::VLD2LNq16_UPD
;
5587 case ARM::VLD2LNqWB_fixed_Asm_32
: Spacing
= 2; return ARM::VLD2LNq32_UPD
;
5588 case ARM::VLD2LNdWB_register_Asm_8
: Spacing
= 1; return ARM::VLD2LNd8_UPD
;
5589 case ARM::VLD2LNdWB_register_Asm_16
: Spacing
= 1; return ARM::VLD2LNd16_UPD
;
5590 case ARM::VLD2LNdWB_register_Asm_32
: Spacing
= 1; return ARM::VLD2LNd32_UPD
;
5591 case ARM::VLD2LNqWB_register_Asm_16
: Spacing
= 2; return ARM::VLD2LNq16_UPD
;
5592 case ARM::VLD2LNqWB_register_Asm_32
: Spacing
= 2; return ARM::VLD2LNq32_UPD
;
5593 case ARM::VLD2LNdAsm_8
: Spacing
= 1; return ARM::VLD2LNd8
;
5594 case ARM::VLD2LNdAsm_16
: Spacing
= 1; return ARM::VLD2LNd16
;
5595 case ARM::VLD2LNdAsm_32
: Spacing
= 1; return ARM::VLD2LNd32
;
5596 case ARM::VLD2LNqAsm_16
: Spacing
= 2; return ARM::VLD2LNq16
;
5597 case ARM::VLD2LNqAsm_32
: Spacing
= 2; return ARM::VLD2LNq32
;
5600 case ARM::VLD3DUPdWB_fixed_Asm_8
: Spacing
= 1; return ARM::VLD3DUPd8_UPD
;
5601 case ARM::VLD3DUPdWB_fixed_Asm_16
: Spacing
= 1; return ARM::VLD3DUPd16_UPD
;
5602 case ARM::VLD3DUPdWB_fixed_Asm_32
: Spacing
= 1; return ARM::VLD3DUPd32_UPD
;
5603 case ARM::VLD3DUPqWB_fixed_Asm_8
: Spacing
= 1; return ARM::VLD3DUPq8_UPD
;
5604 case ARM::VLD3DUPqWB_fixed_Asm_16
: Spacing
= 1; return ARM::VLD3DUPq16_UPD
;
5605 case ARM::VLD3DUPqWB_fixed_Asm_32
: Spacing
= 2; return ARM::VLD3DUPq32_UPD
;
5606 case ARM::VLD3DUPdWB_register_Asm_8
: Spacing
= 1; return ARM::VLD3DUPd8_UPD
;
5607 case ARM::VLD3DUPdWB_register_Asm_16
: Spacing
= 1; return ARM::VLD3DUPd16_UPD
;
5608 case ARM::VLD3DUPdWB_register_Asm_32
: Spacing
= 1; return ARM::VLD3DUPd32_UPD
;
5609 case ARM::VLD3DUPqWB_register_Asm_8
: Spacing
= 2; return ARM::VLD3DUPq8_UPD
;
5610 case ARM::VLD3DUPqWB_register_Asm_16
: Spacing
= 2; return ARM::VLD3DUPq16_UPD
;
5611 case ARM::VLD3DUPqWB_register_Asm_32
: Spacing
= 2; return ARM::VLD3DUPq32_UPD
;
5612 case ARM::VLD3DUPdAsm_8
: Spacing
= 1; return ARM::VLD3DUPd8
;
5613 case ARM::VLD3DUPdAsm_16
: Spacing
= 1; return ARM::VLD3DUPd16
;
5614 case ARM::VLD3DUPdAsm_32
: Spacing
= 1; return ARM::VLD3DUPd32
;
5615 case ARM::VLD3DUPqAsm_8
: Spacing
= 2; return ARM::VLD3DUPq8
;
5616 case ARM::VLD3DUPqAsm_16
: Spacing
= 2; return ARM::VLD3DUPq16
;
5617 case ARM::VLD3DUPqAsm_32
: Spacing
= 2; return ARM::VLD3DUPq32
;
5620 case ARM::VLD3LNdWB_fixed_Asm_8
: Spacing
= 1; return ARM::VLD3LNd8_UPD
;
5621 case ARM::VLD3LNdWB_fixed_Asm_16
: Spacing
= 1; return ARM::VLD3LNd16_UPD
;
5622 case ARM::VLD3LNdWB_fixed_Asm_32
: Spacing
= 1; return ARM::VLD3LNd32_UPD
;
5623 case ARM::VLD3LNqWB_fixed_Asm_16
: Spacing
= 1; return ARM::VLD3LNq16_UPD
;
5624 case ARM::VLD3LNqWB_fixed_Asm_32
: Spacing
= 2; return ARM::VLD3LNq32_UPD
;
5625 case ARM::VLD3LNdWB_register_Asm_8
: Spacing
= 1; return ARM::VLD3LNd8_UPD
;
5626 case ARM::VLD3LNdWB_register_Asm_16
: Spacing
= 1; return ARM::VLD3LNd16_UPD
;
5627 case ARM::VLD3LNdWB_register_Asm_32
: Spacing
= 1; return ARM::VLD3LNd32_UPD
;
5628 case ARM::VLD3LNqWB_register_Asm_16
: Spacing
= 2; return ARM::VLD3LNq16_UPD
;
5629 case ARM::VLD3LNqWB_register_Asm_32
: Spacing
= 2; return ARM::VLD3LNq32_UPD
;
5630 case ARM::VLD3LNdAsm_8
: Spacing
= 1; return ARM::VLD3LNd8
;
5631 case ARM::VLD3LNdAsm_16
: Spacing
= 1; return ARM::VLD3LNd16
;
5632 case ARM::VLD3LNdAsm_32
: Spacing
= 1; return ARM::VLD3LNd32
;
5633 case ARM::VLD3LNqAsm_16
: Spacing
= 2; return ARM::VLD3LNq16
;
5634 case ARM::VLD3LNqAsm_32
: Spacing
= 2; return ARM::VLD3LNq32
;
5637 case ARM::VLD3dWB_fixed_Asm_8
: Spacing
= 1; return ARM::VLD3d8_UPD
;
5638 case ARM::VLD3dWB_fixed_Asm_16
: Spacing
= 1; return ARM::VLD3d16_UPD
;
5639 case ARM::VLD3dWB_fixed_Asm_32
: Spacing
= 1; return ARM::VLD3d32_UPD
;
5640 case ARM::VLD3qWB_fixed_Asm_8
: Spacing
= 2; return ARM::VLD3q8_UPD
;
5641 case ARM::VLD3qWB_fixed_Asm_16
: Spacing
= 2; return ARM::VLD3q16_UPD
;
5642 case ARM::VLD3qWB_fixed_Asm_32
: Spacing
= 2; return ARM::VLD3q32_UPD
;
5643 case ARM::VLD3dWB_register_Asm_8
: Spacing
= 1; return ARM::VLD3d8_UPD
;
5644 case ARM::VLD3dWB_register_Asm_16
: Spacing
= 1; return ARM::VLD3d16_UPD
;
5645 case ARM::VLD3dWB_register_Asm_32
: Spacing
= 1; return ARM::VLD3d32_UPD
;
5646 case ARM::VLD3qWB_register_Asm_8
: Spacing
= 2; return ARM::VLD3q8_UPD
;
5647 case ARM::VLD3qWB_register_Asm_16
: Spacing
= 2; return ARM::VLD3q16_UPD
;
5648 case ARM::VLD3qWB_register_Asm_32
: Spacing
= 2; return ARM::VLD3q32_UPD
;
5649 case ARM::VLD3dAsm_8
: Spacing
= 1; return ARM::VLD3d8
;
5650 case ARM::VLD3dAsm_16
: Spacing
= 1; return ARM::VLD3d16
;
5651 case ARM::VLD3dAsm_32
: Spacing
= 1; return ARM::VLD3d32
;
5652 case ARM::VLD3qAsm_8
: Spacing
= 2; return ARM::VLD3q8
;
5653 case ARM::VLD3qAsm_16
: Spacing
= 2; return ARM::VLD3q16
;
5654 case ARM::VLD3qAsm_32
: Spacing
= 2; return ARM::VLD3q32
;
5657 case ARM::VLD4LNdWB_fixed_Asm_8
: Spacing
= 1; return ARM::VLD4LNd8_UPD
;
5658 case ARM::VLD4LNdWB_fixed_Asm_16
: Spacing
= 1; return ARM::VLD4LNd16_UPD
;
5659 case ARM::VLD4LNdWB_fixed_Asm_32
: Spacing
= 1; return ARM::VLD4LNd32_UPD
;
5660 case ARM::VLD4LNqWB_fixed_Asm_16
: Spacing
= 1; return ARM::VLD4LNq16_UPD
;
5661 case ARM::VLD4LNqWB_fixed_Asm_32
: Spacing
= 2; return ARM::VLD4LNq32_UPD
;
5662 case ARM::VLD4LNdWB_register_Asm_8
: Spacing
= 1; return ARM::VLD4LNd8_UPD
;
5663 case ARM::VLD4LNdWB_register_Asm_16
: Spacing
= 1; return ARM::VLD4LNd16_UPD
;
5664 case ARM::VLD4LNdWB_register_Asm_32
: Spacing
= 1; return ARM::VLD4LNd32_UPD
;
5665 case ARM::VLD4LNqWB_register_Asm_16
: Spacing
= 2; return ARM::VLD4LNq16_UPD
;
5666 case ARM::VLD4LNqWB_register_Asm_32
: Spacing
= 2; return ARM::VLD4LNq32_UPD
;
5667 case ARM::VLD4LNdAsm_8
: Spacing
= 1; return ARM::VLD4LNd8
;
5668 case ARM::VLD4LNdAsm_16
: Spacing
= 1; return ARM::VLD4LNd16
;
5669 case ARM::VLD4LNdAsm_32
: Spacing
= 1; return ARM::VLD4LNd32
;
5670 case ARM::VLD4LNqAsm_16
: Spacing
= 2; return ARM::VLD4LNq16
;
5671 case ARM::VLD4LNqAsm_32
: Spacing
= 2; return ARM::VLD4LNq32
;
5674 case ARM::VLD4DUPdWB_fixed_Asm_8
: Spacing
= 1; return ARM::VLD4DUPd8_UPD
;
5675 case ARM::VLD4DUPdWB_fixed_Asm_16
: Spacing
= 1; return ARM::VLD4DUPd16_UPD
;
5676 case ARM::VLD4DUPdWB_fixed_Asm_32
: Spacing
= 1; return ARM::VLD4DUPd32_UPD
;
5677 case ARM::VLD4DUPqWB_fixed_Asm_8
: Spacing
= 1; return ARM::VLD4DUPq8_UPD
;
5678 case ARM::VLD4DUPqWB_fixed_Asm_16
: Spacing
= 1; return ARM::VLD4DUPq16_UPD
;
5679 case ARM::VLD4DUPqWB_fixed_Asm_32
: Spacing
= 2; return ARM::VLD4DUPq32_UPD
;
5680 case ARM::VLD4DUPdWB_register_Asm_8
: Spacing
= 1; return ARM::VLD4DUPd8_UPD
;
5681 case ARM::VLD4DUPdWB_register_Asm_16
: Spacing
= 1; return ARM::VLD4DUPd16_UPD
;
5682 case ARM::VLD4DUPdWB_register_Asm_32
: Spacing
= 1; return ARM::VLD4DUPd32_UPD
;
5683 case ARM::VLD4DUPqWB_register_Asm_8
: Spacing
= 2; return ARM::VLD4DUPq8_UPD
;
5684 case ARM::VLD4DUPqWB_register_Asm_16
: Spacing
= 2; return ARM::VLD4DUPq16_UPD
;
5685 case ARM::VLD4DUPqWB_register_Asm_32
: Spacing
= 2; return ARM::VLD4DUPq32_UPD
;
5686 case ARM::VLD4DUPdAsm_8
: Spacing
= 1; return ARM::VLD4DUPd8
;
5687 case ARM::VLD4DUPdAsm_16
: Spacing
= 1; return ARM::VLD4DUPd16
;
5688 case ARM::VLD4DUPdAsm_32
: Spacing
= 1; return ARM::VLD4DUPd32
;
5689 case ARM::VLD4DUPqAsm_8
: Spacing
= 2; return ARM::VLD4DUPq8
;
5690 case ARM::VLD4DUPqAsm_16
: Spacing
= 2; return ARM::VLD4DUPq16
;
5691 case ARM::VLD4DUPqAsm_32
: Spacing
= 2; return ARM::VLD4DUPq32
;
5694 case ARM::VLD4dWB_fixed_Asm_8
: Spacing
= 1; return ARM::VLD4d8_UPD
;
5695 case ARM::VLD4dWB_fixed_Asm_16
: Spacing
= 1; return ARM::VLD4d16_UPD
;
5696 case ARM::VLD4dWB_fixed_Asm_32
: Spacing
= 1; return ARM::VLD4d32_UPD
;
5697 case ARM::VLD4qWB_fixed_Asm_8
: Spacing
= 2; return ARM::VLD4q8_UPD
;
5698 case ARM::VLD4qWB_fixed_Asm_16
: Spacing
= 2; return ARM::VLD4q16_UPD
;
5699 case ARM::VLD4qWB_fixed_Asm_32
: Spacing
= 2; return ARM::VLD4q32_UPD
;
5700 case ARM::VLD4dWB_register_Asm_8
: Spacing
= 1; return ARM::VLD4d8_UPD
;
5701 case ARM::VLD4dWB_register_Asm_16
: Spacing
= 1; return ARM::VLD4d16_UPD
;
5702 case ARM::VLD4dWB_register_Asm_32
: Spacing
= 1; return ARM::VLD4d32_UPD
;
5703 case ARM::VLD4qWB_register_Asm_8
: Spacing
= 2; return ARM::VLD4q8_UPD
;
5704 case ARM::VLD4qWB_register_Asm_16
: Spacing
= 2; return ARM::VLD4q16_UPD
;
5705 case ARM::VLD4qWB_register_Asm_32
: Spacing
= 2; return ARM::VLD4q32_UPD
;
5706 case ARM::VLD4dAsm_8
: Spacing
= 1; return ARM::VLD4d8
;
5707 case ARM::VLD4dAsm_16
: Spacing
= 1; return ARM::VLD4d16
;
5708 case ARM::VLD4dAsm_32
: Spacing
= 1; return ARM::VLD4d32
;
5709 case ARM::VLD4qAsm_8
: Spacing
= 2; return ARM::VLD4q8
;
5710 case ARM::VLD4qAsm_16
: Spacing
= 2; return ARM::VLD4q16
;
5711 case ARM::VLD4qAsm_32
: Spacing
= 2; return ARM::VLD4q32
;
5716 processInstruction(MCInst
&Inst
,
5717 const SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
) {
5718 switch (Inst
.getOpcode()) {
5719 // Alias for alternate form of 'ADR Rd, #imm' instruction.
5721 if (Inst
.getOperand(1).getReg() != ARM::PC
||
5722 Inst
.getOperand(5).getReg() != 0)
5725 TmpInst
.setOpcode(ARM::ADR
);
5726 TmpInst
.addOperand(Inst
.getOperand(0));
5727 TmpInst
.addOperand(Inst
.getOperand(2));
5728 TmpInst
.addOperand(Inst
.getOperand(3));
5729 TmpInst
.addOperand(Inst
.getOperand(4));
5733 // Aliases for alternate PC+imm syntax of LDR instructions.
5734 case ARM::t2LDRpcrel
:
5735 // Select the narrow version if the immediate will fit.
5736 if (Inst
.getOperand(1).getImm() > 0 &&
5737 Inst
.getOperand(1).getImm() <= 0xff)
5738 Inst
.setOpcode(ARM::tLDRpci
);
5740 Inst
.setOpcode(ARM::t2LDRpci
);
5742 case ARM::t2LDRBpcrel
:
5743 Inst
.setOpcode(ARM::t2LDRBpci
);
5745 case ARM::t2LDRHpcrel
:
5746 Inst
.setOpcode(ARM::t2LDRHpci
);
5748 case ARM::t2LDRSBpcrel
:
5749 Inst
.setOpcode(ARM::t2LDRSBpci
);
5751 case ARM::t2LDRSHpcrel
:
5752 Inst
.setOpcode(ARM::t2LDRSHpci
);
5754 // Handle NEON VST complex aliases.
5755 case ARM::VST1LNdWB_register_Asm_8
:
5756 case ARM::VST1LNdWB_register_Asm_16
:
5757 case ARM::VST1LNdWB_register_Asm_32
: {
5759 // Shuffle the operands around so the lane index operand is in the
5762 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
5763 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
5764 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
5765 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
5766 TmpInst
.addOperand(Inst
.getOperand(4)); // Rm
5767 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
5768 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
5769 TmpInst
.addOperand(Inst
.getOperand(5)); // CondCode
5770 TmpInst
.addOperand(Inst
.getOperand(6));
5775 case ARM::VST2LNdWB_register_Asm_8
:
5776 case ARM::VST2LNdWB_register_Asm_16
:
5777 case ARM::VST2LNdWB_register_Asm_32
:
5778 case ARM::VST2LNqWB_register_Asm_16
:
5779 case ARM::VST2LNqWB_register_Asm_32
: {
5781 // Shuffle the operands around so the lane index operand is in the
5784 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
5785 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
5786 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
5787 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
5788 TmpInst
.addOperand(Inst
.getOperand(4)); // Rm
5789 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
5790 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
5792 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
5793 TmpInst
.addOperand(Inst
.getOperand(5)); // CondCode
5794 TmpInst
.addOperand(Inst
.getOperand(6));
5799 case ARM::VST3LNdWB_register_Asm_8
:
5800 case ARM::VST3LNdWB_register_Asm_16
:
5801 case ARM::VST3LNdWB_register_Asm_32
:
5802 case ARM::VST3LNqWB_register_Asm_16
:
5803 case ARM::VST3LNqWB_register_Asm_32
: {
5805 // Shuffle the operands around so the lane index operand is in the
5808 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
5809 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
5810 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
5811 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
5812 TmpInst
.addOperand(Inst
.getOperand(4)); // Rm
5813 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
5814 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
5816 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
5818 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
5819 TmpInst
.addOperand(Inst
.getOperand(5)); // CondCode
5820 TmpInst
.addOperand(Inst
.getOperand(6));
5825 case ARM::VST4LNdWB_register_Asm_8
:
5826 case ARM::VST4LNdWB_register_Asm_16
:
5827 case ARM::VST4LNdWB_register_Asm_32
:
5828 case ARM::VST4LNqWB_register_Asm_16
:
5829 case ARM::VST4LNqWB_register_Asm_32
: {
5831 // Shuffle the operands around so the lane index operand is in the
5834 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
5835 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
5836 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
5837 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
5838 TmpInst
.addOperand(Inst
.getOperand(4)); // Rm
5839 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
5840 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
5842 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
5844 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
5846 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
5847 TmpInst
.addOperand(Inst
.getOperand(5)); // CondCode
5848 TmpInst
.addOperand(Inst
.getOperand(6));
5853 case ARM::VST1LNdWB_fixed_Asm_8
:
5854 case ARM::VST1LNdWB_fixed_Asm_16
:
5855 case ARM::VST1LNdWB_fixed_Asm_32
: {
5857 // Shuffle the operands around so the lane index operand is in the
5860 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
5861 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
5862 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
5863 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
5864 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
5865 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
5866 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
5867 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
5868 TmpInst
.addOperand(Inst
.getOperand(5));
5873 case ARM::VST2LNdWB_fixed_Asm_8
:
5874 case ARM::VST2LNdWB_fixed_Asm_16
:
5875 case ARM::VST2LNdWB_fixed_Asm_32
:
5876 case ARM::VST2LNqWB_fixed_Asm_16
:
5877 case ARM::VST2LNqWB_fixed_Asm_32
: {
5879 // Shuffle the operands around so the lane index operand is in the
5882 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
5883 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
5884 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
5885 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
5886 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
5887 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
5888 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
5890 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
5891 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
5892 TmpInst
.addOperand(Inst
.getOperand(5));
5897 case ARM::VST3LNdWB_fixed_Asm_8
:
5898 case ARM::VST3LNdWB_fixed_Asm_16
:
5899 case ARM::VST3LNdWB_fixed_Asm_32
:
5900 case ARM::VST3LNqWB_fixed_Asm_16
:
5901 case ARM::VST3LNqWB_fixed_Asm_32
: {
5903 // Shuffle the operands around so the lane index operand is in the
5906 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
5907 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
5908 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
5909 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
5910 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
5911 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
5912 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
5914 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
5916 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
5917 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
5918 TmpInst
.addOperand(Inst
.getOperand(5));
5923 case ARM::VST4LNdWB_fixed_Asm_8
:
5924 case ARM::VST4LNdWB_fixed_Asm_16
:
5925 case ARM::VST4LNdWB_fixed_Asm_32
:
5926 case ARM::VST4LNqWB_fixed_Asm_16
:
5927 case ARM::VST4LNqWB_fixed_Asm_32
: {
5929 // Shuffle the operands around so the lane index operand is in the
5932 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
5933 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
5934 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
5935 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
5936 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
5937 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
5938 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
5940 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
5942 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
5944 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
5945 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
5946 TmpInst
.addOperand(Inst
.getOperand(5));
5951 case ARM::VST1LNdAsm_8
:
5952 case ARM::VST1LNdAsm_16
:
5953 case ARM::VST1LNdAsm_32
: {
5955 // Shuffle the operands around so the lane index operand is in the
5958 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
5959 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
5960 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
5961 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
5962 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
5963 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
5964 TmpInst
.addOperand(Inst
.getOperand(5));
5969 case ARM::VST2LNdAsm_8
:
5970 case ARM::VST2LNdAsm_16
:
5971 case ARM::VST2LNdAsm_32
:
5972 case ARM::VST2LNqAsm_16
:
5973 case ARM::VST2LNqAsm_32
: {
5975 // Shuffle the operands around so the lane index operand is in the
5978 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
5979 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
5980 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
5981 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
5982 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
5984 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
5985 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
5986 TmpInst
.addOperand(Inst
.getOperand(5));
5991 case ARM::VST3LNdAsm_8
:
5992 case ARM::VST3LNdAsm_16
:
5993 case ARM::VST3LNdAsm_32
:
5994 case ARM::VST3LNqAsm_16
:
5995 case ARM::VST3LNqAsm_32
: {
5997 // Shuffle the operands around so the lane index operand is in the
6000 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
6001 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6002 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6003 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6004 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6006 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6008 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6009 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6010 TmpInst
.addOperand(Inst
.getOperand(5));
6015 case ARM::VST4LNdAsm_8
:
6016 case ARM::VST4LNdAsm_16
:
6017 case ARM::VST4LNdAsm_32
:
6018 case ARM::VST4LNqAsm_16
:
6019 case ARM::VST4LNqAsm_32
: {
6021 // Shuffle the operands around so the lane index operand is in the
6024 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
6025 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6026 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6027 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6028 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6030 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6032 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6034 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6035 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6036 TmpInst
.addOperand(Inst
.getOperand(5));
6041 // Handle NEON VLD complex aliases.
6042 case ARM::VLD1LNdWB_register_Asm_8
:
6043 case ARM::VLD1LNdWB_register_Asm_16
:
6044 case ARM::VLD1LNdWB_register_Asm_32
: {
6046 // Shuffle the operands around so the lane index operand is in the
6049 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6050 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6051 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
6052 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6053 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6054 TmpInst
.addOperand(Inst
.getOperand(4)); // Rm
6055 TmpInst
.addOperand(Inst
.getOperand(0)); // Tied operand src (== Vd)
6056 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6057 TmpInst
.addOperand(Inst
.getOperand(5)); // CondCode
6058 TmpInst
.addOperand(Inst
.getOperand(6));
6063 case ARM::VLD2LNdWB_register_Asm_8
:
6064 case ARM::VLD2LNdWB_register_Asm_16
:
6065 case ARM::VLD2LNdWB_register_Asm_32
:
6066 case ARM::VLD2LNqWB_register_Asm_16
:
6067 case ARM::VLD2LNqWB_register_Asm_32
: {
6069 // Shuffle the operands around so the lane index operand is in the
6072 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6073 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6074 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6076 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
6077 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6078 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6079 TmpInst
.addOperand(Inst
.getOperand(4)); // Rm
6080 TmpInst
.addOperand(Inst
.getOperand(0)); // Tied operand src (== Vd)
6081 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6083 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6084 TmpInst
.addOperand(Inst
.getOperand(5)); // CondCode
6085 TmpInst
.addOperand(Inst
.getOperand(6));
6090 case ARM::VLD3LNdWB_register_Asm_8
:
6091 case ARM::VLD3LNdWB_register_Asm_16
:
6092 case ARM::VLD3LNdWB_register_Asm_32
:
6093 case ARM::VLD3LNqWB_register_Asm_16
:
6094 case ARM::VLD3LNqWB_register_Asm_32
: {
6096 // Shuffle the operands around so the lane index operand is in the
6099 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6100 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6101 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6103 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6105 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
6106 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6107 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6108 TmpInst
.addOperand(Inst
.getOperand(4)); // Rm
6109 TmpInst
.addOperand(Inst
.getOperand(0)); // Tied operand src (== Vd)
6110 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6112 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6114 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6115 TmpInst
.addOperand(Inst
.getOperand(5)); // CondCode
6116 TmpInst
.addOperand(Inst
.getOperand(6));
6121 case ARM::VLD4LNdWB_register_Asm_8
:
6122 case ARM::VLD4LNdWB_register_Asm_16
:
6123 case ARM::VLD4LNdWB_register_Asm_32
:
6124 case ARM::VLD4LNqWB_register_Asm_16
:
6125 case ARM::VLD4LNqWB_register_Asm_32
: {
6127 // Shuffle the operands around so the lane index operand is in the
6130 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6131 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6132 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6134 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6136 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6138 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
6139 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6140 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6141 TmpInst
.addOperand(Inst
.getOperand(4)); // Rm
6142 TmpInst
.addOperand(Inst
.getOperand(0)); // Tied operand src (== Vd)
6143 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6145 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6147 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6149 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6150 TmpInst
.addOperand(Inst
.getOperand(5)); // CondCode
6151 TmpInst
.addOperand(Inst
.getOperand(6));
6156 case ARM::VLD1LNdWB_fixed_Asm_8
:
6157 case ARM::VLD1LNdWB_fixed_Asm_16
:
6158 case ARM::VLD1LNdWB_fixed_Asm_32
: {
6160 // Shuffle the operands around so the lane index operand is in the
6163 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6164 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6165 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
6166 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6167 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6168 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
6169 TmpInst
.addOperand(Inst
.getOperand(0)); // Tied operand src (== Vd)
6170 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6171 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6172 TmpInst
.addOperand(Inst
.getOperand(5));
6177 case ARM::VLD2LNdWB_fixed_Asm_8
:
6178 case ARM::VLD2LNdWB_fixed_Asm_16
:
6179 case ARM::VLD2LNdWB_fixed_Asm_32
:
6180 case ARM::VLD2LNqWB_fixed_Asm_16
:
6181 case ARM::VLD2LNqWB_fixed_Asm_32
: {
6183 // Shuffle the operands around so the lane index operand is in the
6186 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6187 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6188 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6190 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
6191 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6192 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6193 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
6194 TmpInst
.addOperand(Inst
.getOperand(0)); // Tied operand src (== Vd)
6195 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6197 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6198 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6199 TmpInst
.addOperand(Inst
.getOperand(5));
6204 case ARM::VLD3LNdWB_fixed_Asm_8
:
6205 case ARM::VLD3LNdWB_fixed_Asm_16
:
6206 case ARM::VLD3LNdWB_fixed_Asm_32
:
6207 case ARM::VLD3LNqWB_fixed_Asm_16
:
6208 case ARM::VLD3LNqWB_fixed_Asm_32
: {
6210 // Shuffle the operands around so the lane index operand is in the
6213 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6214 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6215 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6217 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6219 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
6220 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6221 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6222 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
6223 TmpInst
.addOperand(Inst
.getOperand(0)); // Tied operand src (== Vd)
6224 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6226 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6228 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6229 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6230 TmpInst
.addOperand(Inst
.getOperand(5));
6235 case ARM::VLD4LNdWB_fixed_Asm_8
:
6236 case ARM::VLD4LNdWB_fixed_Asm_16
:
6237 case ARM::VLD4LNdWB_fixed_Asm_32
:
6238 case ARM::VLD4LNqWB_fixed_Asm_16
:
6239 case ARM::VLD4LNqWB_fixed_Asm_32
: {
6241 // Shuffle the operands around so the lane index operand is in the
6244 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6245 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6246 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6248 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6250 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6252 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn_wb
6253 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6254 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6255 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
6256 TmpInst
.addOperand(Inst
.getOperand(0)); // Tied operand src (== Vd)
6257 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6259 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6261 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6263 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6264 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6265 TmpInst
.addOperand(Inst
.getOperand(5));
6270 case ARM::VLD1LNdAsm_8
:
6271 case ARM::VLD1LNdAsm_16
:
6272 case ARM::VLD1LNdAsm_32
: {
6274 // Shuffle the operands around so the lane index operand is in the
6277 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6278 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6279 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6280 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6281 TmpInst
.addOperand(Inst
.getOperand(0)); // Tied operand src (== Vd)
6282 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6283 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6284 TmpInst
.addOperand(Inst
.getOperand(5));
6289 case ARM::VLD2LNdAsm_8
:
6290 case ARM::VLD2LNdAsm_16
:
6291 case ARM::VLD2LNdAsm_32
:
6292 case ARM::VLD2LNqAsm_16
:
6293 case ARM::VLD2LNqAsm_32
: {
6295 // Shuffle the operands around so the lane index operand is in the
6298 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6299 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6300 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6302 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6303 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6304 TmpInst
.addOperand(Inst
.getOperand(0)); // Tied operand src (== Vd)
6305 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6307 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6308 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6309 TmpInst
.addOperand(Inst
.getOperand(5));
6314 case ARM::VLD3LNdAsm_8
:
6315 case ARM::VLD3LNdAsm_16
:
6316 case ARM::VLD3LNdAsm_32
:
6317 case ARM::VLD3LNqAsm_16
:
6318 case ARM::VLD3LNqAsm_32
: {
6320 // Shuffle the operands around so the lane index operand is in the
6323 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6324 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6325 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6327 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6329 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6330 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6331 TmpInst
.addOperand(Inst
.getOperand(0)); // Tied operand src (== Vd)
6332 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6334 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6336 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6337 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6338 TmpInst
.addOperand(Inst
.getOperand(5));
6343 case ARM::VLD4LNdAsm_8
:
6344 case ARM::VLD4LNdAsm_16
:
6345 case ARM::VLD4LNdAsm_32
:
6346 case ARM::VLD4LNqAsm_16
:
6347 case ARM::VLD4LNqAsm_32
: {
6349 // Shuffle the operands around so the lane index operand is in the
6352 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6353 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6354 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6356 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6358 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6360 TmpInst
.addOperand(Inst
.getOperand(2)); // Rn
6361 TmpInst
.addOperand(Inst
.getOperand(3)); // alignment
6362 TmpInst
.addOperand(Inst
.getOperand(0)); // Tied operand src (== Vd)
6363 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6365 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6367 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6369 TmpInst
.addOperand(Inst
.getOperand(1)); // lane
6370 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6371 TmpInst
.addOperand(Inst
.getOperand(5));
6376 // VLD3DUP single 3-element structure to all lanes instructions.
6377 case ARM::VLD3DUPdAsm_8
:
6378 case ARM::VLD3DUPdAsm_16
:
6379 case ARM::VLD3DUPdAsm_32
:
6380 case ARM::VLD3DUPqAsm_8
:
6381 case ARM::VLD3DUPqAsm_16
:
6382 case ARM::VLD3DUPqAsm_32
: {
6385 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6386 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6387 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6389 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6391 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6392 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6393 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6394 TmpInst
.addOperand(Inst
.getOperand(4));
6399 case ARM::VLD3DUPdWB_fixed_Asm_8
:
6400 case ARM::VLD3DUPdWB_fixed_Asm_16
:
6401 case ARM::VLD3DUPdWB_fixed_Asm_32
:
6402 case ARM::VLD3DUPqWB_fixed_Asm_8
:
6403 case ARM::VLD3DUPqWB_fixed_Asm_16
:
6404 case ARM::VLD3DUPqWB_fixed_Asm_32
: {
6407 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6408 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6409 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6411 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6413 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6414 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn_wb == tied Rn
6415 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6416 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
6417 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6418 TmpInst
.addOperand(Inst
.getOperand(4));
6423 case ARM::VLD3DUPdWB_register_Asm_8
:
6424 case ARM::VLD3DUPdWB_register_Asm_16
:
6425 case ARM::VLD3DUPdWB_register_Asm_32
:
6426 case ARM::VLD3DUPqWB_register_Asm_8
:
6427 case ARM::VLD3DUPqWB_register_Asm_16
:
6428 case ARM::VLD3DUPqWB_register_Asm_32
: {
6431 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6432 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6433 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6435 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6437 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6438 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn_wb == tied Rn
6439 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6440 TmpInst
.addOperand(Inst
.getOperand(3)); // Rm
6441 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6442 TmpInst
.addOperand(Inst
.getOperand(5));
6447 // VLD3 multiple 3-element structure instructions.
6448 case ARM::VLD3dAsm_8
:
6449 case ARM::VLD3dAsm_16
:
6450 case ARM::VLD3dAsm_32
:
6451 case ARM::VLD3qAsm_8
:
6452 case ARM::VLD3qAsm_16
:
6453 case ARM::VLD3qAsm_32
: {
6456 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6457 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6458 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6460 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6462 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6463 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6464 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6465 TmpInst
.addOperand(Inst
.getOperand(4));
6470 case ARM::VLD3dWB_fixed_Asm_8
:
6471 case ARM::VLD3dWB_fixed_Asm_16
:
6472 case ARM::VLD3dWB_fixed_Asm_32
:
6473 case ARM::VLD3qWB_fixed_Asm_8
:
6474 case ARM::VLD3qWB_fixed_Asm_16
:
6475 case ARM::VLD3qWB_fixed_Asm_32
: {
6478 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6479 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6480 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6482 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6484 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6485 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn_wb == tied Rn
6486 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6487 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
6488 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6489 TmpInst
.addOperand(Inst
.getOperand(4));
6494 case ARM::VLD3dWB_register_Asm_8
:
6495 case ARM::VLD3dWB_register_Asm_16
:
6496 case ARM::VLD3dWB_register_Asm_32
:
6497 case ARM::VLD3qWB_register_Asm_8
:
6498 case ARM::VLD3qWB_register_Asm_16
:
6499 case ARM::VLD3qWB_register_Asm_32
: {
6502 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6503 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6504 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6506 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6508 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6509 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn_wb == tied Rn
6510 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6511 TmpInst
.addOperand(Inst
.getOperand(3)); // Rm
6512 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6513 TmpInst
.addOperand(Inst
.getOperand(5));
6518 // VLD4DUP single 3-element structure to all lanes instructions.
6519 case ARM::VLD4DUPdAsm_8
:
6520 case ARM::VLD4DUPdAsm_16
:
6521 case ARM::VLD4DUPdAsm_32
:
6522 case ARM::VLD4DUPqAsm_8
:
6523 case ARM::VLD4DUPqAsm_16
:
6524 case ARM::VLD4DUPqAsm_32
: {
6527 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6528 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6529 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6531 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6533 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6535 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6536 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6537 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6538 TmpInst
.addOperand(Inst
.getOperand(4));
6543 case ARM::VLD4DUPdWB_fixed_Asm_8
:
6544 case ARM::VLD4DUPdWB_fixed_Asm_16
:
6545 case ARM::VLD4DUPdWB_fixed_Asm_32
:
6546 case ARM::VLD4DUPqWB_fixed_Asm_8
:
6547 case ARM::VLD4DUPqWB_fixed_Asm_16
:
6548 case ARM::VLD4DUPqWB_fixed_Asm_32
: {
6551 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6552 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6553 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6555 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6557 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6559 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6560 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn_wb == tied Rn
6561 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6562 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
6563 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6564 TmpInst
.addOperand(Inst
.getOperand(4));
6569 case ARM::VLD4DUPdWB_register_Asm_8
:
6570 case ARM::VLD4DUPdWB_register_Asm_16
:
6571 case ARM::VLD4DUPdWB_register_Asm_32
:
6572 case ARM::VLD4DUPqWB_register_Asm_8
:
6573 case ARM::VLD4DUPqWB_register_Asm_16
:
6574 case ARM::VLD4DUPqWB_register_Asm_32
: {
6577 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6578 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6579 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6581 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6583 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6585 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6586 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn_wb == tied Rn
6587 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6588 TmpInst
.addOperand(Inst
.getOperand(3)); // Rm
6589 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6590 TmpInst
.addOperand(Inst
.getOperand(5));
6595 // VLD4 multiple 4-element structure instructions.
6596 case ARM::VLD4dAsm_8
:
6597 case ARM::VLD4dAsm_16
:
6598 case ARM::VLD4dAsm_32
:
6599 case ARM::VLD4qAsm_8
:
6600 case ARM::VLD4qAsm_16
:
6601 case ARM::VLD4qAsm_32
: {
6604 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6605 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6606 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6608 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6610 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6612 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6613 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6614 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6615 TmpInst
.addOperand(Inst
.getOperand(4));
6620 case ARM::VLD4dWB_fixed_Asm_8
:
6621 case ARM::VLD4dWB_fixed_Asm_16
:
6622 case ARM::VLD4dWB_fixed_Asm_32
:
6623 case ARM::VLD4qWB_fixed_Asm_8
:
6624 case ARM::VLD4qWB_fixed_Asm_16
:
6625 case ARM::VLD4qWB_fixed_Asm_32
: {
6628 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6629 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6630 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6632 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6634 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6636 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6637 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn_wb == tied Rn
6638 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6639 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
6640 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6641 TmpInst
.addOperand(Inst
.getOperand(4));
6646 case ARM::VLD4dWB_register_Asm_8
:
6647 case ARM::VLD4dWB_register_Asm_16
:
6648 case ARM::VLD4dWB_register_Asm_32
:
6649 case ARM::VLD4qWB_register_Asm_8
:
6650 case ARM::VLD4qWB_register_Asm_16
:
6651 case ARM::VLD4qWB_register_Asm_32
: {
6654 TmpInst
.setOpcode(getRealVLDOpcode(Inst
.getOpcode(), Spacing
));
6655 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6656 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6658 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6660 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6662 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6663 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn_wb == tied Rn
6664 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6665 TmpInst
.addOperand(Inst
.getOperand(3)); // Rm
6666 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6667 TmpInst
.addOperand(Inst
.getOperand(5));
6672 // VST3 multiple 3-element structure instructions.
6673 case ARM::VST3dAsm_8
:
6674 case ARM::VST3dAsm_16
:
6675 case ARM::VST3dAsm_32
:
6676 case ARM::VST3qAsm_8
:
6677 case ARM::VST3qAsm_16
:
6678 case ARM::VST3qAsm_32
: {
6681 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
6682 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6683 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6684 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6685 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6687 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6689 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6690 TmpInst
.addOperand(Inst
.getOperand(4));
6695 case ARM::VST3dWB_fixed_Asm_8
:
6696 case ARM::VST3dWB_fixed_Asm_16
:
6697 case ARM::VST3dWB_fixed_Asm_32
:
6698 case ARM::VST3qWB_fixed_Asm_8
:
6699 case ARM::VST3qWB_fixed_Asm_16
:
6700 case ARM::VST3qWB_fixed_Asm_32
: {
6703 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
6704 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6705 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn_wb == tied Rn
6706 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6707 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
6708 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6709 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6711 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6713 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6714 TmpInst
.addOperand(Inst
.getOperand(4));
6719 case ARM::VST3dWB_register_Asm_8
:
6720 case ARM::VST3dWB_register_Asm_16
:
6721 case ARM::VST3dWB_register_Asm_32
:
6722 case ARM::VST3qWB_register_Asm_8
:
6723 case ARM::VST3qWB_register_Asm_16
:
6724 case ARM::VST3qWB_register_Asm_32
: {
6727 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
6728 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6729 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn_wb == tied Rn
6730 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6731 TmpInst
.addOperand(Inst
.getOperand(3)); // Rm
6732 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6733 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6735 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6737 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6738 TmpInst
.addOperand(Inst
.getOperand(5));
6743 // VST4 multiple 3-element structure instructions.
6744 case ARM::VST4dAsm_8
:
6745 case ARM::VST4dAsm_16
:
6746 case ARM::VST4dAsm_32
:
6747 case ARM::VST4qAsm_8
:
6748 case ARM::VST4qAsm_16
:
6749 case ARM::VST4qAsm_32
: {
6752 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
6753 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6754 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6755 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6756 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6758 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6760 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6762 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6763 TmpInst
.addOperand(Inst
.getOperand(4));
6768 case ARM::VST4dWB_fixed_Asm_8
:
6769 case ARM::VST4dWB_fixed_Asm_16
:
6770 case ARM::VST4dWB_fixed_Asm_32
:
6771 case ARM::VST4qWB_fixed_Asm_8
:
6772 case ARM::VST4qWB_fixed_Asm_16
:
6773 case ARM::VST4qWB_fixed_Asm_32
: {
6776 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
6777 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6778 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn_wb == tied Rn
6779 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6780 TmpInst
.addOperand(MCOperand::CreateReg(0)); // Rm
6781 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6782 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6784 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6786 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6788 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6789 TmpInst
.addOperand(Inst
.getOperand(4));
6794 case ARM::VST4dWB_register_Asm_8
:
6795 case ARM::VST4dWB_register_Asm_16
:
6796 case ARM::VST4dWB_register_Asm_32
:
6797 case ARM::VST4qWB_register_Asm_8
:
6798 case ARM::VST4qWB_register_Asm_16
:
6799 case ARM::VST4qWB_register_Asm_32
: {
6802 TmpInst
.setOpcode(getRealVSTOpcode(Inst
.getOpcode(), Spacing
));
6803 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6804 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn_wb == tied Rn
6805 TmpInst
.addOperand(Inst
.getOperand(2)); // alignment
6806 TmpInst
.addOperand(Inst
.getOperand(3)); // Rm
6807 TmpInst
.addOperand(Inst
.getOperand(0)); // Vd
6808 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6810 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6812 TmpInst
.addOperand(MCOperand::CreateReg(Inst
.getOperand(0).getReg() +
6814 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6815 TmpInst
.addOperand(Inst
.getOperand(5));
6820 // Handle encoding choice for the shift-immediate instructions.
6823 case ARM::t2ASRri
: {
6824 if (isARMLowRegister(Inst
.getOperand(0).getReg()) &&
6825 Inst
.getOperand(0).getReg() == Inst
.getOperand(1).getReg() &&
6826 Inst
.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR
) &&
6827 !(static_cast<ARMOperand
*>(Operands
[3])->isToken() &&
6828 static_cast<ARMOperand
*>(Operands
[3])->getToken() == ".w")) {
6830 switch (Inst
.getOpcode()) {
6831 default: llvm_unreachable("unexpected opcode");
6832 case ARM::t2LSLri
: NewOpc
= ARM::tLSLri
; break;
6833 case ARM::t2LSRri
: NewOpc
= ARM::tLSRri
; break;
6834 case ARM::t2ASRri
: NewOpc
= ARM::tASRri
; break;
6836 // The Thumb1 operands aren't in the same order. Awesome, eh?
6838 TmpInst
.setOpcode(NewOpc
);
6839 TmpInst
.addOperand(Inst
.getOperand(0));
6840 TmpInst
.addOperand(Inst
.getOperand(5));
6841 TmpInst
.addOperand(Inst
.getOperand(1));
6842 TmpInst
.addOperand(Inst
.getOperand(2));
6843 TmpInst
.addOperand(Inst
.getOperand(3));
6844 TmpInst
.addOperand(Inst
.getOperand(4));
6851 // Handle the Thumb2 mode MOV complex aliases.
6853 case ARM::t2MOVSsr
: {
6854 // Which instruction to expand to depends on the CCOut operand and
6855 // whether we're in an IT block if the register operands are low
6857 bool isNarrow
= false;
6858 if (isARMLowRegister(Inst
.getOperand(0).getReg()) &&
6859 isARMLowRegister(Inst
.getOperand(1).getReg()) &&
6860 isARMLowRegister(Inst
.getOperand(2).getReg()) &&
6861 Inst
.getOperand(0).getReg() == Inst
.getOperand(1).getReg() &&
6862 inITBlock() == (Inst
.getOpcode() == ARM::t2MOVsr
))
6866 switch(ARM_AM::getSORegShOp(Inst
.getOperand(3).getImm())) {
6867 default: llvm_unreachable("unexpected opcode!");
6868 case ARM_AM::asr
: newOpc
= isNarrow
? ARM::tASRrr
: ARM::t2ASRrr
; break;
6869 case ARM_AM::lsr
: newOpc
= isNarrow
? ARM::tLSRrr
: ARM::t2LSRrr
; break;
6870 case ARM_AM::lsl
: newOpc
= isNarrow
? ARM::tLSLrr
: ARM::t2LSLrr
; break;
6871 case ARM_AM::ror
: newOpc
= isNarrow
? ARM::tROR
: ARM::t2RORrr
; break;
6873 TmpInst
.setOpcode(newOpc
);
6874 TmpInst
.addOperand(Inst
.getOperand(0)); // Rd
6876 TmpInst
.addOperand(MCOperand::CreateReg(
6877 Inst
.getOpcode() == ARM::t2MOVSsr
? ARM::CPSR
: 0));
6878 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6879 TmpInst
.addOperand(Inst
.getOperand(2)); // Rm
6880 TmpInst
.addOperand(Inst
.getOperand(4)); // CondCode
6881 TmpInst
.addOperand(Inst
.getOperand(5));
6883 TmpInst
.addOperand(MCOperand::CreateReg(
6884 Inst
.getOpcode() == ARM::t2MOVSsr
? ARM::CPSR
: 0));
6889 case ARM::t2MOVSsi
: {
6890 // Which instruction to expand to depends on the CCOut operand and
6891 // whether we're in an IT block if the register operands are low
6893 bool isNarrow
= false;
6894 if (isARMLowRegister(Inst
.getOperand(0).getReg()) &&
6895 isARMLowRegister(Inst
.getOperand(1).getReg()) &&
6896 inITBlock() == (Inst
.getOpcode() == ARM::t2MOVsi
))
6900 switch(ARM_AM::getSORegShOp(Inst
.getOperand(2).getImm())) {
6901 default: llvm_unreachable("unexpected opcode!");
6902 case ARM_AM::asr
: newOpc
= isNarrow
? ARM::tASRri
: ARM::t2ASRri
; break;
6903 case ARM_AM::lsr
: newOpc
= isNarrow
? ARM::tLSRri
: ARM::t2LSRri
; break;
6904 case ARM_AM::lsl
: newOpc
= isNarrow
? ARM::tLSLri
: ARM::t2LSLri
; break;
6905 case ARM_AM::ror
: newOpc
= ARM::t2RORri
; isNarrow
= false; break;
6906 case ARM_AM::rrx
: isNarrow
= false; newOpc
= ARM::t2RRX
; break;
6908 unsigned Amount
= ARM_AM::getSORegOffset(Inst
.getOperand(2).getImm());
6909 if (Amount
== 32) Amount
= 0;
6910 TmpInst
.setOpcode(newOpc
);
6911 TmpInst
.addOperand(Inst
.getOperand(0)); // Rd
6913 TmpInst
.addOperand(MCOperand::CreateReg(
6914 Inst
.getOpcode() == ARM::t2MOVSsi
? ARM::CPSR
: 0));
6915 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6916 if (newOpc
!= ARM::t2RRX
)
6917 TmpInst
.addOperand(MCOperand::CreateImm(Amount
));
6918 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6919 TmpInst
.addOperand(Inst
.getOperand(4));
6921 TmpInst
.addOperand(MCOperand::CreateReg(
6922 Inst
.getOpcode() == ARM::t2MOVSsi
? ARM::CPSR
: 0));
6926 // Handle the ARM mode MOV complex aliases.
6931 ARM_AM::ShiftOpc ShiftTy
;
6932 switch(Inst
.getOpcode()) {
6933 default: llvm_unreachable("unexpected opcode!");
6934 case ARM::ASRr
: ShiftTy
= ARM_AM::asr
; break;
6935 case ARM::LSRr
: ShiftTy
= ARM_AM::lsr
; break;
6936 case ARM::LSLr
: ShiftTy
= ARM_AM::lsl
; break;
6937 case ARM::RORr
: ShiftTy
= ARM_AM::ror
; break;
6939 unsigned Shifter
= ARM_AM::getSORegOpc(ShiftTy
, 0);
6941 TmpInst
.setOpcode(ARM::MOVsr
);
6942 TmpInst
.addOperand(Inst
.getOperand(0)); // Rd
6943 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6944 TmpInst
.addOperand(Inst
.getOperand(2)); // Rm
6945 TmpInst
.addOperand(MCOperand::CreateImm(Shifter
)); // Shift value and ty
6946 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6947 TmpInst
.addOperand(Inst
.getOperand(4));
6948 TmpInst
.addOperand(Inst
.getOperand(5)); // cc_out
6956 ARM_AM::ShiftOpc ShiftTy
;
6957 switch(Inst
.getOpcode()) {
6958 default: llvm_unreachable("unexpected opcode!");
6959 case ARM::ASRi
: ShiftTy
= ARM_AM::asr
; break;
6960 case ARM::LSRi
: ShiftTy
= ARM_AM::lsr
; break;
6961 case ARM::LSLi
: ShiftTy
= ARM_AM::lsl
; break;
6962 case ARM::RORi
: ShiftTy
= ARM_AM::ror
; break;
6964 // A shift by zero is a plain MOVr, not a MOVsi.
6965 unsigned Amt
= Inst
.getOperand(2).getImm();
6966 unsigned Opc
= Amt
== 0 ? ARM::MOVr
: ARM::MOVsi
;
6967 // A shift by 32 should be encoded as 0 when permitted
6968 if (Amt
== 32 && (ShiftTy
== ARM_AM::lsr
|| ShiftTy
== ARM_AM::asr
))
6970 unsigned Shifter
= ARM_AM::getSORegOpc(ShiftTy
, Amt
);
6972 TmpInst
.setOpcode(Opc
);
6973 TmpInst
.addOperand(Inst
.getOperand(0)); // Rd
6974 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6975 if (Opc
== ARM::MOVsi
)
6976 TmpInst
.addOperand(MCOperand::CreateImm(Shifter
)); // Shift value and ty
6977 TmpInst
.addOperand(Inst
.getOperand(3)); // CondCode
6978 TmpInst
.addOperand(Inst
.getOperand(4));
6979 TmpInst
.addOperand(Inst
.getOperand(5)); // cc_out
6984 unsigned Shifter
= ARM_AM::getSORegOpc(ARM_AM::rrx
, 0);
6986 TmpInst
.setOpcode(ARM::MOVsi
);
6987 TmpInst
.addOperand(Inst
.getOperand(0)); // Rd
6988 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
6989 TmpInst
.addOperand(MCOperand::CreateImm(Shifter
)); // Shift value and ty
6990 TmpInst
.addOperand(Inst
.getOperand(2)); // CondCode
6991 TmpInst
.addOperand(Inst
.getOperand(3));
6992 TmpInst
.addOperand(Inst
.getOperand(4)); // cc_out
6996 case ARM::t2LDMIA_UPD
: {
6997 // If this is a load of a single register, then we should use
6998 // a post-indexed LDR instruction instead, per the ARM ARM.
6999 if (Inst
.getNumOperands() != 5)
7002 TmpInst
.setOpcode(ARM::t2LDR_POST
);
7003 TmpInst
.addOperand(Inst
.getOperand(4)); // Rt
7004 TmpInst
.addOperand(Inst
.getOperand(0)); // Rn_wb
7005 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
7006 TmpInst
.addOperand(MCOperand::CreateImm(4));
7007 TmpInst
.addOperand(Inst
.getOperand(2)); // CondCode
7008 TmpInst
.addOperand(Inst
.getOperand(3));
7012 case ARM::t2STMDB_UPD
: {
7013 // If this is a store of a single register, then we should use
7014 // a pre-indexed STR instruction instead, per the ARM ARM.
7015 if (Inst
.getNumOperands() != 5)
7018 TmpInst
.setOpcode(ARM::t2STR_PRE
);
7019 TmpInst
.addOperand(Inst
.getOperand(0)); // Rn_wb
7020 TmpInst
.addOperand(Inst
.getOperand(4)); // Rt
7021 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
7022 TmpInst
.addOperand(MCOperand::CreateImm(-4));
7023 TmpInst
.addOperand(Inst
.getOperand(2)); // CondCode
7024 TmpInst
.addOperand(Inst
.getOperand(3));
7028 case ARM::LDMIA_UPD
:
7029 // If this is a load of a single register via a 'pop', then we should use
7030 // a post-indexed LDR instruction instead, per the ARM ARM.
7031 if (static_cast<ARMOperand
*>(Operands
[0])->getToken() == "pop" &&
7032 Inst
.getNumOperands() == 5) {
7034 TmpInst
.setOpcode(ARM::LDR_POST_IMM
);
7035 TmpInst
.addOperand(Inst
.getOperand(4)); // Rt
7036 TmpInst
.addOperand(Inst
.getOperand(0)); // Rn_wb
7037 TmpInst
.addOperand(Inst
.getOperand(1)); // Rn
7038 TmpInst
.addOperand(MCOperand::CreateReg(0)); // am2offset
7039 TmpInst
.addOperand(MCOperand::CreateImm(4));
7040 TmpInst
.addOperand(Inst
.getOperand(2)); // CondCode
7041 TmpInst
.addOperand(Inst
.getOperand(3));
7046 case ARM::STMDB_UPD
:
7047 // If this is a store of a single register via a 'push', then we should use
7048 // a pre-indexed STR instruction instead, per the ARM ARM.
7049 if (static_cast<ARMOperand
*>(Operands
[0])->getToken() == "push" &&
7050 Inst
.getNumOperands() == 5) {
7052 TmpInst
.setOpcode(ARM::STR_PRE_IMM
);
7053 TmpInst
.addOperand(Inst
.getOperand(0)); // Rn_wb
7054 TmpInst
.addOperand(Inst
.getOperand(4)); // Rt
7055 TmpInst
.addOperand(Inst
.getOperand(1)); // addrmode_imm12
7056 TmpInst
.addOperand(MCOperand::CreateImm(-4));
7057 TmpInst
.addOperand(Inst
.getOperand(2)); // CondCode
7058 TmpInst
.addOperand(Inst
.getOperand(3));
7062 case ARM::t2ADDri12
:
7063 // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
7064 // mnemonic was used (not "addw"), encoding T3 is preferred.
7065 if (static_cast<ARMOperand
*>(Operands
[0])->getToken() != "add" ||
7066 ARM_AM::getT2SOImmVal(Inst
.getOperand(2).getImm()) == -1)
7068 Inst
.setOpcode(ARM::t2ADDri
);
7069 Inst
.addOperand(MCOperand::CreateReg(0)); // cc_out
7071 case ARM::t2SUBri12
:
7072 // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
7073 // mnemonic was used (not "subw"), encoding T3 is preferred.
7074 if (static_cast<ARMOperand
*>(Operands
[0])->getToken() != "sub" ||
7075 ARM_AM::getT2SOImmVal(Inst
.getOperand(2).getImm()) == -1)
7077 Inst
.setOpcode(ARM::t2SUBri
);
7078 Inst
.addOperand(MCOperand::CreateReg(0)); // cc_out
7081 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
7082 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7083 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7084 // to encoding T1 if <Rd> is omitted."
7085 if ((unsigned)Inst
.getOperand(3).getImm() < 8 && Operands
.size() == 6) {
7086 Inst
.setOpcode(ARM::tADDi3
);
7091 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
7092 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7093 // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7094 // to encoding T1 if <Rd> is omitted."
7095 if ((unsigned)Inst
.getOperand(3).getImm() < 8 && Operands
.size() == 6) {
7096 Inst
.setOpcode(ARM::tSUBi3
);
7101 case ARM::t2SUBri
: {
7102 // If the destination and first source operand are the same, and
7103 // the flags are compatible with the current IT status, use encoding T2
7104 // instead of T3. For compatibility with the system 'as'. Make sure the
7105 // wide encoding wasn't explicit.
7106 if (Inst
.getOperand(0).getReg() != Inst
.getOperand(1).getReg() ||
7107 !isARMLowRegister(Inst
.getOperand(0).getReg()) ||
7108 (unsigned)Inst
.getOperand(2).getImm() > 255 ||
7109 ((!inITBlock() && Inst
.getOperand(5).getReg() != ARM::CPSR
) ||
7110 (inITBlock() && Inst
.getOperand(5).getReg() != 0)) ||
7111 (static_cast<ARMOperand
*>(Operands
[3])->isToken() &&
7112 static_cast<ARMOperand
*>(Operands
[3])->getToken() == ".w"))
7115 TmpInst
.setOpcode(Inst
.getOpcode() == ARM::t2ADDri
?
7116 ARM::tADDi8
: ARM::tSUBi8
);
7117 TmpInst
.addOperand(Inst
.getOperand(0));
7118 TmpInst
.addOperand(Inst
.getOperand(5));
7119 TmpInst
.addOperand(Inst
.getOperand(0));
7120 TmpInst
.addOperand(Inst
.getOperand(2));
7121 TmpInst
.addOperand(Inst
.getOperand(3));
7122 TmpInst
.addOperand(Inst
.getOperand(4));
7126 case ARM::t2ADDrr
: {
7127 // If the destination and first source operand are the same, and
7128 // there's no setting of the flags, use encoding T2 instead of T3.
7129 // Note that this is only for ADD, not SUB. This mirrors the system
7130 // 'as' behaviour. Make sure the wide encoding wasn't explicit.
7131 if (Inst
.getOperand(0).getReg() != Inst
.getOperand(1).getReg() ||
7132 Inst
.getOperand(5).getReg() != 0 ||
7133 (static_cast<ARMOperand
*>(Operands
[3])->isToken() &&
7134 static_cast<ARMOperand
*>(Operands
[3])->getToken() == ".w"))
7137 TmpInst
.setOpcode(ARM::tADDhirr
);
7138 TmpInst
.addOperand(Inst
.getOperand(0));
7139 TmpInst
.addOperand(Inst
.getOperand(0));
7140 TmpInst
.addOperand(Inst
.getOperand(2));
7141 TmpInst
.addOperand(Inst
.getOperand(3));
7142 TmpInst
.addOperand(Inst
.getOperand(4));
7146 case ARM::tADDrSP
: {
7147 // If the non-SP source operand and the destination operand are not the
7148 // same, we need to use the 32-bit encoding if it's available.
7149 if (Inst
.getOperand(0).getReg() != Inst
.getOperand(2).getReg()) {
7150 Inst
.setOpcode(ARM::t2ADDrr
);
7151 Inst
.addOperand(MCOperand::CreateReg(0)); // cc_out
7157 // A Thumb conditional branch outside of an IT block is a tBcc.
7158 if (Inst
.getOperand(1).getImm() != ARMCC::AL
&& !inITBlock()) {
7159 Inst
.setOpcode(ARM::tBcc
);
7164 // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
7165 if (Inst
.getOperand(1).getImm() != ARMCC::AL
&& !inITBlock()){
7166 Inst
.setOpcode(ARM::t2Bcc
);
7171 // If the conditional is AL or we're in an IT block, we really want t2B.
7172 if (Inst
.getOperand(1).getImm() == ARMCC::AL
|| inITBlock()) {
7173 Inst
.setOpcode(ARM::t2B
);
7178 // If the conditional is AL, we really want tB.
7179 if (Inst
.getOperand(1).getImm() == ARMCC::AL
) {
7180 Inst
.setOpcode(ARM::tB
);
7185 // If the register list contains any high registers, or if the writeback
7186 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
7187 // instead if we're in Thumb2. Otherwise, this should have generated
7188 // an error in validateInstruction().
7189 unsigned Rn
= Inst
.getOperand(0).getReg();
7190 bool hasWritebackToken
=
7191 (static_cast<ARMOperand
*>(Operands
[3])->isToken() &&
7192 static_cast<ARMOperand
*>(Operands
[3])->getToken() == "!");
7193 bool listContainsBase
;
7194 if (checkLowRegisterList(Inst
, 3, Rn
, 0, listContainsBase
) ||
7195 (!listContainsBase
&& !hasWritebackToken
) ||
7196 (listContainsBase
&& hasWritebackToken
)) {
7197 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7198 assert (isThumbTwo());
7199 Inst
.setOpcode(hasWritebackToken
? ARM::t2LDMIA_UPD
: ARM::t2LDMIA
);
7200 // If we're switching to the updating version, we need to insert
7201 // the writeback tied operand.
7202 if (hasWritebackToken
)
7203 Inst
.insert(Inst
.begin(),
7204 MCOperand::CreateReg(Inst
.getOperand(0).getReg()));
7209 case ARM::tSTMIA_UPD
: {
7210 // If the register list contains any high registers, we need to use
7211 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7212 // should have generated an error in validateInstruction().
7213 unsigned Rn
= Inst
.getOperand(0).getReg();
7214 bool listContainsBase
;
7215 if (checkLowRegisterList(Inst
, 4, Rn
, 0, listContainsBase
)) {
7216 // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7217 assert (isThumbTwo());
7218 Inst
.setOpcode(ARM::t2STMIA_UPD
);
7224 bool listContainsBase
;
7225 // If the register list contains any high registers, we need to use
7226 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7227 // should have generated an error in validateInstruction().
7228 if (!checkLowRegisterList(Inst
, 2, 0, ARM::PC
, listContainsBase
))
7230 assert (isThumbTwo());
7231 Inst
.setOpcode(ARM::t2LDMIA_UPD
);
7232 // Add the base register and writeback operands.
7233 Inst
.insert(Inst
.begin(), MCOperand::CreateReg(ARM::SP
));
7234 Inst
.insert(Inst
.begin(), MCOperand::CreateReg(ARM::SP
));
7238 bool listContainsBase
;
7239 if (!checkLowRegisterList(Inst
, 2, 0, ARM::LR
, listContainsBase
))
7241 assert (isThumbTwo());
7242 Inst
.setOpcode(ARM::t2STMDB_UPD
);
7243 // Add the base register and writeback operands.
7244 Inst
.insert(Inst
.begin(), MCOperand::CreateReg(ARM::SP
));
7245 Inst
.insert(Inst
.begin(), MCOperand::CreateReg(ARM::SP
));
7249 // If we can use the 16-bit encoding and the user didn't explicitly
7250 // request the 32-bit variant, transform it here.
7251 if (isARMLowRegister(Inst
.getOperand(0).getReg()) &&
7252 (unsigned)Inst
.getOperand(1).getImm() <= 255 &&
7253 ((!inITBlock() && Inst
.getOperand(2).getImm() == ARMCC::AL
&&
7254 Inst
.getOperand(4).getReg() == ARM::CPSR
) ||
7255 (inITBlock() && Inst
.getOperand(4).getReg() == 0)) &&
7256 (!static_cast<ARMOperand
*>(Operands
[2])->isToken() ||
7257 static_cast<ARMOperand
*>(Operands
[2])->getToken() != ".w")) {
7258 // The operands aren't in the same order for tMOVi8...
7260 TmpInst
.setOpcode(ARM::tMOVi8
);
7261 TmpInst
.addOperand(Inst
.getOperand(0));
7262 TmpInst
.addOperand(Inst
.getOperand(4));
7263 TmpInst
.addOperand(Inst
.getOperand(1));
7264 TmpInst
.addOperand(Inst
.getOperand(2));
7265 TmpInst
.addOperand(Inst
.getOperand(3));
7272 // If we can use the 16-bit encoding and the user didn't explicitly
7273 // request the 32-bit variant, transform it here.
7274 if (isARMLowRegister(Inst
.getOperand(0).getReg()) &&
7275 isARMLowRegister(Inst
.getOperand(1).getReg()) &&
7276 Inst
.getOperand(2).getImm() == ARMCC::AL
&&
7277 Inst
.getOperand(4).getReg() == ARM::CPSR
&&
7278 (!static_cast<ARMOperand
*>(Operands
[2])->isToken() ||
7279 static_cast<ARMOperand
*>(Operands
[2])->getToken() != ".w")) {
7280 // The operands aren't the same for tMOV[S]r... (no cc_out)
7282 TmpInst
.setOpcode(Inst
.getOperand(4).getReg() ? ARM::tMOVSr
: ARM::tMOVr
);
7283 TmpInst
.addOperand(Inst
.getOperand(0));
7284 TmpInst
.addOperand(Inst
.getOperand(1));
7285 TmpInst
.addOperand(Inst
.getOperand(2));
7286 TmpInst
.addOperand(Inst
.getOperand(3));
7296 // If we can use the 16-bit encoding and the user didn't explicitly
7297 // request the 32-bit variant, transform it here.
7298 if (isARMLowRegister(Inst
.getOperand(0).getReg()) &&
7299 isARMLowRegister(Inst
.getOperand(1).getReg()) &&
7300 Inst
.getOperand(2).getImm() == 0 &&
7301 (!static_cast<ARMOperand
*>(Operands
[2])->isToken() ||
7302 static_cast<ARMOperand
*>(Operands
[2])->getToken() != ".w")) {
7304 switch (Inst
.getOpcode()) {
7305 default: llvm_unreachable("Illegal opcode!");
7306 case ARM::t2SXTH
: NewOpc
= ARM::tSXTH
; break;
7307 case ARM::t2SXTB
: NewOpc
= ARM::tSXTB
; break;
7308 case ARM::t2UXTH
: NewOpc
= ARM::tUXTH
; break;
7309 case ARM::t2UXTB
: NewOpc
= ARM::tUXTB
; break;
7311 // The operands aren't the same for thumb1 (no rotate operand).
7313 TmpInst
.setOpcode(NewOpc
);
7314 TmpInst
.addOperand(Inst
.getOperand(0));
7315 TmpInst
.addOperand(Inst
.getOperand(1));
7316 TmpInst
.addOperand(Inst
.getOperand(3));
7317 TmpInst
.addOperand(Inst
.getOperand(4));
7324 ARM_AM::ShiftOpc SOpc
= ARM_AM::getSORegShOp(Inst
.getOperand(2).getImm());
7325 // rrx shifts and asr/lsr of #32 is encoded as 0
7326 if (SOpc
== ARM_AM::rrx
|| SOpc
== ARM_AM::asr
|| SOpc
== ARM_AM::lsr
)
7328 if (ARM_AM::getSORegOffset(Inst
.getOperand(2).getImm()) == 0) {
7329 // Shifting by zero is accepted as a vanilla 'MOVr'
7331 TmpInst
.setOpcode(ARM::MOVr
);
7332 TmpInst
.addOperand(Inst
.getOperand(0));
7333 TmpInst
.addOperand(Inst
.getOperand(1));
7334 TmpInst
.addOperand(Inst
.getOperand(3));
7335 TmpInst
.addOperand(Inst
.getOperand(4));
7336 TmpInst
.addOperand(Inst
.getOperand(5));
7349 ARM_AM::ShiftOpc SOpc
= ARM_AM::getSORegShOp(Inst
.getOperand(3).getImm());
7350 if (SOpc
== ARM_AM::rrx
) return false;
7351 switch (Inst
.getOpcode()) {
7352 default: llvm_unreachable("unexpected opcode!");
7353 case ARM::ANDrsi
: newOpc
= ARM::ANDrr
; break;
7354 case ARM::ORRrsi
: newOpc
= ARM::ORRrr
; break;
7355 case ARM::EORrsi
: newOpc
= ARM::EORrr
; break;
7356 case ARM::BICrsi
: newOpc
= ARM::BICrr
; break;
7357 case ARM::SUBrsi
: newOpc
= ARM::SUBrr
; break;
7358 case ARM::ADDrsi
: newOpc
= ARM::ADDrr
; break;
7360 // If the shift is by zero, use the non-shifted instruction definition.
7361 // The exception is for right shifts, where 0 == 32
7362 if (ARM_AM::getSORegOffset(Inst
.getOperand(3).getImm()) == 0 &&
7363 !(SOpc
== ARM_AM::lsr
|| SOpc
== ARM_AM::asr
)) {
7365 TmpInst
.setOpcode(newOpc
);
7366 TmpInst
.addOperand(Inst
.getOperand(0));
7367 TmpInst
.addOperand(Inst
.getOperand(1));
7368 TmpInst
.addOperand(Inst
.getOperand(2));
7369 TmpInst
.addOperand(Inst
.getOperand(4));
7370 TmpInst
.addOperand(Inst
.getOperand(5));
7371 TmpInst
.addOperand(Inst
.getOperand(6));
7379 // The mask bits for all but the first condition are represented as
7380 // the low bit of the condition code value implies 't'. We currently
7381 // always have 1 implies 't', so XOR toggle the bits if the low bit
7382 // of the condition code is zero.
7383 MCOperand
&MO
= Inst
.getOperand(1);
7384 unsigned Mask
= MO
.getImm();
7385 unsigned OrigMask
= Mask
;
7386 unsigned TZ
= CountTrailingZeros_32(Mask
);
7387 if ((Inst
.getOperand(0).getImm() & 1) == 0) {
7388 assert(Mask
&& TZ
<= 3 && "illegal IT mask value!");
7389 for (unsigned i
= 3; i
!= TZ
; --i
)
7394 // Set up the IT block state according to the IT instruction we just
7396 assert(!inITBlock() && "nested IT blocks?!");
7397 ITState
.Cond
= ARMCC::CondCodes(Inst
.getOperand(0).getImm());
7398 ITState
.Mask
= OrigMask
; // Use the original mask, not the updated one.
7399 ITState
.CurPosition
= 0;
7400 ITState
.FirstCond
= true;
7410 // Assemblers should use the narrow encodings of these instructions when permissible.
7411 if ((isARMLowRegister(Inst
.getOperand(1).getReg()) &&
7412 isARMLowRegister(Inst
.getOperand(2).getReg())) &&
7413 Inst
.getOperand(0).getReg() == Inst
.getOperand(1).getReg() &&
7414 ((!inITBlock() && Inst
.getOperand(5).getReg() == ARM::CPSR
) ||
7415 (inITBlock() && Inst
.getOperand(5).getReg() != ARM::CPSR
)) &&
7416 (!static_cast<ARMOperand
*>(Operands
[3])->isToken() ||
7417 !static_cast<ARMOperand
*>(Operands
[3])->getToken().equals_lower(".w"))) {
7419 switch (Inst
.getOpcode()) {
7420 default: llvm_unreachable("unexpected opcode");
7421 case ARM::t2LSLrr
: NewOpc
= ARM::tLSLrr
; break;
7422 case ARM::t2LSRrr
: NewOpc
= ARM::tLSRrr
; break;
7423 case ARM::t2ASRrr
: NewOpc
= ARM::tASRrr
; break;
7424 case ARM::t2SBCrr
: NewOpc
= ARM::tSBC
; break;
7425 case ARM::t2RORrr
: NewOpc
= ARM::tROR
; break;
7426 case ARM::t2BICrr
: NewOpc
= ARM::tBIC
; break;
7429 TmpInst
.setOpcode(NewOpc
);
7430 TmpInst
.addOperand(Inst
.getOperand(0));
7431 TmpInst
.addOperand(Inst
.getOperand(5));
7432 TmpInst
.addOperand(Inst
.getOperand(1));
7433 TmpInst
.addOperand(Inst
.getOperand(2));
7434 TmpInst
.addOperand(Inst
.getOperand(3));
7435 TmpInst
.addOperand(Inst
.getOperand(4));
7446 // Assemblers should use the narrow encodings of these instructions when permissible.
7447 // These instructions are special in that they are commutable, so shorter encodings
7448 // are available more often.
7449 if ((isARMLowRegister(Inst
.getOperand(1).getReg()) &&
7450 isARMLowRegister(Inst
.getOperand(2).getReg())) &&
7451 (Inst
.getOperand(0).getReg() == Inst
.getOperand(1).getReg() ||
7452 Inst
.getOperand(0).getReg() == Inst
.getOperand(2).getReg()) &&
7453 ((!inITBlock() && Inst
.getOperand(5).getReg() == ARM::CPSR
) ||
7454 (inITBlock() && Inst
.getOperand(5).getReg() != ARM::CPSR
)) &&
7455 (!static_cast<ARMOperand
*>(Operands
[3])->isToken() ||
7456 !static_cast<ARMOperand
*>(Operands
[3])->getToken().equals_lower(".w"))) {
7458 switch (Inst
.getOpcode()) {
7459 default: llvm_unreachable("unexpected opcode");
7460 case ARM::t2ADCrr
: NewOpc
= ARM::tADC
; break;
7461 case ARM::t2ANDrr
: NewOpc
= ARM::tAND
; break;
7462 case ARM::t2EORrr
: NewOpc
= ARM::tEOR
; break;
7463 case ARM::t2ORRrr
: NewOpc
= ARM::tORR
; break;
7466 TmpInst
.setOpcode(NewOpc
);
7467 TmpInst
.addOperand(Inst
.getOperand(0));
7468 TmpInst
.addOperand(Inst
.getOperand(5));
7469 if (Inst
.getOperand(0).getReg() == Inst
.getOperand(1).getReg()) {
7470 TmpInst
.addOperand(Inst
.getOperand(1));
7471 TmpInst
.addOperand(Inst
.getOperand(2));
7473 TmpInst
.addOperand(Inst
.getOperand(2));
7474 TmpInst
.addOperand(Inst
.getOperand(1));
7476 TmpInst
.addOperand(Inst
.getOperand(3));
7477 TmpInst
.addOperand(Inst
.getOperand(4));
// checkTargetMatchPredicate - context-sensitive legality checks the
// auto-generated matcher cannot express: whether the optional cc_out
// (flag-setting) operand is legal given Thumb1/Thumb2 mode and IT-block
// state, plus per-opcode subtarget requirements (Thumb2, ARMv6).
// Returns one of the Match_* codes consumed by MatchAndEmitInstruction.
// NOTE(review): this text is a line-mangled extraction; gaps in the
// embedded original numbering (7498-7499, 7501-7502, 7507, 7509, 7512,
// 7514, 7527-7528) indicate lines lost in extraction - confirm against
// the upstream ARMAsmParser.cpp before relying on this block.
7487 unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst
&Inst
) {
7488 // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7489 // suffix depending on whether they're in an IT block or not.
7490 unsigned Opc
= Inst
.getOpcode();
7491 const MCInstrDesc
&MCID
= getInstDesc(Opc
);
7492 if (MCID
.TSFlags
& ARMII::ThumbArithFlagSetting
) {
7493 assert(MCID
.hasOptionalDef() &&
7494 "optionally flag setting instruction missing optional def operand");
7495 assert(MCID
.NumOperands
== Inst
.getNumOperands() &&
7496 "operand count mismatch!");
7497 // Find the optional-def operand (cc_out).
// NOTE(review): the for-loop header that declares OpNo and scans
// MCID.OpInfo (original lines 7498-7499) is missing here; only the
// loop condition survives below.
7500 !MCID
.OpInfo
[OpNo
].isOptionalDef() && OpNo
< MCID
.NumOperands
;
7503 // If we're parsing Thumb1, reject it completely.
// In Thumb1 these arithmetic ops always set flags, so anything other
// than CPSR in the cc_out slot cannot be encoded at all.
7504 if (isThumbOne() && Inst
.getOperand(OpNo
).getReg() != ARM::CPSR
)
7505 return Match_MnemonicFail
;
7506 // If we're parsing Thumb2, which form is legal depends on whether we're
// (continuation of the comment above; original line 7507 is missing -
// presumably "in an IT block or not" - TODO confirm)
7508 if (isThumbTwo() && Inst
.getOperand(OpNo
).getReg() != ARM::CPSR
&&
7510 return Match_RequiresITBlock
;
7511 if (isThumbTwo() && Inst
.getOperand(OpNo
).getReg() == ARM::CPSR
&&
7513 return Match_RequiresNotITBlock
;
7515 // Some high-register supporting Thumb1 encodings only allow both registers
7516 // to be from r0-r7 when in Thumb2.
7517 else if (Opc
== ARM::tADDhirr
&& isThumbOne() &&
7518 isARMLowRegister(Inst
.getOperand(1).getReg()) &&
7519 isARMLowRegister(Inst
.getOperand(2).getReg()))
7520 return Match_RequiresThumb2
;
7521 // Others only require ARMv6 or later.
7522 else if (Opc
== ARM::tMOVr
&& isThumbOne() && !hasV6Ops() &&
7523 isARMLowRegister(Inst
.getOperand(0).getReg()) &&
7524 isARMLowRegister(Inst
.getOperand(1).getReg()))
7525 return Match_RequiresV6
;
7526 return Match_Success
;
7529 static const char *getSubtargetFeatureName(unsigned Val
);
// MatchAndEmitInstruction - top-level driver: run the generated matcher
// over the parsed operands, then on success validate context-sensitive
// constraints, post-process encodings, advance the IT-block position, and
// emit the instruction; on failure translate each Match_* code into a
// user-facing diagnostic anchored at the offending operand.
// NOTE(review): this is a line-mangled extraction. The function's return
// type / class qualifier line (original 7530) and several interior lines
// (7535, 7537, 7539, 7541-7542, 7549-7551, 7557-7558, 7561, 7563,
// 7567-7569, 7571, 7577, 7580, 7582-7584, 7586, 7592, 7595-7596, 7598,
// 7614-7616, 7618-7619) are missing - confirm against upstream before use.
7531 MatchAndEmitInstruction(SMLoc IDLoc
, unsigned &Opcode
,
7532 SmallVectorImpl
<MCParsedAsmOperand
*> &Operands
,
7533 MCStreamer
&Out
, unsigned &ErrorInfo
,
7534 bool MatchingInlineAsm
) {
7536 unsigned MatchResult
;
7538 MatchResult
= MatchInstructionImpl(Operands
, Inst
, ErrorInfo
,
7540 switch (MatchResult
) {
7543 // Context sensitive operand constraints aren't handled by the matcher,
7544 // so check them here.
7545 if (validateInstruction(Inst
, Operands
)) {
7546 // Still progress the IT block, otherwise one wrong condition causes
7547 // nasty cascading errors.
7548 forwardITPosition();
7552 // Some instructions need post-processing to, for example, tweak which
7553 // encoding is selected. Loop on it while changes happen so the
7554 // individual transformations can chain off each other. E.g.,
7555 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
7556 while (processInstruction(Inst
, Operands
))
7559 // Only move forward at the very end so that everything in validate
7560 // and process gets a consistent answer about whether we're in an IT
7562 forwardITPosition();
7564 // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7565 // doesn't actually encode.
7566 if (Inst
.getOpcode() == ARM::ITasm
)
7570 Out
.EmitInstruction(Inst
);
7572 case Match_MissingFeature
: {
7573 assert(ErrorInfo
&& "Unknown missing feature!");
7574 // Special case the error message for the very common case where only
7575 // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
7576 std::string Msg
= "instruction requires:";
// Walk the ErrorInfo bitmask a feature bit at a time and append the
// human-readable name of each missing subtarget feature.
// NOTE(review): the declaration of Mask and its per-iteration update
// (original lines 7577, 7580, 7582-7584) are missing here.
7578 for (unsigned i
= 0; i
< (sizeof(ErrorInfo
)*8-1); ++i
) {
7579 if (ErrorInfo
& Mask
) {
7581 Msg
+= getSubtargetFeatureName(ErrorInfo
& Mask
);
7585 return Error(IDLoc
, Msg
);
7587 case Match_InvalidOperand
: {
// Point the diagnostic at the offending operand when the matcher told
// us which one it was (ErrorInfo is its index, ~0U means unknown).
7588 SMLoc ErrorLoc
= IDLoc
;
7589 if (ErrorInfo
!= ~0U) {
7590 if (ErrorInfo
>= Operands
.size())
7591 return Error(IDLoc
, "too few operands for instruction");
7593 ErrorLoc
= ((ARMOperand
*)Operands
[ErrorInfo
])->getStartLoc();
7594 if (ErrorLoc
== SMLoc()) ErrorLoc
= IDLoc
;
7597 return Error(ErrorLoc
, "invalid operand for instruction");
7599 case Match_MnemonicFail
:
7600 return Error(IDLoc
, "invalid instruction",
7601 ((ARMOperand
*)Operands
[0])->getLocRange());
7602 case Match_RequiresNotITBlock
:
7603 return Error(IDLoc
, "flag setting instruction only valid outside IT block");
7604 case Match_RequiresITBlock
:
7605 return Error(IDLoc
, "instruction only valid inside IT block");
7606 case Match_RequiresV6
:
7607 return Error(IDLoc
, "instruction variant requires ARMv6 or later");
7608 case Match_RequiresThumb2
:
7609 return Error(IDLoc
, "instruction variant requires Thumb2");
7610 case Match_ImmRange0_15
: {
7611 SMLoc ErrorLoc
= ((ARMOperand
*)Operands
[ErrorInfo
])->getStartLoc();
7612 if (ErrorLoc
== SMLoc()) ErrorLoc
= IDLoc
;
7613 return Error(ErrorLoc
, "immediate operand must be in the range [0,15]");
7617 llvm_unreachable("Implement any new match types added!");
// ParseDirective - dispatch ARM-specific assembler directives to their
// dedicated parseDirective* handlers, keyed on the directive identifier.
// NOTE(review): line-mangled extraction; the fall-through return for
// unrecognized directives and the closing brace (original lines
// 7641-7643) are missing - confirm against upstream.
7620 /// parseDirective parses the arm specific directives
7621 bool ARMAsmParser::ParseDirective(AsmToken DirectiveID
) {
7622 StringRef IDVal
= DirectiveID
.getIdentifier();
7623 if (IDVal
== ".word")
7624 return parseDirectiveWord(4, DirectiveID
.getLoc());
7625 else if (IDVal
== ".thumb")
7626 return parseDirectiveThumb(DirectiveID
.getLoc());
7627 else if (IDVal
== ".arm")
7628 return parseDirectiveARM(DirectiveID
.getLoc());
7629 else if (IDVal
== ".thumb_func")
7630 return parseDirectiveThumbFunc(DirectiveID
.getLoc());
7631 else if (IDVal
== ".code")
7632 return parseDirectiveCode(DirectiveID
.getLoc());
7633 else if (IDVal
== ".syntax")
7634 return parseDirectiveSyntax(DirectiveID
.getLoc());
7635 else if (IDVal
== ".unreq")
7636 return parseDirectiveUnreq(DirectiveID
.getLoc());
7637 else if (IDVal
== ".arch")
7638 return parseDirectiveArch(DirectiveID
.getLoc());
7639 else if (IDVal
== ".eabi_attribute")
7640 return parseDirectiveEabiAttr(DirectiveID
.getLoc());
// parseDirectiveWord - emit each comma-separated expression as a value of
// 'Size' bytes (ParseDirective invokes this with Size == 4 for .word).
// NOTE(review): line-mangled extraction; the loop structure, error
// returns, and comma/EOS consumption lines (original 7648, 7651-7652,
// 7654, 7656-7657, 7661-7668) are missing - confirm against upstream.
7644 /// parseDirectiveWord
7645 /// ::= .word [ expression (, expression)* ]
7646 bool ARMAsmParser::parseDirectiveWord(unsigned Size
, SMLoc L
) {
7647 if (getLexer().isNot(AsmToken::EndOfStatement
)) {
7649 const MCExpr
*Value
;
7650 if (getParser().parseExpression(Value
))
7653 getParser().getStreamer().EmitValue(Value
, Size
);
7655 if (getLexer().is(AsmToken::EndOfStatement
))
7658 // FIXME: Improve diagnostic.
7659 if (getLexer().isNot(AsmToken::Comma
))
7660 return Error(L
, "unexpected token in directive");
// parseDirectiveThumb - handle the .thumb directive by emitting the
// Code16 assembler flag to the streamer.
// NOTE(review): line-mangled extraction; original lines 7670, 7674-7677,
// 7679-7681 are missing (presumably the Lex of EOS, a mode-switch check,
// and the return) - confirm against upstream.
7669 /// parseDirectiveThumb
7671 bool ARMAsmParser::parseDirectiveThumb(SMLoc L
) {
7672 if (getLexer().isNot(AsmToken::EndOfStatement
))
7673 return Error(L
, "unexpected token in directive");
7678 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16
);
// parseDirectiveARM - handle the .arm directive by emitting the Code32
// assembler flag to the streamer. Mirrors parseDirectiveThumb.
// NOTE(review): line-mangled extraction; original lines 7683, 7687-7690,
// 7692-7694 are missing - confirm against upstream.
7682 /// parseDirectiveARM
7684 bool ARMAsmParser::parseDirectiveARM(SMLoc L
) {
7685 if (getLexer().isNot(AsmToken::EndOfStatement
))
7686 return Error(L
, "unexpected token in directive");
7691 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32
);
// parseDirectiveThumbFunc - handle .thumb_func: optionally consume a
// symbol name on the same line (Darwin/MachO style), otherwise fall back
// to taking the identifier on the following line, then mark that symbol
// as a Thumb function via EmitThumbFunc.
// NOTE(review): line-mangled extraction; original lines 7700, 7702,
// 7704-7705, 7713-7715, 7718, 7721-7722, 7726, 7728-7729, 7733-7735
// are missing (including the declaration of Name and the guards around
// the isMachO / needFuncName paths) - confirm against upstream.
7695 /// parseDirectiveThumbFunc
7696 /// ::= .thumbfunc symbol_name
7697 bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L
) {
7698 const MCAsmInfo
&MAI
= getParser().getStreamer().getContext().getAsmInfo();
7699 bool isMachO
= MAI
.hasSubsectionsViaSymbols();
7701 bool needFuncName
= true;
7703 // Darwin asm has (optionally) function name after .thumb_func direction
7706 const AsmToken
&Tok
= Parser
.getTok();
7707 if (Tok
.isNot(AsmToken::EndOfStatement
)) {
7708 if (Tok
.isNot(AsmToken::Identifier
) && Tok
.isNot(AsmToken::String
))
7709 return Error(L
, "unexpected token in .thumb_func directive");
7710 Name
= Tok
.getIdentifier();
7711 Parser
.Lex(); // Consume the identifier token.
7712 needFuncName
= false;
7716 if (getLexer().isNot(AsmToken::EndOfStatement
))
7717 return Error(L
, "unexpected token in directive");
7719 // Eat the end of statement and any blank lines that follow.
7720 while (getLexer().is(AsmToken::EndOfStatement
))
7723 // FIXME: assuming function name will be the line following .thumb_func
7724 // We really should be checking the next symbol definition even if there's
7725 // stuff in between.
7727 Name
= Parser
.getTok().getIdentifier();
7730 // Mark symbol as a thumb symbol.
7731 MCSymbol
*Func
= getParser().getContext().GetOrCreateSymbol(Name
);
7732 getParser().getStreamer().EmitThumbFunc(Func
);
// parseDirectiveSyntax - handle .syntax: accept "unified" (no-op), reject
// "divided" (unsupported), and error on anything else.
// NOTE(review): line-mangled extraction; original lines 7744, 7747, 7749,
// 7752-7753, 7756-7758 are missing - confirm against upstream. The typo
// "asssembly" in the error string is in the original runtime text and is
// left untouched here.
7736 /// parseDirectiveSyntax
7737 /// ::= .syntax unified | divided
7738 bool ARMAsmParser::parseDirectiveSyntax(SMLoc L
) {
7739 const AsmToken
&Tok
= Parser
.getTok();
7740 if (Tok
.isNot(AsmToken::Identifier
))
7741 return Error(L
, "unexpected token in .syntax directive");
7742 StringRef Mode
= Tok
.getString();
7743 if (Mode
== "unified" || Mode
== "UNIFIED")
7745 else if (Mode
== "divided" || Mode
== "DIVIDED")
7746 return Error(L
, "'.syntax divided' arm asssembly not supported");
7748 return Error(L
, "unrecognized syntax mode in .syntax directive");
7750 if (getLexer().isNot(AsmToken::EndOfStatement
))
7751 return Error(Parser
.getTok().getLoc(), "unexpected token in directive");
7754 // TODO tell the MC streamer the mode
7755 // getParser().getStreamer().Emit???();
// parseDirectiveCode - handle .code 16|32: validate the integer operand
// and emit the matching Code16/Code32 assembler flag.
// NOTE(review): line-mangled extraction; original lines 7766-7770,
// 7772, 7775-7779, 7781-7783, 7785-7789 are missing (including the
// Val == 16/32 checks and any mode-switch logic selecting between the
// two EmitAssemblerFlag calls) - confirm against upstream.
7759 /// parseDirectiveCode
7760 /// ::= .code 16 | 32
7761 bool ARMAsmParser::parseDirectiveCode(SMLoc L
) {
7762 const AsmToken
&Tok
= Parser
.getTok();
7763 if (Tok
.isNot(AsmToken::Integer
))
7764 return Error(L
, "unexpected token in .code directive");
7765 int64_t Val
= Parser
.getTok().getIntVal();
7771 return Error(L
, "invalid operand to .code directive");
7773 if (getLexer().isNot(AsmToken::EndOfStatement
))
7774 return Error(Parser
.getTok().getLoc(), "unexpected token in directive");
7780 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16
);
7784 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32
);
// parseDirectiveReq - handle "name .req registername": parse the register
// after the .req token and record the alias in the RegisterReqs map
// (declared near the top of the class); re-registering the same name with
// a different register is an error.
// NOTE(review): line-mangled extraction; original lines 7794, 7799-7800,
// 7806-7807, 7809, 7813-7816 are missing (including the declaration of
// Reg and the final return) - confirm against upstream.
7790 /// parseDirectiveReq
7791 /// ::= name .req registername
7792 bool ARMAsmParser::parseDirectiveReq(StringRef Name
, SMLoc L
) {
7793 Parser
.Lex(); // Eat the '.req' token.
7795 SMLoc SRegLoc
, ERegLoc
;
7796 if (ParseRegister(Reg
, SRegLoc
, ERegLoc
)) {
7797 Parser
.eatToEndOfStatement();
7798 return Error(SRegLoc
, "register name expected");
7801 // Shouldn't be anything else.
7802 if (Parser
.getTok().isNot(AsmToken::EndOfStatement
)) {
7803 Parser
.eatToEndOfStatement();
7804 return Error(Parser
.getTok().getLoc(),
7805 "unexpected input in .req directive.");
7808 Parser
.Lex(); // Consume the EndOfStatement
// GetOrCreateValue returns the existing entry if Name was already
// registered; a mismatch with Reg means a conflicting redefinition.
7810 if (RegisterReqs
.GetOrCreateValue(Name
, Reg
).getValue() != Reg
)
7811 return Error(SRegLoc
, "redefinition of '" + Name
+
7812 "' does not match original.");
// parseDirectiveUnreq - handle .unreq: remove the named alias from the
// RegisterReqs map (erase is a no-op if the name was never registered).
// NOTE(review): line-mangled extraction; original lines 7823, 7826-7828
// are missing (presumably the closing brace of the error branch and the
// final return) - confirm against upstream. The doc comment's
// "parseDirectiveUneq" typo is preserved from the original.
7817 /// parseDirectiveUneq
7818 /// ::= .unreq registername
7819 bool ARMAsmParser::parseDirectiveUnreq(SMLoc L
) {
7820 if (Parser
.getTok().isNot(AsmToken::Identifier
)) {
7821 Parser
.eatToEndOfStatement();
7822 return Error(L
, "unexpected input in .unreq directive.");
7824 RegisterReqs
.erase(Parser
.getTok().getIdentifier());
7825 Parser
.Lex(); // Eat the identifier.
// parseDirectiveArch - handler for the .arch directive (dispatched from
// ParseDirective above).
// NOTE(review): line-mangled extraction; the entire body (original lines
// 7830, 7832-7834) is missing - confirm against upstream.
7829 /// parseDirectiveArch
7831 bool ARMAsmParser::parseDirectiveArch(SMLoc L
) {
// parseDirectiveEabiAttr - handler for ".eabi_attribute int, int"
// (dispatched from ParseDirective above).
// NOTE(review): line-mangled extraction; the entire body (original lines
// 7838-7840) is missing - confirm against upstream.
7835 /// parseDirectiveEabiAttr
7836 /// ::= .eabi_attribute int, int
7837 bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L
) {
// LLVMInitializeARMAsmParser - C-linkage entry point that registers this
// asm parser for both the ARM and Thumb targets with the target registry.
// NOTE(review): line-mangled extraction; the closing brace (original
// line 7845) is missing.
7841 /// Force static initialization.
7842 extern "C" void LLVMInitializeARMAsmParser() {
7843 RegisterMCAsmParser
<ARMAsmParser
> X(TheARMTarget
);
7844 RegisterMCAsmParser
<ARMAsmParser
> Y(TheThumbTarget
);
7847 #define GET_REGISTER_MATCHER
7848 #define GET_SUBTARGET_FEATURE_NAME
7849 #define GET_MATCHER_IMPLEMENTATION
7850 #include "ARMGenAsmMatcher.inc"
7852 // Define this matcher function after the auto-generated include so we
7853 // have the match class enum definitions.
7854 unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand
*AsmOp
,
7856 ARMOperand
*Op
= static_cast<ARMOperand
*>(AsmOp
);
7857 // If the kind is a token for a literal immediate, check if our asm
7858 // operand matches. This is for InstAliases which have a fixed-value
7859 // immediate in the syntax.
7860 if (Kind
== MCK__35_0
&& Op
->isImm()) {
7861 const MCConstantExpr
*CE
= dyn_cast
<MCConstantExpr
>(Op
->getImm());
7863 return Match_InvalidOperand
;
7864 if (CE
->getValue() == 0)
7865 return Match_Success
;
7867 return Match_InvalidOperand
;