1 //===-- ARMInstrVFP.td - VFP support for ARM ---------------*- tablegen -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file describes the ARM VFP instruction set.
11 //
12 //===----------------------------------------------------------------------===//
13
14 def SDT_FTOI : SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
15 def SDT_ITOF : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
16 def SDT_CMPFP0 : SDTypeProfile<0, 1, [SDTCisFP<0>]>;
17 def SDT_VMOVDRR : SDTypeProfile<1, 2, [SDTCisVT<0, f64>, SDTCisVT<1, i32>,
18 SDTCisSameAs<1, 2>]>;
19
20 def arm_ftoui : SDNode<"ARMISD::FTOUI", SDT_FTOI>;
21 def arm_ftosi : SDNode<"ARMISD::FTOSI", SDT_FTOI>;
22 def arm_sitof : SDNode<"ARMISD::SITOF", SDT_ITOF>;
23 def arm_uitof : SDNode<"ARMISD::UITOF", SDT_ITOF>;
24 def arm_fmstat : SDNode<"ARMISD::FMSTAT", SDTNone, [SDNPInGlue, SDNPOutGlue]>;
25 def arm_cmpfp : SDNode<"ARMISD::CMPFP", SDT_ARMCmp, [SDNPOutGlue]>;
26 def arm_cmpfp0 : SDNode<"ARMISD::CMPFPw0", SDT_CMPFP0, [SDNPOutGlue]>;
27 def arm_fmdrr : SDNode<"ARMISD::VMOVDRR", SDT_VMOVDRR>;
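// The integer operand or result in the FTOI/ITOF profiles is typed as f32
// because VCVT reads and writes the integer value in an S register; a
// separate VMOVRS/VMOVSR transfers it to or from a GPR when needed.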
28
29
30 //===----------------------------------------------------------------------===//
31 // Operand Definitions.
32 //
33
34 // 8-bit floating-point immediate encodings.
35 def FPImmOperand : AsmOperandClass {
36 let Name = "FPImm";
37 let ParserMethod = "parseFPImm";
38 }
39
40 def vfp_f32imm : Operand<f32>,
41 PatLeaf<(f32 fpimm), [{
42 return ARM_AM::getFP32Imm(N->getValueAPF()) != -1;
43 }], SDNodeXForm<fpimm, [{
44 APFloat InVal = N->getValueAPF();
45 uint32_t enc = ARM_AM::getFP32Imm(InVal);
46 return CurDAG->getTargetConstant(enc, MVT::i32);
47 }]>> {
48 let PrintMethod = "printFPImmOperand";
49 let ParserMatchClass = FPImmOperand;
50 }
51
52 def vfp_f64imm : Operand<f64>,
53 PatLeaf<(f64 fpimm), [{
54 return ARM_AM::getFP64Imm(N->getValueAPF()) != -1;
55 }], SDNodeXForm<fpimm, [{
56 APFloat InVal = N->getValueAPF();
57 uint32_t enc = ARM_AM::getFP64Imm(InVal);
58 return CurDAG->getTargetConstant(enc, MVT::i32);
59 }]>> {
60 let PrintMethod = "printFPImmOperand";
61 let ParserMatchClass = FPImmOperand;
62 }
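// The 8-bit encoding (checked by getFP32Imm/getFP64Imm above) covers values
// of the form (-1)^s * (16+m)/16 * 2^e with m in [0,15] and e in [-3,4],
// e.g. 1.0, -2.0 and 0.5 are encodable while 0.0 and 0.1 are not.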
63
64 def alignedload32 : PatFrag<(ops node:$ptr), (load node:$ptr), [{
65 return cast<LoadSDNode>(N)->getAlignment() >= 4;
66 }]>;
67
68 def alignedstore32 : PatFrag<(ops node:$val, node:$ptr),
69 (store node:$val, node:$ptr), [{
70 return cast<StoreSDNode>(N)->getAlignment() >= 4;
71 }]>;
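// VLDR/VSTR require word-aligned addresses, so the f64 load/store patterns
// below are restricted to accesses known to be at least 4-byte aligned.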
72
73 // The VCVT to/from fixed-point instructions encode the 'fbits' operand
74 // (the number of fixed bits) differently than it appears in the assembly
75 // source. It's encoded as "Size - fbits" where Size is the size of the
76 // fixed-point representation (32 or 16) and fbits is the value appearing
77 // in the assembly source, an integer in [0,16] or (0,32], depending on size.
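// For example, "vcvt.s32.f32 s0, s0, #1" encodes a field value of 32 - 1 = 31
// and "vcvt.s16.f32 s0, s0, #16" encodes 16 - 16 = 0.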
78 def fbits32_asm_operand : AsmOperandClass { let Name = "FBits32"; }
79 def fbits32 : Operand<i32> {
80 let PrintMethod = "printFBits32";
81 let ParserMatchClass = fbits32_asm_operand;
82 }
83
84 def fbits16_asm_operand : AsmOperandClass { let Name = "FBits16"; }
85 def fbits16 : Operand<i32> {
86 let PrintMethod = "printFBits16";
87 let ParserMatchClass = fbits16_asm_operand;
88 }
89
90 //===----------------------------------------------------------------------===//
91 // Load / store Instructions.
92 //
93
94 let canFoldAsLoad = 1, isReMaterializable = 1 in {
95
96 def VLDRD : ADI5<0b1101, 0b01, (outs DPR:$Dd), (ins addrmode5:$addr),
97 IIC_fpLoad64, "vldr", "\t$Dd, $addr",
98 [(set DPR:$Dd, (f64 (alignedload32 addrmode5:$addr)))]>;
99
100 def VLDRS : ASI5<0b1101, 0b01, (outs SPR:$Sd), (ins addrmode5:$addr),
101 IIC_fpLoad32, "vldr", "\t$Sd, $addr",
102 [(set SPR:$Sd, (load addrmode5:$addr))]> {
103 // Some single precision VFP instructions may be executed on both NEON and VFP
104 // pipelines.
105 let D = VFPNeonDomain;
106 }
107
108 } // End of 'let canFoldAsLoad = 1, isReMaterializable = 1 in'
109
110 def VSTRD : ADI5<0b1101, 0b00, (outs), (ins DPR:$Dd, addrmode5:$addr),
111 IIC_fpStore64, "vstr", "\t$Dd, $addr",
112 [(alignedstore32 (f64 DPR:$Dd), addrmode5:$addr)]>;
113
114 def VSTRS : ASI5<0b1101, 0b00, (outs), (ins SPR:$Sd, addrmode5:$addr),
115 IIC_fpStore32, "vstr", "\t$Sd, $addr",
116 [(store SPR:$Sd, addrmode5:$addr)]> {
117 // Some single precision VFP instructions may be executed on both NEON and VFP
118 // pipelines.
119 let D = VFPNeonDomain;
120 }
121
122 //===----------------------------------------------------------------------===//
123 // Load / store multiple Instructions.
124 //
125
126 multiclass vfp_ldst_mult<string asm, bit L_bit,
127 InstrItinClass itin, InstrItinClass itin_upd> {
128 // Double Precision
129 def DIA :
130 AXDI4<(outs), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs, variable_ops),
131 IndexModeNone, itin,
132 !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
133 let Inst{24-23} = 0b01; // Increment After
134 let Inst{21} = 0; // No writeback
135 let Inst{20} = L_bit;
136 }
137 def DIA_UPD :
138 AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
139 variable_ops),
140 IndexModeUpd, itin_upd,
141 !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
142 let Inst{24-23} = 0b01; // Increment After
143 let Inst{21} = 1; // Writeback
144 let Inst{20} = L_bit;
145 }
146 def DDB_UPD :
147 AXDI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, dpr_reglist:$regs,
148 variable_ops),
149 IndexModeUpd, itin_upd,
150 !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
151 let Inst{24-23} = 0b10; // Decrement Before
152 let Inst{21} = 1; // Writeback
153 let Inst{20} = L_bit;
154 }
155
156 // Single Precision
157 def SIA :
158 AXSI4<(outs), (ins GPR:$Rn, pred:$p, spr_reglist:$regs, variable_ops),
159 IndexModeNone, itin,
160 !strconcat(asm, "ia${p}\t$Rn, $regs"), "", []> {
161 let Inst{24-23} = 0b01; // Increment After
162 let Inst{21} = 0; // No writeback
163 let Inst{20} = L_bit;
164
165 // Some single precision VFP instructions may be executed on both NEON and
166 // VFP pipelines.
167 let D = VFPNeonDomain;
168 }
169 def SIA_UPD :
170 AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
171 variable_ops),
172 IndexModeUpd, itin_upd,
173 !strconcat(asm, "ia${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
174 let Inst{24-23} = 0b01; // Increment After
175 let Inst{21} = 1; // Writeback
176 let Inst{20} = L_bit;
177
178 // Some single precision VFP instructions may be executed on both NEON and
179 // VFP pipelines.
180 let D = VFPNeonDomain;
181 }
182 def SDB_UPD :
183 AXSI4<(outs GPR:$wb), (ins GPR:$Rn, pred:$p, spr_reglist:$regs,
184 variable_ops),
185 IndexModeUpd, itin_upd,
186 !strconcat(asm, "db${p}\t$Rn!, $regs"), "$Rn = $wb", []> {
187 let Inst{24-23} = 0b10; // Decrement Before
188 let Inst{21} = 1; // Writeback
189 let Inst{20} = L_bit;
190
191 // Some single precision VFP instructions may be executed on both NEON and
192 // VFP pipelines.
193 let D = VFPNeonDomain;
194 }
195 }
196
197 let neverHasSideEffects = 1 in {
198
199 let mayLoad = 1, hasExtraDefRegAllocReq = 1 in
200 defm VLDM : vfp_ldst_mult<"vldm", 1, IIC_fpLoad_m, IIC_fpLoad_mu>;
201
202 let mayStore = 1, hasExtraSrcRegAllocReq = 1 in
203 defm VSTM : vfp_ldst_mult<"vstm", 0, IIC_fpLoad_m, IIC_fpLoad_mu>;
204
205 } // neverHasSideEffects
206
207 def : MnemonicAlias<"vldm", "vldmia">;
208 def : MnemonicAlias<"vstm", "vstmia">;
209
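// vpush/vpop are equivalent to vstmdb sp!/vldmia sp! with a D- or S-register
// list, so they map onto the corresponding DB_UPD/IA_UPD forms with SP as the
// base register.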
210 def : InstAlias<"vpush${p} $r", (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>,
211 Requires<[HasVFP2]>;
212 def : InstAlias<"vpush${p} $r", (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>,
213 Requires<[HasVFP2]>;
214 def : InstAlias<"vpop${p} $r", (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>,
215 Requires<[HasVFP2]>;
216 def : InstAlias<"vpop${p} $r", (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>,
217 Requires<[HasVFP2]>;
218 defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
219 (VSTMSDB_UPD SP, pred:$p, spr_reglist:$r)>;
220 defm : VFPDTAnyInstAlias<"vpush${p}", "$r",
221 (VSTMDDB_UPD SP, pred:$p, dpr_reglist:$r)>;
222 defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
223 (VLDMSIA_UPD SP, pred:$p, spr_reglist:$r)>;
224 defm : VFPDTAnyInstAlias<"vpop${p}", "$r",
225 (VLDMDIA_UPD SP, pred:$p, dpr_reglist:$r)>;
226
227 // FLDMX, FSTMX - mixing S/D registers for pre-armv6 cores
228
229 //===----------------------------------------------------------------------===//
230 // FP Binary Operations.
231 //
232
233 let TwoOperandAliasConstraint = "$Dn = $Dd" in
234 def VADDD : ADbI<0b11100, 0b11, 0, 0,
235 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
236 IIC_fpALU64, "vadd", ".f64\t$Dd, $Dn, $Dm",
237 [(set DPR:$Dd, (fadd DPR:$Dn, (f64 DPR:$Dm)))]>;
238
239 let TwoOperandAliasConstraint = "$Sn = $Sd" in
240 def VADDS : ASbIn<0b11100, 0b11, 0, 0,
241 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
242 IIC_fpALU32, "vadd", ".f32\t$Sd, $Sn, $Sm",
243 [(set SPR:$Sd, (fadd SPR:$Sn, SPR:$Sm))]> {
244 // Some single precision VFP instructions may be executed on both NEON and
245 // VFP pipelines on A8.
246 let D = VFPNeonA8Domain;
247 }
248
249 let TwoOperandAliasConstraint = "$Dn = $Dd" in
250 def VSUBD : ADbI<0b11100, 0b11, 1, 0,
251 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
252 IIC_fpALU64, "vsub", ".f64\t$Dd, $Dn, $Dm",
253 [(set DPR:$Dd, (fsub DPR:$Dn, (f64 DPR:$Dm)))]>;
254
255 let TwoOperandAliasConstraint = "$Sn = $Sd" in
256 def VSUBS : ASbIn<0b11100, 0b11, 1, 0,
257 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
258 IIC_fpALU32, "vsub", ".f32\t$Sd, $Sn, $Sm",
259 [(set SPR:$Sd, (fsub SPR:$Sn, SPR:$Sm))]> {
260 // Some single precision VFP instructions may be executed on both NEON and
261 // VFP pipelines on A8.
262 let D = VFPNeonA8Domain;
263 }
264
265 let TwoOperandAliasConstraint = "$Dn = $Dd" in
266 def VDIVD : ADbI<0b11101, 0b00, 0, 0,
267 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
268 IIC_fpDIV64, "vdiv", ".f64\t$Dd, $Dn, $Dm",
269 [(set DPR:$Dd, (fdiv DPR:$Dn, (f64 DPR:$Dm)))]>;
270
271 let TwoOperandAliasConstraint = "$Sn = $Sd" in
272 def VDIVS : ASbI<0b11101, 0b00, 0, 0,
273 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
274 IIC_fpDIV32, "vdiv", ".f32\t$Sd, $Sn, $Sm",
275 [(set SPR:$Sd, (fdiv SPR:$Sn, SPR:$Sm))]>;
276
277 let TwoOperandAliasConstraint = "$Dn = $Dd" in
278 def VMULD : ADbI<0b11100, 0b10, 0, 0,
279 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
280 IIC_fpMUL64, "vmul", ".f64\t$Dd, $Dn, $Dm",
281 [(set DPR:$Dd, (fmul DPR:$Dn, (f64 DPR:$Dm)))]>;
282
283 let TwoOperandAliasConstraint = "$Sn = $Sd" in
284 def VMULS : ASbIn<0b11100, 0b10, 0, 0,
285 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
286 IIC_fpMUL32, "vmul", ".f32\t$Sd, $Sn, $Sm",
287 [(set SPR:$Sd, (fmul SPR:$Sn, SPR:$Sm))]> {
288 // Some single precision VFP instructions may be executed on both NEON and
289 // VFP pipelines on A8.
290 let D = VFPNeonA8Domain;
291 }
292
293 def VNMULD : ADbI<0b11100, 0b10, 1, 0,
294 (outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm),
295 IIC_fpMUL64, "vnmul", ".f64\t$Dd, $Dn, $Dm",
296 [(set DPR:$Dd, (fneg (fmul DPR:$Dn, (f64 DPR:$Dm))))]>;
297
298 def VNMULS : ASbI<0b11100, 0b10, 1, 0,
299 (outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm),
300 IIC_fpMUL32, "vnmul", ".f32\t$Sd, $Sn, $Sm",
301 [(set SPR:$Sd, (fneg (fmul SPR:$Sn, SPR:$Sm)))]> {
302 // Some single precision VFP instructions may be executed on both NEON and
303 // VFP pipelines on A8.
304 let D = VFPNeonA8Domain;
305 }
306
307 // Match reassociated forms only when sign-dependent rounding is not honored.
308 def : Pat<(fmul (fneg DPR:$a), (f64 DPR:$b)),
309 (VNMULD DPR:$a, DPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
310 def : Pat<(fmul (fneg SPR:$a), SPR:$b),
311 (VNMULS SPR:$a, SPR:$b)>, Requires<[NoHonorSignDependentRounding]>;
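// (fneg (fmul a, b)) and (fmul (fneg a), b) can round differently when the
// rounding mode is not symmetric about zero (round to plus/minus infinity),
// hence the NoHonorSignDependentRounding requirement on the patterns above.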
312
313 // These are encoded as unary instructions.
314 let Defs = [FPSCR_NZCV] in {
315 def VCMPED : ADuI<0b11101, 0b11, 0b0100, 0b11, 0,
316 (outs), (ins DPR:$Dd, DPR:$Dm),
317 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, $Dm",
318 [(arm_cmpfp DPR:$Dd, (f64 DPR:$Dm))]>;
319
320 def VCMPES : ASuI<0b11101, 0b11, 0b0100, 0b11, 0,
321 (outs), (ins SPR:$Sd, SPR:$Sm),
322 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, $Sm",
323 [(arm_cmpfp SPR:$Sd, SPR:$Sm)]> {
324 // Some single precision VFP instructions may be executed on both NEON and
325 // VFP pipelines on A8.
326 let D = VFPNeonA8Domain;
327 }
328
329 // FIXME: Verify encoding after integrated assembler is working.
330 def VCMPD : ADuI<0b11101, 0b11, 0b0100, 0b01, 0,
331 (outs), (ins DPR:$Dd, DPR:$Dm),
332 IIC_fpCMP64, "vcmp", ".f64\t$Dd, $Dm",
333 [/* For disassembly only; pattern left blank */]>;
334
335 def VCMPS : ASuI<0b11101, 0b11, 0b0100, 0b01, 0,
336 (outs), (ins SPR:$Sd, SPR:$Sm),
337 IIC_fpCMP32, "vcmp", ".f32\t$Sd, $Sm",
338 [/* For disassembly only; pattern left blank */]> {
339 // Some single precision VFP instructions may be executed on both NEON and
340 // VFP pipelines on A8.
341 let D = VFPNeonA8Domain;
342 }
343 } // Defs = [FPSCR_NZCV]
344
345 //===----------------------------------------------------------------------===//
346 // FP Unary Operations.
347 //
348
349 def VABSD : ADuI<0b11101, 0b11, 0b0000, 0b11, 0,
350 (outs DPR:$Dd), (ins DPR:$Dm),
351 IIC_fpUNA64, "vabs", ".f64\t$Dd, $Dm",
352 [(set DPR:$Dd, (fabs (f64 DPR:$Dm)))]>;
353
354 def VABSS : ASuIn<0b11101, 0b11, 0b0000, 0b11, 0,
355 (outs SPR:$Sd), (ins SPR:$Sm),
356 IIC_fpUNA32, "vabs", ".f32\t$Sd, $Sm",
357 [(set SPR:$Sd, (fabs SPR:$Sm))]> {
358 // Some single precision VFP instructions may be executed on both NEON and
359 // VFP pipelines on A8.
360 let D = VFPNeonA8Domain;
361 }
362
363 let Defs = [FPSCR_NZCV] in {
364 def VCMPEZD : ADuI<0b11101, 0b11, 0b0101, 0b11, 0,
365 (outs), (ins DPR:$Dd),
366 IIC_fpCMP64, "vcmpe", ".f64\t$Dd, #0",
367 [(arm_cmpfp0 (f64 DPR:$Dd))]> {
368 let Inst{3-0} = 0b0000;
369 let Inst{5} = 0;
370 }
371
372 def VCMPEZS : ASuI<0b11101, 0b11, 0b0101, 0b11, 0,
373 (outs), (ins SPR:$Sd),
374 IIC_fpCMP32, "vcmpe", ".f32\t$Sd, #0",
375 [(arm_cmpfp0 SPR:$Sd)]> {
376 let Inst{3-0} = 0b0000;
377 let Inst{5} = 0;
378
379 // Some single precision VFP instructions may be executed on both NEON and
380 // VFP pipelines on A8.
381 let D = VFPNeonA8Domain;
382 }
383
384 // FIXME: Verify encoding after integrated assembler is working.
385 def VCMPZD : ADuI<0b11101, 0b11, 0b0101, 0b01, 0,
386 (outs), (ins DPR:$Dd),
387 IIC_fpCMP64, "vcmp", ".f64\t$Dd, #0",
388 [/* For disassembly only; pattern left blank */]> {
389 let Inst{3-0} = 0b0000;
390 let Inst{5} = 0;
391 }
392
393 def VCMPZS : ASuI<0b11101, 0b11, 0b0101, 0b01, 0,
394 (outs), (ins SPR:$Sd),
395 IIC_fpCMP32, "vcmp", ".f32\t$Sd, #0",
396 [/* For disassembly only; pattern left blank */]> {
397 let Inst{3-0} = 0b0000;
398 let Inst{5} = 0;
399
400 // Some single precision VFP instructions may be executed on both NEON and
401 // VFP pipelines on A8.
402 let D = VFPNeonA8Domain;
403 }
404 } // Defs = [FPSCR_NZCV]
405
406 def VCVTDS : ASuI<0b11101, 0b11, 0b0111, 0b11, 0,
407 (outs DPR:$Dd), (ins SPR:$Sm),
408 IIC_fpCVTDS, "vcvt", ".f64.f32\t$Dd, $Sm",
409 [(set DPR:$Dd, (fextend SPR:$Sm))]> {
410 // Instruction operands.
411 bits<5> Dd;
412 bits<5> Sm;
413
414 // Encode instruction operands.
415 let Inst{3-0} = Sm{4-1};
416 let Inst{5} = Sm{0};
417 let Inst{15-12} = Dd{3-0};
418 let Inst{22} = Dd{4};
419 }
420
421 // Special-case encoding: bits 11-8 are 0b1011.
422 def VCVTSD : VFPAI<(outs SPR:$Sd), (ins DPR:$Dm), VFPUnaryFrm,
423 IIC_fpCVTSD, "vcvt", ".f32.f64\t$Sd, $Dm",
424 [(set SPR:$Sd, (fround DPR:$Dm))]> {
425 // Instruction operands.
426 bits<5> Sd;
427 bits<5> Dm;
428
429 // Encode instruction operands.
430 let Inst{3-0} = Dm{3-0};
431 let Inst{5} = Dm{4};
432 let Inst{15-12} = Sd{4-1};
433 let Inst{22} = Sd{0};
434
435 let Inst{27-23} = 0b11101;
436 let Inst{21-16} = 0b110111;
437 let Inst{11-8} = 0b1011;
438 let Inst{7-6} = 0b11;
439 let Inst{4} = 0;
440 }
441
442 // Between half-precision and single-precision. For disassembly only.
443
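// VCVTB operates on the half-precision value in the bottom 16 bits of the
// S register; VCVTT operates on the one in the top 16 bits.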
444 // FIXME: Verify encoding after integrated assembler is working.
445 def VCVTBHS: ASuI<0b11101, 0b11, 0b0010, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
446 /* FIXME */ IIC_fpCVTSH, "vcvtb", ".f32.f16\t$Sd, $Sm",
447 [/* For disassembly only; pattern left blank */]>;
448
449 def VCVTBSH: ASuI<0b11101, 0b11, 0b0011, 0b01, 0, (outs SPR:$Sd), (ins SPR:$Sm),
450 /* FIXME */ IIC_fpCVTHS, "vcvtb", ".f16.f32\t$Sd, $Sm",
451 [/* For disassembly only; pattern left blank */]>;
452
453 def : Pat<(f32_to_f16 SPR:$a),
454 (i32 (COPY_TO_REGCLASS (VCVTBSH SPR:$a), GPR))>;
455
456 def : Pat<(f16_to_f32 GPR:$a),
457 (VCVTBHS (COPY_TO_REGCLASS GPR:$a, SPR))>;
458
459 def VCVTTHS: ASuI<0b11101, 0b11, 0b0010, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
460 /* FIXME */ IIC_fpCVTSH, "vcvtt", ".f32.f16\t$Sd, $Sm",
461 [/* For disassembly only; pattern left blank */]>;
462
463 def VCVTTSH: ASuI<0b11101, 0b11, 0b0011, 0b11, 0, (outs SPR:$Sd), (ins SPR:$Sm),
464 /* FIXME */ IIC_fpCVTHS, "vcvtt", ".f16.f32\t$Sd, $Sm",
465 [/* For disassembly only; pattern left blank */]>;
466
467 def VNEGD : ADuI<0b11101, 0b11, 0b0001, 0b01, 0,
468 (outs DPR:$Dd), (ins DPR:$Dm),
469 IIC_fpUNA64, "vneg", ".f64\t$Dd, $Dm",
470 [(set DPR:$Dd, (fneg (f64 DPR:$Dm)))]>;
471
472 def VNEGS : ASuIn<0b11101, 0b11, 0b0001, 0b01, 0,
473 (outs SPR:$Sd), (ins SPR:$Sm),
474 IIC_fpUNA32, "vneg", ".f32\t$Sd, $Sm",
475 [(set SPR:$Sd, (fneg SPR:$Sm))]> {
476 // Some single precision VFP instructions may be executed on both NEON and
477 // VFP pipelines on A8.
478 let D = VFPNeonA8Domain;
479 }
480
481 def VSQRTD : ADuI<0b11101, 0b11, 0b0001, 0b11, 0,
482 (outs DPR:$Dd), (ins DPR:$Dm),
483 IIC_fpSQRT64, "vsqrt", ".f64\t$Dd, $Dm",
484 [(set DPR:$Dd, (fsqrt (f64 DPR:$Dm)))]>;
485
486 def VSQRTS : ASuI<0b11101, 0b11, 0b0001, 0b11, 0,
487 (outs SPR:$Sd), (ins SPR:$Sm),
488 IIC_fpSQRT32, "vsqrt", ".f32\t$Sd, $Sm",
489 [(set SPR:$Sd, (fsqrt SPR:$Sm))]>;
490
491 let neverHasSideEffects = 1 in {
492 def VMOVD : ADuI<0b11101, 0b11, 0b0000, 0b01, 0,
493 (outs DPR:$Dd), (ins DPR:$Dm),
494 IIC_fpUNA64, "vmov", ".f64\t$Dd, $Dm", []>;
495
496 def VMOVS : ASuI<0b11101, 0b11, 0b0000, 0b01, 0,
497 (outs SPR:$Sd), (ins SPR:$Sm),
498 IIC_fpUNA32, "vmov", ".f32\t$Sd, $Sm", []>;
499 } // neverHasSideEffects
500
501 //===----------------------------------------------------------------------===//
502 // FP <-> GPR Copies. Int <-> FP Conversions.
503 //
504
505 def VMOVRS : AVConv2I<0b11100001, 0b1010,
506 (outs GPR:$Rt), (ins SPR:$Sn),
507 IIC_fpMOVSI, "vmov", "\t$Rt, $Sn",
508 [(set GPR:$Rt, (bitconvert SPR:$Sn))]> {
509 // Instruction operands.
510 bits<4> Rt;
511 bits<5> Sn;
512
513 // Encode instruction operands.
514 let Inst{19-16} = Sn{4-1};
515 let Inst{7} = Sn{0};
516 let Inst{15-12} = Rt;
517
518 let Inst{6-5} = 0b00;
519 let Inst{3-0} = 0b0000;
520
521 // Some single precision VFP instructions may be executed on both NEON and VFP
522 // pipelines.
523 let D = VFPNeonDomain;
524 }
525
526 // Bitcast i32 -> f32. NEON prefers to use VMOVDRR.
527 def VMOVSR : AVConv4I<0b11100000, 0b1010,
528 (outs SPR:$Sn), (ins GPR:$Rt),
529 IIC_fpMOVIS, "vmov", "\t$Sn, $Rt",
530 [(set SPR:$Sn, (bitconvert GPR:$Rt))]>,
531 Requires<[HasVFP2, UseVMOVSR]> {
532 // Instruction operands.
533 bits<5> Sn;
534 bits<4> Rt;
535
536 // Encode instruction operands.
537 let Inst{19-16} = Sn{4-1};
538 let Inst{7} = Sn{0};
539 let Inst{15-12} = Rt;
540
541 let Inst{6-5} = 0b00;
542 let Inst{3-0} = 0b0000;
543
544 // Some single precision VFP instructions may be executed on both NEON and VFP
545 // pipelines.
546 let D = VFPNeonDomain;
547 }
548
549 let neverHasSideEffects = 1 in {
550 def VMOVRRD : AVConv3I<0b11000101, 0b1011,
551 (outs GPR:$Rt, GPR:$Rt2), (ins DPR:$Dm),
552 IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $Dm",
553 [/* FIXME: Can't write pattern for multiple result instr*/]> {
554 // Instruction operands.
555 bits<5> Dm;
556 bits<4> Rt;
557 bits<4> Rt2;
558
559 // Encode instruction operands.
560 let Inst{3-0} = Dm{3-0};
561 let Inst{5} = Dm{4};
562 let Inst{15-12} = Rt;
563 let Inst{19-16} = Rt2;
564
565 let Inst{7-6} = 0b00;
566
567 // Some single precision VFP instructions may be executed on both NEON and VFP
568 // pipelines.
569 let D = VFPNeonDomain;
570 }
571
572 def VMOVRRS : AVConv3I<0b11000101, 0b1010,
573 (outs GPR:$Rt, GPR:$Rt2), (ins SPR:$src1, SPR:$src2),
574 IIC_fpMOVDI, "vmov", "\t$Rt, $Rt2, $src1, $src2",
575 [/* For disassembly only; pattern left blank */]> {
576 bits<5> src1;
577 bits<4> Rt;
578 bits<4> Rt2;
579
580 // Encode instruction operands.
581 let Inst{3-0} = src1{4-1};
582 let Inst{5} = src1{0};
583 let Inst{15-12} = Rt;
584 let Inst{19-16} = Rt2;
585
586 let Inst{7-6} = 0b00;
587
588 // Some single precision VFP instructions may be executed on both NEON and VFP
589 // pipelines.
590 let D = VFPNeonDomain;
591 let DecoderMethod = "DecodeVMOVRRS";
592 }
593 } // neverHasSideEffects
594
595 // FMDHR: GPR -> SPR
596 // FMDLR: GPR -> SPR
597
598 def VMOVDRR : AVConv5I<0b11000100, 0b1011,
599 (outs DPR:$Dm), (ins GPR:$Rt, GPR:$Rt2),
600 IIC_fpMOVID, "vmov", "\t$Dm, $Rt, $Rt2",
601 [(set DPR:$Dm, (arm_fmdrr GPR:$Rt, GPR:$Rt2))]> {
602 // Instruction operands.
603 bits<5> Dm;
604 bits<4> Rt;
605 bits<4> Rt2;
606
607 // Encode instruction operands.
608 let Inst{3-0} = Dm{3-0};
609 let Inst{5} = Dm{4};
610 let Inst{15-12} = Rt;
611 let Inst{19-16} = Rt2;
612
613 let Inst{7-6} = 0b00;
614
615 // Some single precision VFP instructions may be executed on both NEON and VFP
616 // pipelines.
617 let D = VFPNeonDomain;
618 }
619
620 let neverHasSideEffects = 1 in
621 def VMOVSRR : AVConv5I<0b11000100, 0b1010,
622 (outs SPR:$dst1, SPR:$dst2), (ins GPR:$src1, GPR:$src2),
623 IIC_fpMOVID, "vmov", "\t$dst1, $dst2, $src1, $src2",
624 [/* For disassembly only; pattern left blank */]> {
625 // Instruction operands.
626 bits<5> dst1;
627 bits<4> src1;
628 bits<4> src2;
629
630 // Encode instruction operands.
631 let Inst{3-0} = dst1{4-1};
632 let Inst{5} = dst1{0};
633 let Inst{15-12} = src1;
634 let Inst{19-16} = src2;
635
636 let Inst{7-6} = 0b00;
637
638 // Some single precision VFP instructions may be executed on both NEON and VFP
639 // pipelines.
640 let D = VFPNeonDomain;
641
642 let DecoderMethod = "DecodeVMOVSRR";
643 }
644
645 // FMRDH: SPR -> GPR
646 // FMRDL: SPR -> GPR
647 // FMRRS: SPR -> GPR
648 // FMRX: SPR system reg -> GPR
649 // FMSRR: GPR -> SPR
650 // FMXR: GPR -> VFP system reg
651
652
653 // Int -> FP:
654
655 class AVConv1IDs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
656 bits<4> opcod4, dag oops, dag iops,
657 InstrItinClass itin, string opc, string asm,
658 list<dag> pattern>
659 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
660 pattern> {
661 // Instruction operands.
662 bits<5> Dd;
663 bits<5> Sm;
664
665 // Encode instruction operands.
666 let Inst{3-0} = Sm{4-1};
667 let Inst{5} = Sm{0};
668 let Inst{15-12} = Dd{3-0};
669 let Inst{22} = Dd{4};
670 }
671
672 class AVConv1InSs_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
673 bits<4> opcod4, dag oops, dag iops,InstrItinClass itin,
674 string opc, string asm, list<dag> pattern>
675 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
676 pattern> {
677 // Instruction operands.
678 bits<5> Sd;
679 bits<5> Sm;
680
681 // Encode instruction operands.
682 let Inst{3-0} = Sm{4-1};
683 let Inst{5} = Sm{0};
684 let Inst{15-12} = Sd{4-1};
685 let Inst{22} = Sd{0};
686 }
687
688 def VSITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
689 (outs DPR:$Dd), (ins SPR:$Sm),
690 IIC_fpCVTID, "vcvt", ".f64.s32\t$Dd, $Sm",
691 [(set DPR:$Dd, (f64 (arm_sitof SPR:$Sm)))]> {
692 let Inst{7} = 1; // s32
693 }
694
695 def VSITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
696 (outs SPR:$Sd),(ins SPR:$Sm),
697 IIC_fpCVTIS, "vcvt", ".f32.s32\t$Sd, $Sm",
698 [(set SPR:$Sd, (arm_sitof SPR:$Sm))]> {
699 let Inst{7} = 1; // s32
700
701 // Some single precision VFP instructions may be executed on both NEON and
702 // VFP pipelines on A8.
703 let D = VFPNeonA8Domain;
704 }
705
706 def VUITOD : AVConv1IDs_Encode<0b11101, 0b11, 0b1000, 0b1011,
707 (outs DPR:$Dd), (ins SPR:$Sm),
708 IIC_fpCVTID, "vcvt", ".f64.u32\t$Dd, $Sm",
709 [(set DPR:$Dd, (f64 (arm_uitof SPR:$Sm)))]> {
710 let Inst{7} = 0; // u32
711 }
712
713 def VUITOS : AVConv1InSs_Encode<0b11101, 0b11, 0b1000, 0b1010,
714 (outs SPR:$Sd), (ins SPR:$Sm),
715 IIC_fpCVTIS, "vcvt", ".f32.u32\t$Sd, $Sm",
716 [(set SPR:$Sd, (arm_uitof SPR:$Sm))]> {
717 let Inst{7} = 0; // u32
718
719 // Some single precision VFP instructions may be executed on both NEON and
720 // VFP pipelines on A8.
721 let D = VFPNeonA8Domain;
722 }
723
724 // FP -> Int:
725
726 class AVConv1IsD_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
727 bits<4> opcod4, dag oops, dag iops,
728 InstrItinClass itin, string opc, string asm,
729 list<dag> pattern>
730 : AVConv1I<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
731 pattern> {
732 // Instruction operands.
733 bits<5> Sd;
734 bits<5> Dm;
735
736 // Encode instruction operands.
737 let Inst{3-0} = Dm{3-0};
738 let Inst{5} = Dm{4};
739 let Inst{15-12} = Sd{4-1};
740 let Inst{22} = Sd{0};
741 }
742
743 class AVConv1InsS_Encode<bits<5> opcod1, bits<2> opcod2, bits<4> opcod3,
744 bits<4> opcod4, dag oops, dag iops,
745 InstrItinClass itin, string opc, string asm,
746 list<dag> pattern>
747 : AVConv1In<opcod1, opcod2, opcod3, opcod4, oops, iops, itin, opc, asm,
748 pattern> {
749 // Instruction operands.
750 bits<5> Sd;
751 bits<5> Sm;
752
753 // Encode instruction operands.
754 let Inst{3-0} = Sm{4-1};
755 let Inst{5} = Sm{0};
756 let Inst{15-12} = Sd{4-1};
757 let Inst{22} = Sd{0};
758 }
759
760 // Always set Z bit in the instruction, i.e. "round towards zero" variants.
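// These are the forms used for ordinary C-style FP-to-integer conversions,
// which truncate towards zero regardless of the current FPSCR rounding mode.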
761 def VTOSIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
762 (outs SPR:$Sd), (ins DPR:$Dm),
763 IIC_fpCVTDI, "vcvt", ".s32.f64\t$Sd, $Dm",
764 [(set SPR:$Sd, (arm_ftosi (f64 DPR:$Dm)))]> {
765 let Inst{7} = 1; // Z bit
766 }
767
768 def VTOSIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
769 (outs SPR:$Sd), (ins SPR:$Sm),
770 IIC_fpCVTSI, "vcvt", ".s32.f32\t$Sd, $Sm",
771 [(set SPR:$Sd, (arm_ftosi SPR:$Sm))]> {
772 let Inst{7} = 1; // Z bit
773
774 // Some single precision VFP instructions may be executed on both NEON and
775 // VFP pipelines on A8.
776 let D = VFPNeonA8Domain;
777 }
778
779 def VTOUIZD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
780 (outs SPR:$Sd), (ins DPR:$Dm),
781 IIC_fpCVTDI, "vcvt", ".u32.f64\t$Sd, $Dm",
782 [(set SPR:$Sd, (arm_ftoui (f64 DPR:$Dm)))]> {
783 let Inst{7} = 1; // Z bit
784 }
785
786 def VTOUIZS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
787 (outs SPR:$Sd), (ins SPR:$Sm),
788 IIC_fpCVTSI, "vcvt", ".u32.f32\t$Sd, $Sm",
789 [(set SPR:$Sd, (arm_ftoui SPR:$Sm))]> {
790 let Inst{7} = 1; // Z bit
791
792 // Some single precision VFP instructions may be executed on both NEON and
793 // VFP pipelines on A8.
794 let D = VFPNeonA8Domain;
795 }
796
797 // And the Z bit '0' variants, which use the rounding mode specified by FPSCR.
798 let Uses = [FPSCR] in {
799 // FIXME: Verify encoding after integrated assembler is working.
800 def VTOSIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1101, 0b1011,
801 (outs SPR:$Sd), (ins DPR:$Dm),
802 IIC_fpCVTDI, "vcvtr", ".s32.f64\t$Sd, $Dm",
803 [(set SPR:$Sd, (int_arm_vcvtr (f64 DPR:$Dm)))]>{
804 let Inst{7} = 0; // Z bit
805 }
806
807 def VTOSIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1101, 0b1010,
808 (outs SPR:$Sd), (ins SPR:$Sm),
809 IIC_fpCVTSI, "vcvtr", ".s32.f32\t$Sd, $Sm",
810 [(set SPR:$Sd, (int_arm_vcvtr SPR:$Sm))]> {
811 let Inst{7} = 0; // Z bit
812 }
813
814 def VTOUIRD : AVConv1IsD_Encode<0b11101, 0b11, 0b1100, 0b1011,
815 (outs SPR:$Sd), (ins DPR:$Dm),
816 IIC_fpCVTDI, "vcvtr", ".u32.f64\t$Sd, $Dm",
817 [(set SPR:$Sd, (int_arm_vcvtru(f64 DPR:$Dm)))]>{
818 let Inst{7} = 0; // Z bit
819 }
820
821 def VTOUIRS : AVConv1InsS_Encode<0b11101, 0b11, 0b1100, 0b1010,
822 (outs SPR:$Sd), (ins SPR:$Sm),
823 IIC_fpCVTSI, "vcvtr", ".u32.f32\t$Sd, $Sm",
824 [(set SPR:$Sd, (int_arm_vcvtru SPR:$Sm))]> {
825 let Inst{7} = 0; // Z bit
826 }
827 }
828
829 // Convert between floating-point and fixed-point
830 // Data type for fixed-point naming convention:
831 // S16 (U=0, sx=0) -> SH
832 // U16 (U=1, sx=0) -> UH
833 // S32 (U=0, sx=1) -> SL
834 // U32 (U=1, sx=1) -> UL
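// For example, VTOSHS is float-to-fixed .s16.f32 and VSHTOS is fixed-to-float
// .f32.s16; the trailing S or D gives the floating-point register size.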
835
836 let Constraints = "$a = $dst" in {
837
838 // FP to Fixed-Point:
839
840 // Single Precision register
841 class AVConv1XInsS_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
842 bit op5, dag oops, dag iops, InstrItinClass itin,
843 string opc, string asm, list<dag> pattern>
844 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
845 bits<5> dst;
846 // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
847 let Inst{22} = dst{0};
848 let Inst{15-12} = dst{4-1};
849 }
850
851 // Double Precision register
852 class AVConv1XInsD_Encode<bits<5> op1, bits<2> op2, bits<4> op3, bits<4> op4,
853 bit op5, dag oops, dag iops, InstrItinClass itin,
854 string opc, string asm, list<dag> pattern>
855 : AVConv1XI<op1, op2, op3, op4, op5, oops, iops, itin, opc, asm, pattern> {
856 bits<5> dst;
857 // if dp_operation then UInt(D:Vd) else UInt(Vd:D);
858 let Inst{22} = dst{4};
859 let Inst{15-12} = dst{3-0};
860 }
861
862 def VTOSHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 0,
863 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
864 IIC_fpCVTSI, "vcvt", ".s16.f32\t$dst, $a, $fbits", []> {
865 // Some single precision VFP instructions may be executed on both NEON and
866 // VFP pipelines on A8.
867 let D = VFPNeonA8Domain;
868 }
869
870 def VTOUHS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 0,
871 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
872 IIC_fpCVTSI, "vcvt", ".u16.f32\t$dst, $a, $fbits", []> {
873 // Some single precision VFP instructions may be executed on both NEON and
874 // VFP pipelines on A8.
875 let D = VFPNeonA8Domain;
876 }
877
878 def VTOSLS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1110, 0b1010, 1,
879 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
880 IIC_fpCVTSI, "vcvt", ".s32.f32\t$dst, $a, $fbits", []> {
881 // Some single precision VFP instructions may be executed on both NEON and
882 // VFP pipelines on A8.
883 let D = VFPNeonA8Domain;
884 }
885
886 def VTOULS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1111, 0b1010, 1,
887 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
888 IIC_fpCVTSI, "vcvt", ".u32.f32\t$dst, $a, $fbits", []> {
889 // Some single precision VFP instructions may be executed on both NEON and
890 // VFP pipelines on A8.
891 let D = VFPNeonA8Domain;
892 }
893
894 def VTOSHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 0,
895 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
896 IIC_fpCVTDI, "vcvt", ".s16.f64\t$dst, $a, $fbits", []>;
897
898 def VTOUHD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 0,
899 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
900 IIC_fpCVTDI, "vcvt", ".u16.f64\t$dst, $a, $fbits", []>;
901
902 def VTOSLD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1110, 0b1011, 1,
903 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
904 IIC_fpCVTDI, "vcvt", ".s32.f64\t$dst, $a, $fbits", []>;
905
906 def VTOULD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1111, 0b1011, 1,
907 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
908 IIC_fpCVTDI, "vcvt", ".u32.f64\t$dst, $a, $fbits", []>;
909
910 // Fixed-Point to FP:
911
912 def VSHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 0,
913 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
914 IIC_fpCVTIS, "vcvt", ".f32.s16\t$dst, $a, $fbits", []> {
915 // Some single precision VFP instructions may be executed on both NEON and
916 // VFP pipelines on A8.
917 let D = VFPNeonA8Domain;
918 }
919
920 def VUHTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 0,
921 (outs SPR:$dst), (ins SPR:$a, fbits16:$fbits),
922 IIC_fpCVTIS, "vcvt", ".f32.u16\t$dst, $a, $fbits", []> {
923 // Some single precision VFP instructions may be executed on both NEON and
924 // VFP pipelines on A8.
925 let D = VFPNeonA8Domain;
926 }
927
928 def VSLTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1010, 0b1010, 1,
929 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
930 IIC_fpCVTIS, "vcvt", ".f32.s32\t$dst, $a, $fbits", []> {
931 // Some single precision VFP instructions may be executed on both NEON and
932 // VFP pipelines on A8.
933 let D = VFPNeonA8Domain;
934 }
935
936 def VULTOS : AVConv1XInsS_Encode<0b11101, 0b11, 0b1011, 0b1010, 1,
937 (outs SPR:$dst), (ins SPR:$a, fbits32:$fbits),
938 IIC_fpCVTIS, "vcvt", ".f32.u32\t$dst, $a, $fbits", []> {
939 // Some single precision VFP instructions may be executed on both NEON and
940 // VFP pipelines on A8.
941 let D = VFPNeonA8Domain;
942 }
943
944 def VSHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 0,
945 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
946 IIC_fpCVTID, "vcvt", ".f64.s16\t$dst, $a, $fbits", []>;
947
948 def VUHTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 0,
949 (outs DPR:$dst), (ins DPR:$a, fbits16:$fbits),
950 IIC_fpCVTID, "vcvt", ".f64.u16\t$dst, $a, $fbits", []>;
951
952 def VSLTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1010, 0b1011, 1,
953 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
954 IIC_fpCVTID, "vcvt", ".f64.s32\t$dst, $a, $fbits", []>;
955
956 def VULTOD : AVConv1XInsD_Encode<0b11101, 0b11, 0b1011, 0b1011, 1,
957 (outs DPR:$dst), (ins DPR:$a, fbits32:$fbits),
958 IIC_fpCVTID, "vcvt", ".f64.u32\t$dst, $a, $fbits", []>;
959
960 } // End of 'let Constraints = "$a = $dst" in'
961
962 //===----------------------------------------------------------------------===//
963 // FP Multiply-Accumulate Operations.
964 //
965
966 def VMLAD : ADbI<0b11100, 0b00, 0, 0,
967 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
968 IIC_fpMAC64, "vmla", ".f64\t$Dd, $Dn, $Dm",
969 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
970 (f64 DPR:$Ddin)))]>,
971 RegConstraint<"$Ddin = $Dd">,
972 Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
973
974 def VMLAS : ASbIn<0b11100, 0b00, 0, 0,
975 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
976 IIC_fpMAC32, "vmla", ".f32\t$Sd, $Sn, $Sm",
977 [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
978 SPR:$Sdin))]>,
979 RegConstraint<"$Sdin = $Sd">,
980 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
981 // Some single precision VFP instructions may be executed on both NEON and
982 // VFP pipelines on A8.
983 let D = VFPNeonA8Domain;
984 }
985
986 def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
987 (VMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
988 Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
989 def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
990 (VMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
991 Requires<[HasVFP2,DontUseNEONForFP, UseFPVMLx,DontUseFusedMAC]>;
992
993 def VMLSD : ADbI<0b11100, 0b00, 1, 0,
994 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
995 IIC_fpMAC64, "vmls", ".f64\t$Dd, $Dn, $Dm",
996 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
997 (f64 DPR:$Ddin)))]>,
998 RegConstraint<"$Ddin = $Dd">,
999 Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
1000
1001 def VMLSS : ASbIn<0b11100, 0b00, 1, 0,
1002 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1003 IIC_fpMAC32, "vmls", ".f32\t$Sd, $Sn, $Sm",
1004 [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
1005 SPR:$Sdin))]>,
1006 RegConstraint<"$Sdin = $Sd">,
1007 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
1008 // Some single precision VFP instructions may be executed on both NEON and
1009 // VFP pipelines on A8.
1010 let D = VFPNeonA8Domain;
1011 }
1012
1013 def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
1014 (VMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
1015 Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
1016 def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
1017 (VMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
1018 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
1019
1020 def VNMLAD : ADbI<0b11100, 0b01, 1, 0,
1021 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1022 IIC_fpMAC64, "vnmla", ".f64\t$Dd, $Dn, $Dm",
1023 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
1024 (f64 DPR:$Ddin)))]>,
1025 RegConstraint<"$Ddin = $Dd">,
1026 Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
1027
1028 def VNMLAS : ASbI<0b11100, 0b01, 1, 0,
1029 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1030 IIC_fpMAC32, "vnmla", ".f32\t$Sd, $Sn, $Sm",
1031 [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
1032 SPR:$Sdin))]>,
1033 RegConstraint<"$Sdin = $Sd">,
1034 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
1035 // Some single precision VFP instructions may be executed on both NEON and
1036 // VFP pipelines on A8.
1037 let D = VFPNeonA8Domain;
1038 }
1039
1040 def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
1041 (VNMLAD DPR:$dstin, DPR:$a, DPR:$b)>,
1042 Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
1043 def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
1044 (VNMLAS SPR:$dstin, SPR:$a, SPR:$b)>,
1045 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
1046
1047 def VNMLSD : ADbI<0b11100, 0b01, 0, 0,
1048 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1049 IIC_fpMAC64, "vnmls", ".f64\t$Dd, $Dn, $Dm",
1050 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
1051 (f64 DPR:$Ddin)))]>,
1052 RegConstraint<"$Ddin = $Dd">,
1053 Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
1054
1055 def VNMLSS : ASbI<0b11100, 0b01, 0, 0,
1056 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1057 IIC_fpMAC32, "vnmls", ".f32\t$Sd, $Sn, $Sm",
1058 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
1059 RegConstraint<"$Sdin = $Sd">,
1060 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]> {
1061 // Some single precision VFP instructions may be executed on both NEON and
1062 // VFP pipelines on A8.
1063 let D = VFPNeonA8Domain;
1064 }
1065
1066 def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
1067 (VNMLSD DPR:$dstin, DPR:$a, DPR:$b)>,
1068 Requires<[HasVFP2,UseFPVMLx,DontUseFusedMAC]>;
1069 def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
1070 (VNMLSS SPR:$dstin, SPR:$a, SPR:$b)>,
1071 Requires<[HasVFP2,DontUseNEONForFP,UseFPVMLx,DontUseFusedMAC]>;
1072
1073 //===----------------------------------------------------------------------===//
1074 // Fused FP Multiply-Accumulate Operations.
1075 //
1076 def VFMAD : ADbI<0b11101, 0b10, 0, 0,
1077 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1078 IIC_fpFMAC64, "vfma", ".f64\t$Dd, $Dn, $Dm",
1079 [(set DPR:$Dd, (fadd_mlx (fmul_su DPR:$Dn, DPR:$Dm),
1080 (f64 DPR:$Ddin)))]>,
1081 RegConstraint<"$Ddin = $Dd">,
1082 Requires<[HasVFP4,UseFusedMAC]>;
1083
1084 def VFMAS : ASbIn<0b11101, 0b10, 0, 0,
1085 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1086 IIC_fpFMAC32, "vfma", ".f32\t$Sd, $Sn, $Sm",
1087 [(set SPR:$Sd, (fadd_mlx (fmul_su SPR:$Sn, SPR:$Sm),
1088 SPR:$Sdin))]>,
1089 RegConstraint<"$Sdin = $Sd">,
1090 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
1091 // Some single precision VFP instructions may be executed on both NEON and
1092 // VFP pipelines.
1093 }
1094
1095 def : Pat<(fadd_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
1096 (VFMAD DPR:$dstin, DPR:$a, DPR:$b)>,
1097 Requires<[HasVFP4,UseFusedMAC]>;
1098 def : Pat<(fadd_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
1099 (VFMAS SPR:$dstin, SPR:$a, SPR:$b)>,
1100 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
1101
1102 // Match @llvm.fma.* intrinsics
1103 // (fma x, y, z) -> (vfma z, x, y)
1104 def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, DPR:$Ddin)),
1105 (VFMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
1106 Requires<[HasVFP4]>;
1107 def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, SPR:$Sdin)),
1108 (VFMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
1109 Requires<[HasVFP4]>;
1110
1111 def VFMSD : ADbI<0b11101, 0b10, 1, 0,
1112 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1113 IIC_fpFMAC64, "vfms", ".f64\t$Dd, $Dn, $Dm",
1114 [(set DPR:$Dd, (fadd_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
1115 (f64 DPR:$Ddin)))]>,
1116 RegConstraint<"$Ddin = $Dd">,
1117 Requires<[HasVFP4,UseFusedMAC]>;
1118
1119 def VFMSS : ASbIn<0b11101, 0b10, 1, 0,
1120 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1121 IIC_fpFMAC32, "vfms", ".f32\t$Sd, $Sn, $Sm",
1122 [(set SPR:$Sd, (fadd_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
1123 SPR:$Sdin))]>,
1124 RegConstraint<"$Sdin = $Sd">,
1125 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
1126 // Some single precision VFP instructions may be executed on both NEON and
1127 // VFP pipelines.
1128 }
1129
1130 def : Pat<(fsub_mlx DPR:$dstin, (fmul_su DPR:$a, (f64 DPR:$b))),
1131 (VFMSD DPR:$dstin, DPR:$a, DPR:$b)>,
1132 Requires<[HasVFP4,UseFusedMAC]>;
1133 def : Pat<(fsub_mlx SPR:$dstin, (fmul_su SPR:$a, SPR:$b)),
1134 (VFMSS SPR:$dstin, SPR:$a, SPR:$b)>,
1135 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
1136
1137 // Match @llvm.fma.* intrinsics
1138 // (fma (fneg x), y, z) -> (vfms z, x, y)
1139 def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin)),
1140 (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
1141 Requires<[HasVFP4]>;
1142 def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin)),
1143 (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
1144 Requires<[HasVFP4]>;
1145 // (fma x, (fneg y), z) -> (vfms z, x, y)
1146 def : Pat<(f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin)),
1147 (VFMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
1148 Requires<[HasVFP4]>;
1149 def : Pat<(f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin)),
1150 (VFMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
1151 Requires<[HasVFP4]>;
1152
1153 def VFNMAD : ADbI<0b11101, 0b01, 1, 0,
1154 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1155 IIC_fpFMAC64, "vfnma", ".f64\t$Dd, $Dn, $Dm",
1156 [(set DPR:$Dd,(fsub_mlx (fneg (fmul_su DPR:$Dn,DPR:$Dm)),
1157 (f64 DPR:$Ddin)))]>,
1158 RegConstraint<"$Ddin = $Dd">,
1159 Requires<[HasVFP4,UseFusedMAC]>;
1160
1161 def VFNMAS : ASbI<0b11101, 0b01, 1, 0,
1162 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1163 IIC_fpFMAC32, "vfnma", ".f32\t$Sd, $Sn, $Sm",
1164 [(set SPR:$Sd, (fsub_mlx (fneg (fmul_su SPR:$Sn, SPR:$Sm)),
1165 SPR:$Sdin))]>,
1166 RegConstraint<"$Sdin = $Sd">,
1167 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
1168 // Some single precision VFP instructions may be executed on both NEON and
1169 // VFP pipelines.
1170 }
1171
1172 def : Pat<(fsub_mlx (fneg (fmul_su DPR:$a, (f64 DPR:$b))), DPR:$dstin),
1173 (VFNMAD DPR:$dstin, DPR:$a, DPR:$b)>,
1174 Requires<[HasVFP4,UseFusedMAC]>;
1175 def : Pat<(fsub_mlx (fneg (fmul_su SPR:$a, SPR:$b)), SPR:$dstin),
1176 (VFNMAS SPR:$dstin, SPR:$a, SPR:$b)>,
1177 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
1178
1179 // Match @llvm.fma.* intrinsics
1180 // (fneg (fma x, y, z)) -> (vfnma z, x, y)
1181 def : Pat<(fneg (fma (f64 DPR:$Dn), (f64 DPR:$Dm), (f64 DPR:$Ddin))),
1182 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
1183 Requires<[HasVFP4]>;
1184 def : Pat<(fneg (fma (f32 SPR:$Sn), (f32 SPR:$Sm), (f32 SPR:$Sdin))),
1185 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
1186 Requires<[HasVFP4]>;
1187 // (fma (fneg x), y, (fneg z)) -> (vfnma z, x, y)
1188 def : Pat<(f64 (fma (fneg DPR:$Dn), DPR:$Dm, (fneg DPR:$Ddin))),
1189 (VFNMAD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
1190 Requires<[HasVFP4]>;
1191 def : Pat<(f32 (fma (fneg SPR:$Sn), SPR:$Sm, (fneg SPR:$Sdin))),
1192 (VFNMAS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
1193 Requires<[HasVFP4]>;
1194
1195 def VFNMSD : ADbI<0b11101, 0b01, 0, 0,
1196 (outs DPR:$Dd), (ins DPR:$Ddin, DPR:$Dn, DPR:$Dm),
1197 IIC_fpFMAC64, "vfnms", ".f64\t$Dd, $Dn, $Dm",
1198 [(set DPR:$Dd, (fsub_mlx (fmul_su DPR:$Dn, DPR:$Dm),
1199 (f64 DPR:$Ddin)))]>,
1200 RegConstraint<"$Ddin = $Dd">,
1201 Requires<[HasVFP4,UseFusedMAC]>;
1202
1203 def VFNMSS : ASbI<0b11101, 0b01, 0, 0,
1204 (outs SPR:$Sd), (ins SPR:$Sdin, SPR:$Sn, SPR:$Sm),
1205 IIC_fpFMAC32, "vfnms", ".f32\t$Sd, $Sn, $Sm",
1206 [(set SPR:$Sd, (fsub_mlx (fmul_su SPR:$Sn, SPR:$Sm), SPR:$Sdin))]>,
1207 RegConstraint<"$Sdin = $Sd">,
1208 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]> {
1209 // Some single precision VFP instructions may be executed on both NEON and
1210 // VFP pipelines.
1211 }
1212
1213 def : Pat<(fsub_mlx (fmul_su DPR:$a, (f64 DPR:$b)), DPR:$dstin),
1214 (VFNMSD DPR:$dstin, DPR:$a, DPR:$b)>,
1215 Requires<[HasVFP4,UseFusedMAC]>;
1216 def : Pat<(fsub_mlx (fmul_su SPR:$a, SPR:$b), SPR:$dstin),
1217 (VFNMSS SPR:$dstin, SPR:$a, SPR:$b)>,
1218 Requires<[HasVFP4,DontUseNEONForFP,UseFusedMAC]>;
1219
1220 // Match @llvm.fma.* intrinsics
1221
1222 // (fma x, y, (fneg z)) -> (vfnms z, x, y))
1223 def : Pat<(f64 (fma DPR:$Dn, DPR:$Dm, (fneg DPR:$Ddin))),
1224 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
1225 Requires<[HasVFP4]>;
1226 def : Pat<(f32 (fma SPR:$Sn, SPR:$Sm, (fneg SPR:$Sdin))),
1227 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
1228 Requires<[HasVFP4]>;
1229 // (fneg (fma (fneg x), y, z)) -> (vfnms z, x, y)
1230 def : Pat<(fneg (f64 (fma (fneg DPR:$Dn), DPR:$Dm, DPR:$Ddin))),
1231 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
1232 Requires<[HasVFP4]>;
1233 def : Pat<(fneg (f32 (fma (fneg SPR:$Sn), SPR:$Sm, SPR:$Sdin))),
1234 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
1235 Requires<[HasVFP4]>;
1236 // (fneg (fma x, (fneg y), z)) -> (vfnms z, x, y)
1237 def : Pat<(fneg (f64 (fma DPR:$Dn, (fneg DPR:$Dm), DPR:$Ddin))),
1238 (VFNMSD DPR:$Ddin, DPR:$Dn, DPR:$Dm)>,
1239 Requires<[HasVFP4]>;
1240 def : Pat<(fneg (f32 (fma SPR:$Sn, (fneg SPR:$Sm), SPR:$Sdin))),
1241 (VFNMSS SPR:$Sdin, SPR:$Sn, SPR:$Sm)>,
1242 Requires<[HasVFP4]>;
1243
1244 //===----------------------------------------------------------------------===//
1245 // FP Conditional moves.
1246 //
1247
1248 let neverHasSideEffects = 1 in {
1249 def VMOVDcc : ARMPseudoInst<(outs DPR:$Dd), (ins DPR:$Dn, DPR:$Dm, pred:$p),
1250 4, IIC_fpUNA64,
1251 [/*(set DPR:$Dd, (ARMcmov DPR:$Dn, DPR:$Dm, imm:$cc))*/]>,
1252 RegConstraint<"$Dn = $Dd">;
1253
1254 def VMOVScc : ARMPseudoInst<(outs SPR:$Sd), (ins SPR:$Sn, SPR:$Sm, pred:$p),
1255 4, IIC_fpUNA32,
1256 [/*(set SPR:$Sd, (ARMcmov SPR:$Sn, SPR:$Sm, imm:$cc))*/]>,
1257 RegConstraint<"$Sn = $Sd">;
1258 } // neverHasSideEffects
1259
1260 //===----------------------------------------------------------------------===//
1261 // Move from VFP System Register to ARM core register.
1262 //
1263
1264 class MovFromVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
1265 list<dag> pattern>:
1266 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {
1267
1268 // Instruction operand.
1269 bits<4> Rt;
1270
1271 let Inst{27-20} = 0b11101111;
1272 let Inst{19-16} = opc19_16;
1273 let Inst{15-12} = Rt;
1274 let Inst{11-8} = 0b1010;
1275 let Inst{7} = 0;
1276 let Inst{6-5} = 0b00;
1277 let Inst{4} = 1;
1278 let Inst{3-0} = 0b0000;
1279 }
1280
1281 // APSR is the application level alias of CPSR. This moves the FPSCR N, Z, C, V
1282 // flags to APSR.
1283 let Defs = [CPSR], Uses = [FPSCR_NZCV], Rt = 0b1111 /* apsr_nzcv */ in
1284 def FMSTAT : MovFromVFP<0b0001 /* fpscr */, (outs), (ins),
1285 "vmrs", "\tAPSR_nzcv, fpscr", [(arm_fmstat)]>;
1286
1287 // Application level FPSCR -> GPR
1288 let hasSideEffects = 1, Uses = [FPSCR] in
1289 def VMRS : MovFromVFP<0b0001 /* fpscr */, (outs GPR:$Rt), (ins),
1290 "vmrs", "\t$Rt, fpscr",
1291 [(set GPR:$Rt, (int_arm_get_fpscr))]>;
1292
1293 // System level FPEXC, FPSID -> GPR
1294 let Uses = [FPSCR] in {
1295 def VMRS_FPEXC : MovFromVFP<0b1000 /* fpexc */, (outs GPR:$Rt), (ins),
1296 "vmrs", "\t$Rt, fpexc", []>;
1297 def VMRS_FPSID : MovFromVFP<0b0000 /* fpsid */, (outs GPR:$Rt), (ins),
1298 "vmrs", "\t$Rt, fpsid", []>;
1299 def VMRS_MVFR0 : MovFromVFP<0b0111 /* mvfr0 */, (outs GPR:$Rt), (ins),
1300 "vmrs", "\t$Rt, mvfr0", []>;
1301 def VMRS_MVFR1 : MovFromVFP<0b0110 /* mvfr1 */, (outs GPR:$Rt), (ins),
1302 "vmrs", "\t$Rt, mvfr1", []>;
1303 }
1304
1305 //===----------------------------------------------------------------------===//
1306 // Move from ARM core register to VFP System Register.
1307 //
1308
1309 class MovToVFP<bits<4> opc19_16, dag oops, dag iops, string opc, string asm,
1310 list<dag> pattern>:
1311 VFPAI<oops, iops, VFPMiscFrm, IIC_fpSTAT, opc, asm, pattern> {
1312
1313 // Instruction operand.
1314 bits<4> src;
1315
1316 // Encode instruction operand.
1317 let Inst{15-12} = src;
1318
1319 let Inst{27-20} = 0b11101110;
1320 let Inst{19-16} = opc19_16;
1321 let Inst{11-8} = 0b1010;
1322 let Inst{7} = 0;
1323 let Inst{4} = 1;
1324 }
1325
1326 let Defs = [FPSCR] in {
1327 // Application level GPR -> FPSCR
1328 def VMSR : MovToVFP<0b0001 /* fpscr */, (outs), (ins GPR:$src),
1329 "vmsr", "\tfpscr, $src", [(int_arm_set_fpscr GPR:$src)]>;
1330 // System level GPR -> FPEXC
1331 def VMSR_FPEXC : MovToVFP<0b1000 /* fpexc */, (outs), (ins GPR:$src),
1332 "vmsr", "\tfpexc, $src", []>;
1333 // System level GPR -> FPSID
1334 def VMSR_FPSID : MovToVFP<0b0000 /* fpsid */, (outs), (ins GPR:$src),
1335 "vmsr", "\tfpsid, $src", []>;
1336 }
1337
1338 //===----------------------------------------------------------------------===//
1339 // Misc.
1340 //
1341
1342 // Materialize FP immediates. VFP3 only.
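// For example "vmov.f32 s0, #1.0" or "vmov.f64 d0, #-2.0"; only values that
// fit the 8-bit encoding accepted by vfp_f32imm/vfp_f64imm can be
// materialized this way.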
1343 let isReMaterializable = 1 in {
1344 def FCONSTD : VFPAI<(outs DPR:$Dd), (ins vfp_f64imm:$imm),
1345 VFPMiscFrm, IIC_fpUNA64,
1346 "vmov", ".f64\t$Dd, $imm",
1347 [(set DPR:$Dd, vfp_f64imm:$imm)]>, Requires<[HasVFP3]> {
1348 bits<5> Dd;
1349 bits<8> imm;
1350
1351 let Inst{27-23} = 0b11101;
1352 let Inst{22} = Dd{4};
1353 let Inst{21-20} = 0b11;
1354 let Inst{19-16} = imm{7-4};
1355 let Inst{15-12} = Dd{3-0};
1356 let Inst{11-9} = 0b101;
1357 let Inst{8} = 1; // Double precision.
1358 let Inst{7-4} = 0b0000;
1359 let Inst{3-0} = imm{3-0};
1360 }
1361
1362 def FCONSTS : VFPAI<(outs SPR:$Sd), (ins vfp_f32imm:$imm),
1363 VFPMiscFrm, IIC_fpUNA32,
1364 "vmov", ".f32\t$Sd, $imm",
1365 [(set SPR:$Sd, vfp_f32imm:$imm)]>, Requires<[HasVFP3]> {
1366 bits<5> Sd;
1367 bits<8> imm;
1368
1369 let Inst{27-23} = 0b11101;
1370 let Inst{22} = Sd{0};
1371 let Inst{21-20} = 0b11;
1372 let Inst{19-16} = imm{7-4};
1373 let Inst{15-12} = Sd{4-1};
1374 let Inst{11-9} = 0b101;
1375 let Inst{8} = 0; // Single precision.
1376 let Inst{7-4} = 0b0000;
1377 let Inst{3-0} = imm{3-0};
1378 }
1379 }
1380
1381 //===----------------------------------------------------------------------===//
1382 // Assembler aliases.
1383 //
1384 // A few mnemonic aliases for pre-unified syntax. We don't guarantee to
1385 // support them all, but supporting at least some of the basics is
1386 // good to be friendly.
1387 def : VFP2MnemonicAlias<"flds", "vldr">;
1388 def : VFP2MnemonicAlias<"fldd", "vldr">;
1389 def : VFP2MnemonicAlias<"fmrs", "vmov">;
1390 def : VFP2MnemonicAlias<"fmsr", "vmov">;
1391 def : VFP2MnemonicAlias<"fsqrts", "vsqrt">;
1392 def : VFP2MnemonicAlias<"fsqrtd", "vsqrt">;
1393 def : VFP2MnemonicAlias<"fadds", "vadd.f32">;
1394 def : VFP2MnemonicAlias<"faddd", "vadd.f64">;
1395 def : VFP2MnemonicAlias<"fmrdd", "vmov">;
1396 def : VFP2MnemonicAlias<"fmrds", "vmov">;
1397 def : VFP2MnemonicAlias<"fmrrd", "vmov">;
1398 def : VFP2MnemonicAlias<"fmdrr", "vmov">;
1399 def : VFP2MnemonicAlias<"fmuls", "vmul.f32">;
1400 def : VFP2MnemonicAlias<"fmuld", "vmul.f64">;
1401 def : VFP2MnemonicAlias<"fnegs", "vneg.f32">;
1402 def : VFP2MnemonicAlias<"fnegd", "vneg.f64">;
1403 def : VFP2MnemonicAlias<"ftosizd", "vcvt.s32.f64">;
1404 def : VFP2MnemonicAlias<"ftosid", "vcvtr.s32.f64">;
1405 def : VFP2MnemonicAlias<"ftosizs", "vcvt.s32.f32">;
1406 def : VFP2MnemonicAlias<"ftosis", "vcvtr.s32.f32">;
1407 def : VFP2MnemonicAlias<"ftouizd", "vcvt.u32.f64">;
1408 def : VFP2MnemonicAlias<"ftouid", "vcvtr.u32.f64">;
1409 def : VFP2MnemonicAlias<"ftouizs", "vcvt.u32.f32">;
1410 def : VFP2MnemonicAlias<"ftouis", "vcvtr.u32.f32">;
1411 def : VFP2MnemonicAlias<"fsitod", "vcvt.f64.s32">;
1412 def : VFP2MnemonicAlias<"fsitos", "vcvt.f32.s32">;
1413 def : VFP2MnemonicAlias<"fuitod", "vcvt.f64.u32">;
1414 def : VFP2MnemonicAlias<"fuitos", "vcvt.f32.u32">;
1415 def : VFP2MnemonicAlias<"fsts", "vstr">;
1416 def : VFP2MnemonicAlias<"fstd", "vstr">;
1417 def : VFP2MnemonicAlias<"fmacd", "vmla.f64">;
1418 def : VFP2MnemonicAlias<"fmacs", "vmla.f32">;
1419 def : VFP2MnemonicAlias<"fcpys", "vmov.f32">;
1420 def : VFP2MnemonicAlias<"fcpyd", "vmov.f64">;
1421 def : VFP2MnemonicAlias<"fcmps", "vcmp.f32">;
1422 def : VFP2MnemonicAlias<"fcmpd", "vcmp.f64">;
1423 def : VFP2MnemonicAlias<"fdivs", "vdiv.f32">;
1424 def : VFP2MnemonicAlias<"fdivd", "vdiv.f64">;
1425 def : VFP2MnemonicAlias<"fmrx", "vmrs">;
1426 def : VFP2MnemonicAlias<"fmxr", "vmsr">;
1427
1428 // Be friendly and accept the old form of zero-compare
1429 def : VFP2InstAlias<"fcmpzd${p} $val", (VCMPZD DPR:$val, pred:$p)>;
1430 def : VFP2InstAlias<"fcmpzs${p} $val", (VCMPZS SPR:$val, pred:$p)>;
1431
1432
1433 def : VFP2InstAlias<"fmstat${p}", (FMSTAT pred:$p)>;
1434 def : VFP2InstAlias<"fadds${p} $Sd, $Sn, $Sm",
1435 (VADDS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
1436 def : VFP2InstAlias<"faddd${p} $Dd, $Dn, $Dm",
1437 (VADDD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
1438 def : VFP2InstAlias<"fsubs${p} $Sd, $Sn, $Sm",
1439 (VSUBS SPR:$Sd, SPR:$Sn, SPR:$Sm, pred:$p)>;
1440 def : VFP2InstAlias<"fsubd${p} $Dd, $Dn, $Dm",
1441 (VSUBD DPR:$Dd, DPR:$Dn, DPR:$Dm, pred:$p)>;
1442
1443 // No need for the size suffix on VSQRT. It's implied by the register classes.
1444 def : VFP2InstAlias<"vsqrt${p} $Sd, $Sm", (VSQRTS SPR:$Sd, SPR:$Sm, pred:$p)>;
1445 def : VFP2InstAlias<"vsqrt${p} $Dd, $Dm", (VSQRTD DPR:$Dd, DPR:$Dm, pred:$p)>;
1446
1447 // VLDR/VSTR accept an optional type suffix.
1448 def : VFP2InstAlias<"vldr${p}.32 $Sd, $addr",
1449 (VLDRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
1450 def : VFP2InstAlias<"vstr${p}.32 $Sd, $addr",
1451 (VSTRS SPR:$Sd, addrmode5:$addr, pred:$p)>;
1452 def : VFP2InstAlias<"vldr${p}.64 $Dd, $addr",
1453 (VLDRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
1454 def : VFP2InstAlias<"vstr${p}.64 $Dd, $addr",
1455 (VSTRD DPR:$Dd, addrmode5:$addr, pred:$p)>;
1456
1457 // VMOV can accept an optional data type suffix of 32 bits or less.
1458 def : VFP2InstAlias<"vmov${p}.8 $Rt, $Sn",
1459 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
1460 def : VFP2InstAlias<"vmov${p}.16 $Rt, $Sn",
1461 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
1462 def : VFP2InstAlias<"vmov${p}.32 $Rt, $Sn",
1463 (VMOVRS GPR:$Rt, SPR:$Sn, pred:$p)>;
1464 def : VFP2InstAlias<"vmov${p}.8 $Sn, $Rt",
1465 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
1466 def : VFP2InstAlias<"vmov${p}.16 $Sn, $Rt",
1467 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
1468 def : VFP2InstAlias<"vmov${p}.32 $Sn, $Rt",
1469 (VMOVSR SPR:$Sn, GPR:$Rt, pred:$p)>;
1470
1471 def : VFP2InstAlias<"vmov${p}.f64 $Rt, $Rt2, $Dn",
1472 (VMOVRRD GPR:$Rt, GPR:$Rt2, DPR:$Dn, pred:$p)>;
1473 def : VFP2InstAlias<"vmov${p}.f64 $Dn, $Rt, $Rt2",
1474 (VMOVDRR DPR:$Dn, GPR:$Rt, GPR:$Rt2, pred:$p)>;
1475
1476 // VMOVS doesn't need the .f32 to disambiguate from the NEON encoding the way
1477 // VMOVD does.
1478 def : VFP2InstAlias<"vmov${p} $Sd, $Sm",
1479 (VMOVS SPR:$Sd, SPR:$Sm, pred:$p)>;