use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use rustc_ast::LlvmAsmDialect;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::{bug, span_bug};
use rustc_span::{Pos, Span};
use rustc_target::abi::*;
use rustc_target::asm::*;

use libc::{c_char, c_uint};
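// Note: both the legacy `llvm_asm!` lowering (`codegen_llvm_inline_asm`) and
// the `asm!` lowering (`codegen_inline_asm`) below funnel into the shared
// `inline_asm_call` helper, which emits the actual LLVM inline-asm call and
// attaches `srcloc` metadata for error reporting. Lifetimes such as 'a, 'll
// and 'tcx appear without per-function declarations; the surrounding crate is
// assumed to enable the `in_band_lifetimes` feature.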
impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn codegen_llvm_inline_asm(
        &mut self,
        ia: &hir::LlvmInlineAsmInner,
        outputs: Vec<PlaceRef<'tcx, &'ll Value>>,
        mut inputs: Vec<&'ll Value>,
        span: Span,
    ) -> bool {
        let mut ext_constraints = vec![];
        let mut output_types = vec![];

        // Prepare the output operands
        let mut indirect_outputs = vec![];
        for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
            if out.is_rw {
                let operand = self.load_operand(place);
                if let OperandValue::Immediate(_) = operand.val {
                    inputs.push(operand.immediate());
                }
                ext_constraints.push(i.to_string());
            }
            if out.is_indirect {
                let operand = self.load_operand(place);
                if let OperandValue::Immediate(_) = operand.val {
                    indirect_outputs.push(operand.immediate());
                }
            } else {
                output_types.push(place.layout.llvm_type(self.cx));
            }
        }
        if !indirect_outputs.is_empty() {
            indirect_outputs.extend_from_slice(&inputs);
            inputs = indirect_outputs;
        }
        let clobbers = ia.clobbers.iter().map(|s| format!("~{{{}}}", &s));

        // Default per-arch clobbers
        // Basically what clang does
        let arch_clobbers = match &self.sess().target.arch[..] {
            "x86" | "x86_64" => &["~{dirflag}", "~{fpsr}", "~{flags}"][..],
            "mips" | "mips64" => &["~{$1}"],
            _ => &[],
        };

        let all_constraints = ia
            .outputs
            .iter()
            .map(|out| out.constraint.to_string())
            .chain(ia.inputs.iter().map(|s| s.to_string()))
            .chain(ext_constraints)
            .chain(clobbers)
            .chain(arch_clobbers.iter().map(|s| (*s).to_string()))
            .collect::<Vec<String>>()
            .join(",");

        debug!("Asm Constraints: {}", &all_constraints);
        // Depending on how many outputs we have, the return type is different
        let num_outputs = output_types.len();
        let output_type = match num_outputs {
            0 => self.type_void(),
            1 => output_types[0],
            _ => self.type_struct(&output_types, false),
        };

        let asm = ia.asm.as_str();
        let r = inline_asm_call(
            self,
            &asm,
            &all_constraints,
            &inputs,
            output_type,
            ia.volatile,
            ia.alignstack,
            ia.dialect,
            &[span],
        );
        if r.is_none() {
            return false;
        }
        let r = r.unwrap();

        // Again, based on how many outputs we have
        let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
        for (i, (_, &place)) in outputs.enumerate() {
            let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) };
            OperandValue::Immediate(v).store(self, place);
        }

        true
    }
    fn codegen_inline_asm(
        &mut self,
        template: &[InlineAsmTemplatePiece],
        operands: &[InlineAsmOperandRef<'tcx, Self>],
        options: InlineAsmOptions,
        line_spans: &[Span],
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();
        // Collect the types of output operands
        let mut constraints = vec![];
        let mut output_types = vec![];
        let mut op_idx = FxHashMap::default();
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::Out { reg, late, place } => {
                    let mut layout = None;
                    let ty = if let Some(ref place) = place {
                        layout = Some(&place.layout);
                        llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
                    } else {
                        // If the output is discarded, we don't really care what
                        // type is used. We're just using this to tell LLVM to
                        // reserve the register.
                        dummy_output_type(self.cx, reg.reg_class())
                    };
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, layout)));
                }
                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
                    let layout = if let Some(ref out_place) = out_place {
                        &out_place.layout
                    } else {
                        // LLVM requires tied operands to have the same type,
                        // so we just use the type of the input.
                        &in_value.layout
                    };
                    let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout);
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
                }
                _ => {}
            }
        }
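        // In LLVM's constraint language "=" marks an output and "&" marks it
        // early-clobber, so the "=&" prefix above stops LLVM from assigning an
        // input to the same register as a non-late output.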
        // Collect input operands
        let mut inputs = vec![];
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::In { reg, value } => {
                    let llval =
                        llvm_fixup_input(self, value.immediate(), reg.reg_class(), &value.layout);
                    inputs.push(llval);
                    op_idx.insert(idx, constraints.len());
                    constraints.push(reg_to_llvm(reg, Some(&value.layout)));
                }
                InlineAsmOperandRef::InOut { reg, late: _, in_value, out_place: _ } => {
                    let value = llvm_fixup_input(
                        self,
                        in_value.immediate(),
                        reg.reg_class(),
                        &in_value.layout,
                    );
                    inputs.push(value);
                    // Tie this input to the corresponding output constraint by index.
                    constraints.push(format!("{}", op_idx[&idx]));
                }
                InlineAsmOperandRef::SymFn { instance } => {
                    inputs.push(self.cx.get_fn(instance));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                InlineAsmOperandRef::SymStatic { def_id } => {
                    inputs.push(self.cx.get_static(def_id));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                _ => {}
            }
        }
        // Build the template string
        let mut template_str = String::new();
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => {
                    if s.contains('$') {
                        for c in s.chars() {
                            if c == '$' {
                                template_str.push_str("$$");
                            } else {
                                template_str.push(c);
                            }
                        }
                    } else {
                        template_str.push_str(s)
                    }
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    match operands[operand_idx] {
                        InlineAsmOperandRef::In { reg, .. }
                        | InlineAsmOperandRef::Out { reg, .. }
                        | InlineAsmOperandRef::InOut { reg, .. } => {
                            let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
                            if let Some(modifier) = modifier {
                                template_str.push_str(&format!(
                                    "${{{}:{}}}",
                                    op_idx[&operand_idx], modifier
                                ));
                            } else {
                                template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx]));
                            }
                        }
                        InlineAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the template
                            template_str.push_str(string);
                        }
                        InlineAsmOperandRef::SymFn { .. }
                        | InlineAsmOperandRef::SymStatic { .. } => {
                            // Only emit the raw symbol name
                            template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
                        }
                    }
                }
            }
        }
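        // LLVM uses a `$`-based template syntax: operand references are
        // emitted as `${N}` or `${N:modifier}`, and literal `$` characters
        // must be escaped as `$$`, which is exactly what the loop above
        // produces.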
        // Clobber the flags registers unless the asm promises to preserve them.
        if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
            match asm_arch {
                InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
                    constraints.push("~{cc}".to_string());
                }
                InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                    constraints.extend_from_slice(&[
                        "~{dirflag}".to_string(),
                        "~{fpsr}".to_string(),
                        "~{flags}".to_string(),
                    ]);
                }
                InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {}
                InlineAsmArch::Nvptx64 => {}
                InlineAsmArch::Hexagon => {}
                InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
                InlineAsmArch::SpirV => {}
                InlineAsmArch::Wasm32 => {}
            }
        }
        if !options.contains(InlineAsmOptions::NOMEM) {
            // This is actually ignored by LLVM, but it's probably best to keep
            // it just in case. LLVM instead uses the ReadOnly/ReadNone
            // attributes on the call instruction to optimize.
            constraints.push("~{memory}".to_string());
        }
        let volatile = !options.contains(InlineAsmOptions::PURE);
        let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
        let output_type = match &output_types[..] {
            [] => self.type_void(),
            [ty] => ty,
            tys => self.type_struct(&tys, false),
        };
        let dialect = match asm_arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64
                if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
            {
                LlvmAsmDialect::Intel
            }
            _ => LlvmAsmDialect::Att,
        };
        let result = inline_asm_call(
            self,
            &template_str,
            &constraints.join(","),
            &inputs,
            output_type,
            volatile,
            alignstack,
            dialect,
            line_spans,
        )
        .unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));
        // Apply attributes so LLVM can optimize around the asm block according
        // to the guarantees made via `options`.
        if options.contains(InlineAsmOptions::PURE) {
            if options.contains(InlineAsmOptions::NOMEM) {
                llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result);
            } else if options.contains(InlineAsmOptions::READONLY) {
                llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
            }
            llvm::Attribute::WillReturn.apply_callsite(llvm::AttributePlace::Function, result);
        } else if options.contains(InlineAsmOptions::NOMEM) {
            llvm::Attribute::InaccessibleMemOnly
                .apply_callsite(llvm::AttributePlace::Function, result);
        } else {
            // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
        }
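        // Note that `volatile` was derived from the absence of PURE above:
        // non-pure asm must be treated as having side effects, while pure asm
        // is left non-volatile so LLVM is free to CSE or dead-code-eliminate
        // it.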
        // Write results to outputs
        for (idx, op) in operands.iter().enumerate() {
            if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
            | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
            {
                let value = if output_types.len() == 1 {
                    result
                } else {
                    self.extract_value(result, op_idx[&idx] as u64)
                };
                let value = llvm_fixup_output(self, value, reg.reg_class(), &place.layout);
                OperandValue::Immediate(value).store(self, place);
            }
        }
    }
}
impl AsmMethods for CodegenCx<'ll, 'tcx> {
    fn codegen_global_asm(&self, ga: &hir::GlobalAsm) {
        let asm = ga.asm.as_str();
        unsafe {
            llvm::LLVMRustAppendModuleInlineAsm(self.llmod, asm.as_ptr().cast(), asm.len());
        }
    }
}
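// Module-level assembly is appended to the LLVM module verbatim; it goes
// through none of the operand or constraint handling used for function-level
// inline asm below.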
fn inline_asm_call(
    bx: &mut Builder<'a, 'll, 'tcx>,
    asm: &str,
    cons: &str,
    inputs: &[&'ll Value],
    output: &'ll llvm::Type,
    volatile: bool,
    alignstack: bool,
    dia: LlvmAsmDialect,
    line_spans: &[Span],
) -> Option<&'ll Value> {
    let volatile = if volatile { llvm::True } else { llvm::False };
    let alignstack = if alignstack { llvm::True } else { llvm::False };
    let argtys = inputs
        .iter()
        .map(|v| {
            debug!("Asm Input Type: {:?}", *v);
            bx.cx.val_ty(*v)
        })
        .collect::<Vec<_>>();

    debug!("Asm Output Type: {:?}", output);
    let fty = bx.cx.type_func(&argtys[..], output);
    unsafe {
        // Ask LLVM to verify that the constraints are well-formed.
        let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
        debug!("constraint verification result: {:?}", constraints_ok);
        if constraints_ok {
            let v = llvm::LLVMRustInlineAsm(
                fty,
                asm.as_ptr().cast(),
                asm.len(),
                cons.as_ptr().cast(),
                cons.len(),
                volatile,
                alignstack,
                llvm::AsmDialect::from_generic(dia),
            );
            let call = bx.call(v, inputs, None);

            // Store mark in a metadata node so we can map LLVM errors
            // back to source locations. See #17552.
            let key = "srcloc";
            let kind = llvm::LLVMGetMDKindIDInContext(
                bx.llcx,
                key.as_ptr() as *const c_char,
                key.len() as c_uint,
            );

            // srcloc contains one integer for each line of assembly code.
            // Unfortunately this isn't enough to encode a full span so instead
            // we just encode the start position of each line.
            // FIXME: Figure out a way to pass the entire line spans.
            let mut srcloc = vec![];
            if dia == LlvmAsmDialect::Intel && line_spans.len() > 1 {
                // LLVM inserts an extra line to add the ".intel_syntax", so add
                // a dummy srcloc entry for it.
                //
                // Don't do this if we only have 1 line span since that may be
                // due to the asm template string coming from a macro. LLVM will
                // default to the first srcloc for lines that don't have an
                // associated srcloc.
                srcloc.push(bx.const_i32(0));
            }
            srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
            let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
            llvm::LLVMSetMetadata(call, kind, md);

            Some(call)
        } else {
            // LLVM has detected an issue with our constraints, bail out
            None
        }
    }
}
/// If the register is an xmm/ymm/zmm register then return its index.
fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::xmm0 as u32
                && reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::ymm0 as u32
                && reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::zmm0 as u32
                && reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
        }
        _ => None,
    }
}
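// For example, xmm3, ymm3 and zmm3 all map to index 3 here; `reg_to_llvm`
// below then chooses the x/y/z prefix from the operand's layout size, so the
// constraint names the register at the width the value actually occupies.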
/// If the register is an AArch64 vector register then return its index.
fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::AArch64(reg)
            if reg as u32 >= AArch64InlineAsmReg::v0 as u32
                && reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
        {
            Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
        }
        _ => None,
    }
}
/// Converts a register class to an LLVM constraint code.
fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>) -> String {
    match reg {
        // For vector registers LLVM wants the register name to match the type size.
        InlineAsmRegOrRegClass::Reg(reg) => {
            if let Some(idx) = xmm_reg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        64 => 'z',
                        32 => 'y',
                        _ => 'x',
                    }
                } else {
                    // We use f32 as the type for discarded outputs
                    'x'
                };
                format!("{{{}mm{}}}", class, idx)
            } else if let Some(idx) = a64_vreg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        16 => 'q',
                        8 => 'd',
                        4 => 's',
                        2 => 'h',
                        1 => 'd', // We fixup i8 to i8x8
                        _ => unreachable!(),
                    }
                } else {
                    // We use i64x2 as the type for discarded outputs
                    'q'
                };
                format!("{{{}{}}}", class, idx)
            } else if reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
                // LLVM doesn't recognize x30
                "{lr}".to_string()
            } else if reg == InlineAsmReg::Arm(ArmInlineAsmReg::r14) {
                // LLVM doesn't recognize r14
                "{lr}".to_string()
            } else {
                format!("{{{}}}", reg.name())
            }
        }
        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => "l",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
            | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
            InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
            InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
                bug!("LLVM backend does not support SPIR-V")
            }
            InlineAsmRegClass::Err => unreachable!(),
        }
        .to_string(),
    }
}
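// For example, a concrete `xmm0` operand holding a 32-byte vector becomes the
// constraint "{ymm0}", while the generic x86 `reg` class is simply "r".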
/// Converts a modifier into LLVM's equivalent modifier.
fn modifier_to_llvm(
    arch: InlineAsmArch,
    reg: InlineAsmRegClass,
    modifier: Option<char>,
) -> Option<char> {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            if modifier == Some('v') { None } else { modifier }
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            if modifier.is_none() {
                Some('q')
            } else {
                modifier
            }
        }
        InlineAsmRegClass::Hexagon(_) => None,
        InlineAsmRegClass::Mips(_) => None,
        InlineAsmRegClass::Nvptx(_) => None,
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
        | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
            None if arch == InlineAsmArch::X86_64 => Some('q'),
            None => Some('k'),
            Some('l') => Some('b'),
            Some('h') => Some('h'),
            Some('x') => Some('w'),
            Some('e') => Some('k'),
            Some('r') => Some('q'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
        InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
            (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
            (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
            (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
            (_, Some('x')) => Some('x'),
            (_, Some('y')) => Some('t'),
            (_, Some('z')) => Some('g'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
        InlineAsmRegClass::Err => unreachable!(),
    }
}
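// For example, on x86_64 an unmodified `reg` operand defaults to the 'q'
// modifier (64-bit register names such as rax), and Rust's 'e' modifier is
// translated to LLVM's 'k' (32-bit names such as eax).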
/// Type to use for outputs that are discarded. It doesn't really matter what
/// the type is, as long as it is valid for the constraint code.
fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll Type {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => cx.type_i32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
        InlineAsmRegClass::Err => unreachable!(),
    }
}
/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type.
fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: &Scalar) -> &'ll Type {
    match scalar.value {
        Primitive::Int(Integer::I8, _) => cx.type_i8(),
        Primitive::Int(Integer::I16, _) => cx.type_i16(),
        Primitive::Int(Integer::I32, _) => cx.type_i32(),
        Primitive::Int(Integer::I64, _) => cx.type_i64(),
        Primitive::F32 => cx.type_f32(),
        Primitive::F64 => cx.type_f64(),
        Primitive::Pointer => cx.type_isize(),
        _ => unreachable!(),
    }
}
/// Fix up an input value to work around LLVM bugs.
fn llvm_fixup_input(
    bx: &mut Builder<'a, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, &layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, s);
            let count = 16 / layout.size.bytes();
            let vec_ty = bx.cx.type_vector(elem_ty, count);
            if let Primitive::Pointer = s.value {
                value = bx.ptrtoint(value, bx.cx.type_isize());
            }
            bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, *count);
            let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_i64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                bx.bitcast(value, bx.cx.type_f32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                bx.bitcast(value, bx.cx.type_f64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetic.
            Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
            Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
            Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
            _ => value,
        },
        _ => value,
    }
}
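// For example, an `i8` passed in an AArch64 `vreg` is widened above to an
// <8 x i8> vector with the value in lane 0, since LLVM does not handle a bare
// i8 in a NEON register well; `llvm_fixup_output` below undoes these
// transformations.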
/// Fix up an output value to work around LLVM bugs.
fn llvm_fixup_output(
    bx: &mut Builder<'a, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, &layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                bx.extract_element(value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            value = bx.extract_element(value, bx.const_i32(0));
            if let Primitive::Pointer = s.value {
                value = bx.inttoptr(value, layout.llvm_type(bx.cx));
            }
            value
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, *count * 2);
            let indices: Vec<_> = (0..*count).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_f64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                bx.bitcast(value, bx.cx.type_i32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                bx.bitcast(value, bx.cx.type_i64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetic.
            Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
            Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
            Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
            Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
            _ => value,
        },
        _ => value,
    }
}
/// Output type to use for llvm_fixup_output.
fn llvm_fixup_output_type(
    cx: &CodegenCx<'ll, 'tcx>,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Type {
    match (reg, &layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                cx.type_vector(cx.type_i8(), 8)
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(cx, s);
            let count = 16 / layout.size.bytes();
            cx.type_vector(elem_ty, count)
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(cx, element);
            cx.type_vector(elem_ty, count * 2)
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            cx.type_i64()
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                cx.type_f32()
            } else {
                layout.llvm_type(cx)
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                cx.type_f64()
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetic.
            Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
            Primitive::F32 => cx.type_i32(),
            Primitive::F64 => cx.type_i64(),
            _ => layout.llvm_type(cx),
        },
        _ => layout.llvm_type(cx),
    }
}