use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use rustc_ast::LlvmAsmDialect;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::{bug, span_bug};
use rustc_span::{Pos, Span};
use rustc_target::abi::*;
use rustc_target::asm::*;

use libc::{c_char, c_uint};
impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn codegen_llvm_inline_asm(
        &mut self,
        ia: &hir::LlvmInlineAsmInner,
        outputs: Vec<PlaceRef<'tcx, &'ll Value>>,
        mut inputs: Vec<&'ll Value>,
        span: Span,
    ) -> bool {
        let mut ext_constraints = vec![];
        let mut output_types = vec![];

        // Prepare the output operands
        let mut indirect_outputs = vec![];
        for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
            if out.is_rw {
                let operand = self.load_operand(place);
                if let OperandValue::Immediate(_) = operand.val {
                    inputs.push(operand.immediate());
                }
                ext_constraints.push(i.to_string());
            }
            if out.is_indirect {
                let operand = self.load_operand(place);
                if let OperandValue::Immediate(_) = operand.val {
                    indirect_outputs.push(operand.immediate());
                }
            } else {
                output_types.push(place.layout.llvm_type(self.cx));
            }
        }
        if !indirect_outputs.is_empty() {
            indirect_outputs.extend_from_slice(&inputs);
            inputs = indirect_outputs;
        }
        let clobbers = ia.clobbers.iter().map(|s| format!("~{{{}}}", &s));

        // Default per-arch clobbers
        // Basically what clang does
        let arch_clobbers = match &self.sess().target.arch[..] {
            "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
            "mips" | "mips64" => vec!["~{$1}"],
            _ => Vec::new(),
        };

        let all_constraints = ia
            .outputs
            .iter()
            .map(|out| out.constraint.to_string())
            .chain(ia.inputs.iter().map(|s| s.to_string()))
            .chain(ext_constraints)
            .chain(clobbers)
            .chain(arch_clobbers.iter().map(|s| (*s).to_string()))
            .collect::<Vec<String>>()
            .join(",");

        debug!("Asm Constraints: {}", &all_constraints);
        // Depending on how many outputs we have, the return type is different
        let num_outputs = output_types.len();
        let output_type = match num_outputs {
            0 => self.type_void(),
            1 => output_types[0],
            _ => self.type_struct(&output_types, false),
        };

        let asm = ia.asm.as_str();
        let r = inline_asm_call(
            self,
            &asm,
            &all_constraints,
            &inputs,
            output_type,
            ia.volatile,
            ia.alignstack,
            ia.dialect,
            &[span],
        );
        if r.is_none() {
            return false;
        }
        let r = r.unwrap();
        // Again, based on how many outputs we have
        let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
        for (i, (_, &place)) in outputs.enumerate() {
            let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) };
            OperandValue::Immediate(v).store(self, place);
        }

        true
    }
    fn codegen_inline_asm(
        &mut self,
        template: &[InlineAsmTemplatePiece],
        operands: &[InlineAsmOperandRef<'tcx, Self>],
        options: InlineAsmOptions,
        line_spans: &[Span],
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();
        // Collect the types of output operands
        let mut constraints = vec![];
        let mut output_types = vec![];
        let mut op_idx = FxHashMap::default();
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::Out { reg, late, place } => {
                    let mut layout = None;
                    let ty = if let Some(ref place) = place {
                        layout = Some(&place.layout);
                        llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
                    } else {
                        // If the output is discarded, we don't really care what
                        // type is used. We're just using this to tell LLVM to
                        // reserve the register.
                        dummy_output_type(self.cx, reg.reg_class())
                    };
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, layout)));
                }
                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
                    let layout = if let Some(ref out_place) = out_place {
                        &out_place.layout
                    } else {
                        // LLVM requires tied operands to have the same type,
                        // so we just use the type of the input.
                        &in_value.layout
                    };
                    let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout);
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
                }
                _ => {}
            }
        }
        // Collect input operands
        let mut inputs = vec![];
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::In { reg, value } => {
                    let value =
                        llvm_fixup_input(self, value.immediate(), reg.reg_class(), &value.layout);
                    inputs.push(value);
                    op_idx.insert(idx, constraints.len());
                    constraints.push(reg_to_llvm(reg, Some(&value.layout)));
                }
                InlineAsmOperandRef::InOut { reg, late: _, in_value, out_place: _ } => {
                    let value = llvm_fixup_input(
                        self,
                        in_value.immediate(),
                        reg.reg_class(),
                        &in_value.layout,
                    );
                    inputs.push(value);
                    constraints.push(format!("{}", op_idx[&idx]));
                }
                InlineAsmOperandRef::SymFn { instance } => {
                    inputs.push(self.cx.get_fn(instance));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                InlineAsmOperandRef::SymStatic { def_id } => {
                    inputs.push(self.cx.get_static(def_id));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                _ => {}
            }
        }
        // Build the template string
        let mut template_str = String::new();
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => {
                    if s.contains('$') {
                        for c in s.chars() {
                            if c == '$' {
                                template_str.push_str("$$");
                            } else {
                                template_str.push(c);
                            }
                        }
                    } else {
                        template_str.push_str(s)
                    }
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    match operands[operand_idx] {
                        InlineAsmOperandRef::In { reg, .. }
                        | InlineAsmOperandRef::Out { reg, .. }
                        | InlineAsmOperandRef::InOut { reg, .. } => {
                            let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
                            if let Some(modifier) = modifier {
                                template_str.push_str(&format!(
                                    "${{{}:{}}}",
                                    op_idx[&operand_idx], modifier
                                ));
                            } else {
                                template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx]));
                            }
                        }
                        InlineAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the template
                            template_str.push_str(string);
                        }
                        InlineAsmOperandRef::SymFn { .. }
                        | InlineAsmOperandRef::SymStatic { .. } => {
                            // Only emit the raw symbol name
                            template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
                        }
                    }
                }
            }
        }
        // Clobber the flag/condition registers unless they are explicitly preserved
        if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
            match asm_arch {
                InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
                    constraints.push("~{cc}".to_string());
                }
                InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                    constraints.extend_from_slice(&[
                        "~{dirflag}".to_string(),
                        "~{fpsr}".to_string(),
                        "~{flags}".to_string(),
                    ]);
                }
                InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {}
                InlineAsmArch::Nvptx64 => {}
                InlineAsmArch::Hexagon => {}
                InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
                InlineAsmArch::SpirV => {}
                InlineAsmArch::Wasm32 => {}
            }
        }
        if !options.contains(InlineAsmOptions::NOMEM) {
            // This is actually ignored by LLVM, but it's probably best to keep
            // it just in case. LLVM instead uses the ReadOnly/ReadNone
            // attributes on the call instruction to optimize.
            constraints.push("~{memory}".to_string());
        }
        let volatile = !options.contains(InlineAsmOptions::PURE);
        let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
        let output_type = match &output_types[..] {
            [] => self.type_void(),
            [ty] => ty,
            tys => self.type_struct(&tys, false),
        };
        let dialect = match asm_arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64
                if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
            {
                LlvmAsmDialect::Intel
            }
            _ => LlvmAsmDialect::Att,
        };
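        // Note (added for clarity): LLVM parses x86 inline asm as AT&T syntax by
        // default, so Intel syntax (the asm! default) has to be requested
        // explicitly here unless the user passed the `att_syntax` option.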
        let result = inline_asm_call(
            self,
            &template_str,
            &constraints.join(","),
            &inputs,
            output_type,
            volatile,
            alignstack,
            dialect,
            line_spans,
        )
        .unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));

        if options.contains(InlineAsmOptions::PURE) {
            if options.contains(InlineAsmOptions::NOMEM) {
                llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result);
            } else if options.contains(InlineAsmOptions::READONLY) {
                llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
            }
        } else if options.contains(InlineAsmOptions::NOMEM) {
            llvm::Attribute::InaccessibleMemOnly
                .apply_callsite(llvm::AttributePlace::Function, result);
        } else {
            // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
        }
        // Write results to outputs
        for (idx, op) in operands.iter().enumerate() {
            if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
            | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
            {
                let value = if output_types.len() == 1 {
                    result
                } else {
                    self.extract_value(result, op_idx[&idx] as u64)
                };
                let value = llvm_fixup_output(self, value, reg.reg_class(), &place.layout);
                OperandValue::Immediate(value).store(self, place);
            }
        }
    }
}
impl AsmMethods for CodegenCx<'ll, 'tcx> {
    fn codegen_global_asm(&self, ga: &hir::GlobalAsm) {
        let asm = ga.asm.as_str();
        unsafe {
            llvm::LLVMRustAppendModuleInlineAsm(self.llmod, asm.as_ptr().cast(), asm.len());
        }
    }
}
fn inline_asm_call(
    bx: &mut Builder<'a, 'll, 'tcx>,
    asm: &str,
    cons: &str,
    inputs: &[&'ll Value],
    output: &'ll llvm::Type,
    volatile: bool,
    alignstack: bool,
    dia: LlvmAsmDialect,
    line_spans: &[Span],
) -> Option<&'ll Value> {
    let volatile = if volatile { llvm::True } else { llvm::False };
    let alignstack = if alignstack { llvm::True } else { llvm::False };

    let argtys = inputs
        .iter()
        .map(|v| {
            debug!("Asm Input Type: {:?}", *v);
            bx.cx.val_ty(*v)
        })
        .collect::<Vec<_>>();
    debug!("Asm Output Type: {:?}", output);
    let fty = bx.cx.type_func(&argtys[..], output);
    unsafe {
        // Ask LLVM to verify that the constraints are well-formed.
        let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
        debug!("constraint verification result: {:?}", constraints_ok);
        if constraints_ok {
            let v = llvm::LLVMRustInlineAsm(
                fty,
                asm.as_ptr().cast(),
                asm.len(),
                cons.as_ptr().cast(),
                cons.len(),
                volatile,
                alignstack,
                llvm::AsmDialect::from_generic(dia),
            );
            let call = bx.call(v, inputs, None);
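            // Note (added for clarity): LLVM models inline asm as a callable
            // value, so the asm expression is invoked with an ordinary call
            // whose arguments are the inputs and whose (possibly aggregate)
            // return value carries the outputs.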
            // Store mark in a metadata node so we can map LLVM errors
            // back to source locations. See #17552.
            let key = "srcloc";
            let kind = llvm::LLVMGetMDKindIDInContext(
                bx.llcx,
                key.as_ptr() as *const c_char,
                key.len() as c_uint,
            );

            // srcloc contains one integer for each line of assembly code.
            // Unfortunately this isn't enough to encode a full span so instead
            // we just encode the start position of each line.
            // FIXME: Figure out a way to pass the entire line spans.
            let mut srcloc = vec![];
            if dia == LlvmAsmDialect::Intel && line_spans.len() > 1 {
                // LLVM inserts an extra line to add the ".intel_syntax", so add
                // a dummy srcloc entry for it.
                //
                // Don't do this if we only have 1 line span since that may be
                // due to the asm template string coming from a macro. LLVM will
                // default to the first srcloc for lines that don't have an
                // associated srcloc.
                srcloc.push(bx.const_i32(0));
            }
            srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
            let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
            llvm::LLVMSetMetadata(call, kind, md);
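            // Illustrative note (not in the original source): the call ends up
            // carrying metadata along the lines of `!srcloc !{i32 <lo0>, i32 <lo1>, ...}`,
            // which LLVM echoes back in inline asm diagnostics so they can be
            // resolved to the originating Rust lines.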

            Some(call)
        } else {
            // LLVM has detected an issue with our constraints, bail out
            None
        }
    }
}
/// If the register is an xmm/ymm/zmm register then return its index.
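/// For example (illustrative), `xmm0`, `ymm0` and `zmm0` all map to index 0,
/// and `zmm31` maps to index 31; the three banks share one index space.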
fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::xmm0 as u32
                && reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::ymm0 as u32
                && reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::zmm0 as u32
                && reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
        }
        _ => None,
    }
}
/// If the register is an AArch64 vector register then return its index.
fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::AArch64(reg)
            if reg as u32 >= AArch64InlineAsmReg::v0 as u32
                && reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
        {
            Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
        }
        _ => None,
    }
}
/// Converts a register class to an LLVM constraint code.
fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>) -> String {
    match reg {
        // For vector registers LLVM wants the register name to match the type size.
        InlineAsmRegOrRegClass::Reg(reg) => {
            if let Some(idx) = xmm_reg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        64 => 'z',
                        32 => 'y',
                        _ => 'x',
                    }
                } else {
                    // We use f32 as the type for discarded outputs
                    'x'
                };
                format!("{{{}mm{}}}", class, idx)
            } else if let Some(idx) = a64_vreg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        16 => 'q',
                        8 => 'd',
                        4 => 's',
                        2 => 'h',
                        1 => 'd', // We fixup i8 to i8x8
                        _ => unreachable!(),
                    }
                } else {
                    // We use i64x2 as the type for discarded outputs
                    'q'
                };
                format!("{{{}{}}}", class, idx)
            } else if reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
                // LLVM doesn't recognize x30
                "{lr}".to_string()
            } else {
                format!("{{{}}}", reg.name())
            }
        }
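        // Illustrative note (not in the original source): for explicit x86
        // vector registers the emitted name tracks the operand size, so index 0
        // becomes "{xmm0}" for a 16-byte value, "{ymm0}" for 32 bytes and
        // "{zmm0}" for 64 bytes; any other register is emitted as "{<name>}".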
        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => "l",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
            | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
            InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
            InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
                bug!("LLVM backend does not support SPIR-V")
            }
        }
        .to_string(),
    }
}
/// Converts a modifier into LLVM's equivalent modifier.
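/// For example (illustrative), an x86_64 `reg`-class operand with no explicit
/// modifier is given LLVM's `q` modifier (full 64-bit register name), and the
/// Rust modifier `e` is translated to LLVM's `k` (32-bit subregister).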
fn modifier_to_llvm(
    arch: InlineAsmArch,
    reg: InlineAsmRegClass,
    modifier: Option<char>,
) -> Option<char> {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            if modifier == Some('v') { None } else { modifier }
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            if modifier.is_none() {
                Some('q')
            } else {
                modifier
            }
        }
        InlineAsmRegClass::Hexagon(_) => None,
        InlineAsmRegClass::Mips(_) => None,
        InlineAsmRegClass::Nvptx(_) => None,
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
        | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
            None if arch == InlineAsmArch::X86_64 => Some('q'),
            None => Some('k'),
            Some('l') => Some('b'),
            Some('h') => Some('h'),
            Some('x') => Some('w'),
            Some('e') => Some('k'),
            Some('r') => Some('q'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
        InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
            (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
            (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
            (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
            (_, Some('x')) => Some('x'),
            (_, Some('y')) => Some('t'),
            (_, Some('z')) => Some('g'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
    }
}
/// Type to use for outputs that are discarded. It doesn't really matter what
/// the type is, as long as it is valid for the constraint code.
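/// For instance (illustrative), a discarded x86 SSE/AVX output gets an `f32`
/// dummy type and a discarded AArch64 vector output gets an `i64x2`.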
fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll Type {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => cx.type_i32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
    }
}
/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type.
fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: &Scalar) -> &'ll Type {
    match scalar.value {
        Primitive::Int(Integer::I8, _) => cx.type_i8(),
        Primitive::Int(Integer::I16, _) => cx.type_i16(),
        Primitive::Int(Integer::I32, _) => cx.type_i32(),
        Primitive::Int(Integer::I64, _) => cx.type_i64(),
        Primitive::F32 => cx.type_f32(),
        Primitive::F64 => cx.type_f64(),
        Primitive::Pointer => cx.type_isize(),
        _ => unreachable!(),
    }
}
/// Fix up an input value to work around LLVM bugs.
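/// For example (illustrative), an `i8` destined for an AArch64 `vreg` operand
/// is widened here to an `<8 x i8>` vector with the value inserted in lane 0,
/// matching the "We fixup i8 to i8x8" note in `reg_to_llvm`.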
fn llvm_fixup_input(
    bx: &mut Builder<'a, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, &layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, s);
            let count = 16 / layout.size.bytes();
            let vec_ty = bx.cx.type_vector(elem_ty, count);
            if let Primitive::Pointer = s.value {
                value = bx.ptrtoint(value, bx.cx.type_isize());
            }
            bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, *count);
            let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_i64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                bx.bitcast(value, bx.cx.type_f32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                bx.bitcast(value, bx.cx.type_f64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetic.
            Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
            Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
            Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
            _ => value,
        },
        _ => value,
    }
}
/// Fix up an output value to work around LLVM bugs.
fn llvm_fixup_output(
    bx: &mut Builder<'a, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, &layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                bx.extract_element(value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            value = bx.extract_element(value, bx.const_i32(0));
            if let Primitive::Pointer = s.value {
                value = bx.inttoptr(value, layout.llvm_type(bx.cx));
            }
            value
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, *count * 2);
            let indices: Vec<_> = (0..*count).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_f64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                bx.bitcast(value, bx.cx.type_i32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                bx.bitcast(value, bx.cx.type_i64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetic.
            Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
            Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
            Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
            Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
            _ => value,
        },
        _ => value,
    }
}
/// Output type to use for llvm_fixup_output.
fn llvm_fixup_output_type(
    cx: &CodegenCx<'ll, 'tcx>,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Type {
    match (reg, &layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                cx.type_vector(cx.type_i8(), 8)
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(cx, s);
            let count = 16 / layout.size.bytes();
            cx.type_vector(elem_ty, count)
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(cx, element);
            cx.type_vector(elem_ty, count * 2)
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            cx.type_i64()
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                cx.type_f32()
            } else {
                layout.llvm_type(cx)
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                cx.type_f64()
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetic.
            Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
            Primitive::F32 => cx.type_i32(),
            Primitive::F64 => cx.type_i64(),
            _ => layout.llvm_type(cx),
        },
        _ => layout.llvm_type(cx),
    }
}