use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;

use rustc_ast::LlvmAsmDialect;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::{bug, span_bug};
use rustc_span::{Pos, Span, Symbol};
use rustc_target::abi::*;
use rustc_target::asm::*;

use libc::{c_char, c_uint};

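// Lowering of both the legacy `llvm_asm!` form and the `asm!` form. Both
// paths build an LLVM constraint string and template string, then bottom
// out in `inline_asm_call` below.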
impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
    fn codegen_llvm_inline_asm(
        &mut self,
        ia: &hir::LlvmInlineAsmInner,
        outputs: Vec<PlaceRef<'tcx, &'ll Value>>,
        mut inputs: Vec<&'ll Value>,
        span: Span,
    ) -> bool {
        let mut ext_constraints = vec![];
        let mut output_types = vec![];

        // Prepare the output operands
        let mut indirect_outputs = vec![];
        for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
            if out.is_rw {
                let operand = self.load_operand(place);
                if let OperandValue::Immediate(_) = operand.val {
                    inputs.push(operand.immediate());
                }
                ext_constraints.push(i.to_string());
            }
            if out.is_indirect {
                let operand = self.load_operand(place);
                if let OperandValue::Immediate(_) = operand.val {
                    indirect_outputs.push(operand.immediate());
                }
            } else {
                output_types.push(place.layout.llvm_type(self.cx));
            }
        }
        if !indirect_outputs.is_empty() {
            indirect_outputs.extend_from_slice(&inputs);
            inputs = indirect_outputs;
        }

        let clobbers = ia.clobbers.iter().map(|s| format!("~{{{}}}", &s));

        // Default per-arch clobbers
        // Basically what clang does
        let arch_clobbers = match &self.sess().target.arch[..] {
            "x86" | "x86_64" => &["~{dirflag}", "~{fpsr}", "~{flags}"][..],
            "mips" | "mips64" => &["~{$1}"],
            _ => &[],
        };

        let all_constraints = ia
            .outputs
            .iter()
            .map(|out| out.constraint.to_string())
            .chain(ia.inputs.iter().map(|s| s.to_string()))
            .chain(ext_constraints)
            .chain(clobbers)
            .chain(arch_clobbers.iter().map(|s| (*s).to_string()))
            .collect::<Vec<String>>()
            .join(",");

        debug!("Asm Constraints: {}", &all_constraints);

        // Depending on how many outputs we have, the return type is different
        let num_outputs = output_types.len();
        let output_type = match num_outputs {
            0 => self.type_void(),
            1 => output_types[0],
            _ => self.type_struct(&output_types, false),
        };

        let asm = ia.asm.as_str();
        let r = inline_asm_call(
            self,
            &asm,
            &all_constraints,
            &inputs,
            output_type,
            ia.volatile,
            ia.alignstack,
            ia.dialect,
            &[span],
        );
        if r.is_none() {
            return false;
        }
        let r = r.unwrap();

        // Again, based on how many outputs we have
        let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
        for (i, (_, &place)) in outputs.enumerate() {
            let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) };
            OperandValue::Immediate(v).store(self, place);
        }

        true
    }

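    // Lowers an `asm!` invocation: collects output and input operands into
    // LLVM constraint codes, escapes the template string, and maps the
    // result value(s) of the asm call back onto the output places.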
    fn codegen_inline_asm(
        &mut self,
        template: &[InlineAsmTemplatePiece],
        operands: &[InlineAsmOperandRef<'tcx, Self>],
        options: InlineAsmOptions,
        line_spans: &[Span],
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();

        // Collect the types of output operands
        let mut constraints = vec![];
        let mut clobbers = vec![];
        let mut output_types = vec![];
        let mut op_idx = FxHashMap::default();
        let mut clobbered_x87 = false;
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::Out { reg, late, place } => {
                    let is_target_supported = |reg_class: InlineAsmRegClass| {
                        for &(_, feature) in reg_class.supported_types(asm_arch) {
                            if let Some(feature) = feature {
                                if self.tcx.sess.target_features.contains(&Symbol::intern(feature))
                                {
                                    return true;
                                }
                            } else {
                                // Register class is unconditionally supported
                                return true;
                            }
                        }
                        false
                    };

                    let mut layout = None;
                    let ty = if let Some(ref place) = place {
                        layout = Some(&place.layout);
                        llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
                    } else if matches!(
                        reg.reg_class(),
                        InlineAsmRegClass::X86(
                            X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::x87_reg
                        )
                    ) {
                        // Special handling for x87/mmx registers: we always
                        // clobber the whole set if one register is marked as
                        // clobbered. This is due to the way LLVM handles the
                        // FP stack in inline assembly.
                        if !clobbered_x87 {
                            clobbered_x87 = true;
                            clobbers.push("~{st}".to_string());
                            for i in 1..=7 {
                                clobbers.push(format!("~{{st({})}}", i));
                            }
                        }
                        continue;
                    } else if !is_target_supported(reg.reg_class())
                        || reg.reg_class().is_clobber_only(asm_arch)
                    {
                        // We turn discarded outputs into clobber constraints
                        // if the target feature needed by the register class is
                        // disabled. This is necessary otherwise LLVM will try
                        // to actually allocate a register for the dummy output.
                        assert!(matches!(reg, InlineAsmRegOrRegClass::Reg(_)));
                        clobbers.push(format!("~{}", reg_to_llvm(reg, None)));
                        continue;
                    } else {
                        // If the output is discarded, we don't really care what
                        // type is used. We're just using this to tell LLVM to
                        // reserve the register.
                        dummy_output_type(self.cx, reg.reg_class())
                    };
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, layout)));
                }
                InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
                    let layout = if let Some(ref out_place) = out_place {
                        &out_place.layout
                    } else {
                        // LLVM required tied operands to have the same type,
                        // so we just use the type of the input.
                        &in_value.layout
                    };
                    let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout);
                    output_types.push(ty);
                    op_idx.insert(idx, constraints.len());
                    let prefix = if late { "=" } else { "=&" };
                    constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
                }
                _ => {}
            }
        }

        // Collect input operands
        let mut inputs = vec![];
        for (idx, op) in operands.iter().enumerate() {
            match *op {
                InlineAsmOperandRef::In { reg, value } => {
                    let llval =
                        llvm_fixup_input(self, value.immediate(), reg.reg_class(), &value.layout);
                    inputs.push(llval);
                    op_idx.insert(idx, constraints.len());
                    constraints.push(reg_to_llvm(reg, Some(&value.layout)));
                }
                InlineAsmOperandRef::InOut { reg, late: _, in_value, out_place: _ } => {
                    let value = llvm_fixup_input(
                        self,
                        in_value.immediate(),
                        reg.reg_class(),
                        &in_value.layout,
                    );
                    inputs.push(value);
                    constraints.push(format!("{}", op_idx[&idx]));
                }
                InlineAsmOperandRef::SymFn { instance } => {
                    inputs.push(self.cx.get_fn(instance));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                InlineAsmOperandRef::SymStatic { def_id } => {
                    inputs.push(self.cx.get_static(def_id));
                    op_idx.insert(idx, constraints.len());
                    constraints.push("s".to_string());
                }
                _ => {}
            }
        }

        // Build the template string
        let mut template_str = String::new();
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => {
                    if s.contains('$') {
                        for c in s.chars() {
                            if c == '$' {
                                template_str.push_str("$$");
                            } else {
                                template_str.push(c);
                            }
                        }
                    } else {
                        template_str.push_str(s)
                    }
                }
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
                    match operands[operand_idx] {
                        InlineAsmOperandRef::In { reg, .. }
                        | InlineAsmOperandRef::Out { reg, .. }
                        | InlineAsmOperandRef::InOut { reg, .. } => {
                            let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
                            if let Some(modifier) = modifier {
                                template_str.push_str(&format!(
                                    "${{{}:{}}}",
                                    op_idx[&operand_idx], modifier
                                ));
                            } else {
                                template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx]));
                            }
                        }
                        InlineAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the template
                            template_str.push_str(string);
                        }
                        InlineAsmOperandRef::SymFn { .. }
                        | InlineAsmOperandRef::SymStatic { .. } => {
                            // Only emit the raw symbol name
                            template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
                        }
                    }
                }
            }
        }

        constraints.append(&mut clobbers);
        if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
            match asm_arch {
                InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
                    constraints.push("~{cc}".to_string());
                }
                InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
                    constraints.extend_from_slice(&[
                        "~{dirflag}".to_string(),
                        "~{fpsr}".to_string(),
                        "~{flags}".to_string(),
                    ]);
                }
                InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {
                    constraints.extend_from_slice(&[
                        "~{vtype}".to_string(),
                        "~{vl}".to_string(),
                        "~{vxsat}".to_string(),
                        "~{vxrm}".to_string(),
                    ]);
                }
                InlineAsmArch::Nvptx64 => {}
                InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {}
                InlineAsmArch::Hexagon => {}
                InlineAsmArch::Mips | InlineAsmArch::Mips64 => {}
                InlineAsmArch::S390x => {}
                InlineAsmArch::SpirV => {}
                InlineAsmArch::Wasm32 => {}
                InlineAsmArch::Bpf => {}
            }
        }
        if !options.contains(InlineAsmOptions::NOMEM) {
            // This is actually ignored by LLVM, but it's probably best to keep
            // it just in case. LLVM instead uses the ReadOnly/ReadNone
            // attributes on the call instruction to optimize.
            constraints.push("~{memory}".to_string());
        }
        let volatile = !options.contains(InlineAsmOptions::PURE);
        let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
        let output_type = match &output_types[..] {
            [] => self.type_void(),
            [ty] => ty,
            tys => self.type_struct(&tys, false),
        };
        let dialect = match asm_arch {
            InlineAsmArch::X86 | InlineAsmArch::X86_64
                if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
            {
                LlvmAsmDialect::Intel
            }
            _ => LlvmAsmDialect::Att,
        };
        let result = inline_asm_call(
            self,
            &template_str,
            &constraints.join(","),
            &inputs,
            output_type,
            volatile,
            alignstack,
            dialect,
            line_spans,
        )
        .unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));

        if options.contains(InlineAsmOptions::PURE) {
            if options.contains(InlineAsmOptions::NOMEM) {
                llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result);
            } else if options.contains(InlineAsmOptions::READONLY) {
                llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
            }
            llvm::Attribute::WillReturn.apply_callsite(llvm::AttributePlace::Function, result);
        } else if options.contains(InlineAsmOptions::NOMEM) {
            llvm::Attribute::InaccessibleMemOnly
                .apply_callsite(llvm::AttributePlace::Function, result);
        } else {
            // LLVM doesn't have an attribute to represent ReadOnly + SideEffect
        }

        // Write results to outputs
        for (idx, op) in operands.iter().enumerate() {
            if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
            | InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
            {
                let value = if output_types.len() == 1 {
                    result
                } else {
                    self.extract_value(result, op_idx[&idx] as u64)
                };
                let value = llvm_fixup_output(self, value, reg.reg_class(), &place.layout);
                OperandValue::Immediate(value).store(self, place);
            }
        }
    }
}

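// Module-level (global) assembly is emitted once per module rather than
// inside a function body, so the only supported operands are compile-time
// constants injected directly into the template.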
impl AsmMethods for CodegenCx<'ll, 'tcx> {
    fn codegen_global_asm(
        &self,
        template: &[InlineAsmTemplatePiece],
        operands: &[GlobalAsmOperandRef],
        options: InlineAsmOptions,
        _line_spans: &[Span],
    ) {
        let asm_arch = self.tcx.sess.asm_arch.unwrap();

        // Default to Intel syntax on x86
        let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64)
            && !options.contains(InlineAsmOptions::ATT_SYNTAX);

        // Build the template string
        let mut template_str = String::new();
        if intel_syntax {
            template_str.push_str(".intel_syntax\n");
        }
        for piece in template {
            match *piece {
                InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s),
                InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
                    match operands[operand_idx] {
                        GlobalAsmOperandRef::Const { ref string } => {
                            // Const operands get injected directly into the
                            // template. Note that we don't need to escape $
                            // here unlike normal inline assembly.
                            template_str.push_str(string);
                        }
                    }
                }
            }
        }
        if intel_syntax {
            template_str.push_str("\n.att_syntax\n");
        }

        unsafe {
            llvm::LLVMRustAppendModuleInlineAsm(
                self.llmod,
                template_str.as_ptr().cast(),
                template_str.len(),
            );
        }
    }
}

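/// Emits the actual inline asm call: asks LLVM to verify the constraint
/// string, builds the inline asm value, calls it, and attaches `srcloc`
/// metadata so LLVM diagnostics can be mapped back to the source lines.
/// Returns `None` if LLVM rejects the constraint string.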
pub(crate) fn inline_asm_call(
    bx: &mut Builder<'a, 'll, 'tcx>,
    asm: &str,
    cons: &str,
    inputs: &[&'ll Value],
    output: &'ll llvm::Type,
    volatile: bool,
    alignstack: bool,
    dia: LlvmAsmDialect,
    line_spans: &[Span],
) -> Option<&'ll Value> {
    let volatile = if volatile { llvm::True } else { llvm::False };
    let alignstack = if alignstack { llvm::True } else { llvm::False };

    let argtys = inputs
        .iter()
        .map(|v| {
            debug!("Asm Input Type: {:?}", *v);
            bx.cx.val_ty(*v)
        })
        .collect::<Vec<_>>();

    debug!("Asm Output Type: {:?}", output);
    let fty = bx.cx.type_func(&argtys[..], output);
    unsafe {
        // Ask LLVM to verify that the constraints are well-formed.
        let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
        debug!("constraint verification result: {:?}", constraints_ok);
        if constraints_ok {
            let v = llvm::LLVMRustInlineAsm(
                fty,
                asm.as_ptr().cast(),
                asm.len(),
                cons.as_ptr().cast(),
                cons.len(),
                volatile,
                alignstack,
                llvm::AsmDialect::from_generic(dia),
            );
            let call = bx.call(fty, v, inputs, None);

            // Store mark in a metadata node so we can map LLVM errors
            // back to source locations. See #17552.
            let key = "srcloc";
            let kind = llvm::LLVMGetMDKindIDInContext(
                bx.llcx,
                key.as_ptr() as *const c_char,
                key.len() as c_uint,
            );

            // srcloc contains one integer for each line of assembly code.
            // Unfortunately this isn't enough to encode a full span so instead
            // we just encode the start position of each line.
            // FIXME: Figure out a way to pass the entire line spans.
            let mut srcloc = vec![];
            if dia == LlvmAsmDialect::Intel && line_spans.len() > 1 {
                // LLVM inserts an extra line to add the ".intel_syntax", so add
                // a dummy srcloc entry for it.
                //
                // Don't do this if we only have 1 line span since that may be
                // due to the asm template string coming from a macro. LLVM will
                // default to the first srcloc for lines that don't have an
                // associated srcloc.
                srcloc.push(bx.const_i32(0));
            }
            srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
            let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
            llvm::LLVMSetMetadata(call, kind, md);

            Some(call)
        } else {
            // LLVM has detected an issue with our constraints, bail out
            None
        }
    }
}

/// If the register is an xmm/ymm/zmm register then return its index.
fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::xmm0 as u32
                && reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::ymm0 as u32
                && reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
        }
        InlineAsmReg::X86(reg)
            if reg as u32 >= X86InlineAsmReg::zmm0 as u32
                && reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
        {
            Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
        }
        _ => None,
    }
}

/// If the register is an AArch64 vector register then return its index.
fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
    match reg {
        InlineAsmReg::AArch64(reg)
            if reg as u32 >= AArch64InlineAsmReg::v0 as u32
                && reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
        {
            Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
        }
        _ => None,
    }
}

/// Converts a register class to an LLVM constraint code.
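/// Explicit registers become `{name}` constraints (e.g. `{rax}`), with the
/// name adjusted to the operand size for vector registers; register classes
/// become LLVM's single-letter constraint codes (e.g. `r`).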
fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>) -> String {
    match reg {
        // For vector registers LLVM wants the register name to match the type size.
        InlineAsmRegOrRegClass::Reg(reg) => {
            if let Some(idx) = xmm_reg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        64 => 'z',
                        32 => 'y',
                        _ => 'x',
                    }
                } else {
                    // We use f32 as the type for discarded outputs
                    'x'
                };
                format!("{{{}mm{}}}", class, idx)
            } else if let Some(idx) = a64_vreg_index(reg) {
                let class = if let Some(layout) = layout {
                    match layout.size.bytes() {
                        16 => 'q',
                        8 => 'd',
                        4 => 's',
                        2 => 'h',
                        1 => 'd', // We fixup i8 to i8x8
                        _ => unreachable!(),
                    }
                } else {
                    // We use i64x2 as the type for discarded outputs
                    'q'
                };
                format!("{{{}{}}}", class, idx)
            } else if reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
                // LLVM doesn't recognize x30
                "{lr}".to_string()
            } else if reg == InlineAsmReg::Arm(ArmInlineAsmReg::r14) {
                // LLVM doesn't recognize r14
                "{lr}".to_string()
            } else {
                format!("{{{}}}", reg.name())
            }
        }
        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => "l",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
            | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
                unreachable!("clobber-only")
            }
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
            | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
            InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
            InlineAsmRegClass::X86(
                X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg,
            ) => unreachable!("clobber-only"),
            InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
            InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
                bug!("LLVM backend does not support SPIR-V")
            }
            InlineAsmRegClass::Err => unreachable!(),
        }
        .to_string(),
    }
}

/// Converts a modifier into LLVM's equivalent modifier.
fn modifier_to_llvm(
    arch: InlineAsmArch,
    reg: InlineAsmRegClass,
    modifier: Option<char>,
) -> Option<char> {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            if modifier == Some('v') { None } else { modifier }
        }
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            if modifier.is_none() {
                Some('q')
            } else {
                modifier
            }
        }
        InlineAsmRegClass::Hexagon(_) => None,
        InlineAsmRegClass::Mips(_) => None,
        InlineAsmRegClass::Nvptx(_) => None,
        InlineAsmRegClass::PowerPC(_) => None,
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
        | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
            None if arch == InlineAsmArch::X86_64 => Some('q'),
            None => Some('k'),
            Some('l') => Some('b'),
            Some('h') => Some('h'),
            Some('x') => Some('w'),
            Some('e') => Some('k'),
            Some('r') => Some('q'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
        InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
            (X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
            (X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
            (X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
            (_, Some('x')) => Some('x'),
            (_, Some('y')) => Some('t'),
            (_, Some('z')) => Some('g'),
            _ => unreachable!(),
        },
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
        InlineAsmRegClass::Bpf(_) => None,
        InlineAsmRegClass::S390x(_) => None,
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
        InlineAsmRegClass::Err => unreachable!(),
    }
}

/// Type to use for outputs that are discarded. It doesn't really matter what
/// the type is, as long as it is valid for the constraint code.
fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll Type {
    match reg {
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
        | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => cx.type_i32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
            cx.type_vector(cx.type_i64(), 2)
        }
        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
        | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
        | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
        InlineAsmRegClass::X86(X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg) => {
            unreachable!("clobber-only")
        }
        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(),
        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
            bug!("LLVM backend does not support SPIR-V")
        }
        InlineAsmRegClass::Err => unreachable!(),
    }
}

/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type.
fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: &Scalar) -> &'ll Type {
    match scalar.value {
        Primitive::Int(Integer::I8, _) => cx.type_i8(),
        Primitive::Int(Integer::I16, _) => cx.type_i16(),
        Primitive::Int(Integer::I32, _) => cx.type_i32(),
        Primitive::Int(Integer::I64, _) => cx.type_i64(),
        Primitive::F32 => cx.type_f32(),
        Primitive::F64 => cx.type_f64(),
        Primitive::Pointer => cx.type_isize(),
        _ => unreachable!(),
    }
}

/// Fix up an input value to work around LLVM bugs.
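/// For example, scalars destined for AArch64 vector registers are first
/// widened to a vector type. `llvm_fixup_output` performs the reverse
/// transformation on the result.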
fn llvm_fixup_input(
    bx: &mut Builder<'a, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, &layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, s);
            let count = 16 / layout.size.bytes();
            let vec_ty = bx.cx.type_vector(elem_ty, count);
            if let Primitive::Pointer = s.value {
                value = bx.ptrtoint(value, bx.cx.type_isize());
            }
            bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, *count);
            let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_i64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                bx.bitcast(value, bx.cx.type_f32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                bx.bitcast(value, bx.cx.type_f64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetics.
            Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
            Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
            Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
            _ => value,
        },
        _ => value,
    }
}

/// Fix up an output value to work around LLVM bugs.
fn llvm_fixup_output(
    bx: &mut Builder<'a, 'll, 'tcx>,
    mut value: &'ll Value,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
    match (reg, &layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                bx.extract_element(value, bx.const_i32(0))
            } else {
                value
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            value = bx.extract_element(value, bx.const_i32(0));
            if let Primitive::Pointer = s.value {
                value = bx.inttoptr(value, layout.llvm_type(bx.cx));
            }
            value
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(bx.cx, element);
            let vec_ty = bx.cx.type_vector(elem_ty, *count * 2);
            let indices: Vec<_> = (0..*count).map(|x| bx.const_i32(x as i32)).collect();
            bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            bx.bitcast(value, bx.cx.type_f64())
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                bx.bitcast(value, bx.cx.type_i32())
            } else {
                value
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                bx.bitcast(value, bx.cx.type_i64())
            } else {
                value
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetics.
            Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
            Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
            Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
            Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
            _ => value,
        },
        _ => value,
    }
}

/// Output type to use for llvm_fixup_output.
fn llvm_fixup_output_type(
    cx: &CodegenCx<'ll, 'tcx>,
    reg: InlineAsmRegClass,
    layout: &TyAndLayout<'tcx>,
) -> &'ll Type {
    match (reg, &layout.abi) {
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
            if let Primitive::Int(Integer::I8, _) = s.value {
                cx.type_vector(cx.type_i8(), 8)
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
            let elem_ty = llvm_asm_scalar_type(cx, s);
            let count = 16 / layout.size.bytes();
            cx.type_vector(elem_ty, count)
        }
        (
            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
            Abi::Vector { element, count },
        ) if layout.size.bytes() == 8 => {
            let elem_ty = llvm_asm_scalar_type(cx, element);
            cx.type_vector(elem_ty, count * 2)
        }
        (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
            if s.value == Primitive::F64 =>
        {
            cx.type_i64()
        }
        (
            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
            Abi::Vector { .. },
        ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
        (
            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I32, _) = s.value {
                cx.type_f32()
            } else {
                layout.llvm_type(cx)
            }
        }
        (
            InlineAsmRegClass::Arm(
                ArmInlineAsmRegClass::dreg
                | ArmInlineAsmRegClass::dreg_low8
                | ArmInlineAsmRegClass::dreg_low16,
            ),
            Abi::Scalar(s),
        ) => {
            if let Primitive::Int(Integer::I64, _) = s.value {
                cx.type_f64()
            } else {
                layout.llvm_type(cx)
            }
        }
        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
            // MIPS only supports register-length arithmetics.
            Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
            Primitive::F32 => cx.type_i32(),
            Primitive::F64 => cx.type_i64(),
            _ => layout.llvm_type(cx),
        },
        _ => layout.llvm_type(cx),
    }
}