use super::operand::OperandRef;
use super::operand::OperandValue::{Immediate, Pair, Ref};
use super::place::PlaceRef;
use super::{CachedLlbb, FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate};
use crate::meth;
use crate::traits::*;
use crate::MemFlags;

use rustc_ast as ast;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_hir::lang_items::LangItem;
use rustc_index::vec::Idx;
use rustc_middle::mir::{self, AssertKind, SwitchTargets};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, Instance, Ty};
use rustc_session::config::OptLevel;
use rustc_span::source_map::Span;
use rustc_span::{sym, Symbol};
use rustc_symbol_mangling::typeid::typeid_for_fnabi;
use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode, Reg};
use rustc_target::abi::{self, HasDataLayout, WrappingRange};
use rustc_target::spec::abi::Abi;

// Indicates if we are in the middle of merging a BB's successor into it. This
// can happen when a BB jumps directly to its successor and the successor has
// no other predecessors.
#[derive(Debug, PartialEq)]
enum MergingSucc {
    False,
    True,
}
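
// Illustrative sketch (hypothetical MIR, not from this file): given
//
//     bb0: { _1 = const 1_i32; goto -> bb1; }
//     bb1: { _0 = _1; return; }
//
// where `bb0` is `bb1`'s only predecessor, codegen can keep appending to the
// backend block built for `bb0` instead of emitting a `br` to a new block.
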
/// Used by `FunctionCx::codegen_terminator` for emitting common patterns
/// e.g., creating a basic block, calling a function, etc.
struct TerminatorCodegenHelper<'tcx> {
    bb: mir::BasicBlock,
    terminator: &'tcx mir::Terminator<'tcx>,
}

impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
    /// Returns the appropriate `Funclet` for the current funclet, if on MSVC,
    /// either previously cached or newly created by `landing_pad_for`.
    fn funclet<'b, Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &'b mut FunctionCx<'a, 'tcx, Bx>,
    ) -> Option<&'b Bx::Funclet> {
        let cleanup_kinds = (&fx.cleanup_kinds).as_ref()?;
        let funclet_bb = cleanup_kinds[self.bb].funclet_bb(self.bb)?;
        // If `landing_pad_for` hasn't been called yet to create the `Funclet`,
        // it has to be now. This may not seem necessary, as RPO should lead
        // to all the unwind edges being visited (and so to `landing_pad_for`
        // getting called for them), before building any of the blocks inside
        // the funclet itself - however, if MIR contains edges that end up not
        // being needed in the LLVM IR after monomorphization, the funclet may
        // be unreachable, and we don't yet have a way to skip building it in
        // such an eventuality (which may be a better solution than this).
        if fx.funclets[funclet_bb].is_none() {
            fx.landing_pad_for(funclet_bb);
        }
        Some(
            fx.funclets[funclet_bb]
                .as_ref()
                .expect("landing_pad_for didn't also create funclets entry"),
        )
    }

    /// Get a basic block (creating it if necessary), possibly with cleanup
    /// stuff in it or next to it.
    fn llbb_with_cleanup<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        target: mir::BasicBlock,
    ) -> Bx::BasicBlock {
        let (needs_landing_pad, is_cleanupret) = self.llbb_characteristics(fx, target);
        let mut lltarget = fx.llbb(target);
        if needs_landing_pad {
            lltarget = fx.landing_pad_for(target);
        }
        if is_cleanupret {
            // MSVC cross-funclet jump - need a trampoline
            debug_assert!(base::wants_msvc_seh(fx.cx.tcx().sess));
            debug!("llbb_with_cleanup: creating cleanup trampoline for {:?}", target);
            let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
            let trampoline_llbb = Bx::append_block(fx.cx, fx.llfn, name);
            let mut trampoline_bx = Bx::build(fx.cx, trampoline_llbb);
            trampoline_bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
            trampoline_llbb
        } else {
            lltarget
        }
    }

    fn llbb_characteristics<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        target: mir::BasicBlock,
    ) -> (bool, bool) {
        if let Some(ref cleanup_kinds) = fx.cleanup_kinds {
            let funclet_bb = cleanup_kinds[self.bb].funclet_bb(self.bb);
            let target_funclet = cleanup_kinds[target].funclet_bb(target);
            let (needs_landing_pad, is_cleanupret) = match (funclet_bb, target_funclet) {
                (None, None) => (false, false),
                (None, Some(_)) => (true, false),
                (Some(f), Some(t_f)) => (f != t_f, f != t_f),
                (Some(_), None) => {
                    let span = self.terminator.source_info.span;
                    span_bug!(span, "{:?} - jump out of cleanup?", self.terminator);
                }
            };
            (needs_landing_pad, is_cleanupret)
        } else {
            let needs_landing_pad = !fx.mir[self.bb].is_cleanup && fx.mir[target].is_cleanup;
            let is_cleanupret = false;
            (needs_landing_pad, is_cleanupret)
        }
    }

    fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        target: mir::BasicBlock,
        mergeable_succ: bool,
    ) -> MergingSucc {
        let (needs_landing_pad, is_cleanupret) = self.llbb_characteristics(fx, target);
        if mergeable_succ && !needs_landing_pad && !is_cleanupret {
            // We can merge the successor into this bb, so no need for a `br`.
            MergingSucc::True
        } else {
            let mut lltarget = fx.llbb(target);
            if needs_landing_pad {
                lltarget = fx.landing_pad_for(target);
            }
            if is_cleanupret {
                // micro-optimization: generate a `ret` rather than a jump
                // to a trampoline.
                bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
            } else {
                bx.br(lltarget);
            }
            MergingSucc::False
        }
    }

    /// Call `fn_ptr` of `fn_abi` with the arguments `llargs`, the optional
    /// return destination `destination` and the unwind action `unwind`.
    fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
        fn_ptr: Bx::Value,
        llargs: &[Bx::Value],
        destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
        mut unwind: mir::UnwindAction,
        copied_constant_arguments: &[PlaceRef<'tcx, <Bx as BackendTypes>::Value>],
        mergeable_succ: bool,
    ) -> MergingSucc {
        // If there is a cleanup block and the function we're calling can unwind, then
        // do an invoke, otherwise do a call.
        let fn_ty = bx.fn_decl_backend_type(&fn_abi);

        if !fn_abi.can_unwind {
            unwind = mir::UnwindAction::Unreachable;
        }

        let unwind_block = match unwind {
            mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)),
            mir::UnwindAction::Continue => None,
            mir::UnwindAction::Unreachable => None,
            mir::UnwindAction::Terminate => {
                if fx.mir[self.bb].is_cleanup && base::wants_msvc_seh(fx.cx.tcx().sess) {
                    // SEH will abort automatically if an exception tries to
                    // propagate out from cleanup.
                    None
                } else {
                    Some(fx.terminate_block())
                }
            }
        };

        if let Some(unwind_block) = unwind_block {
            let ret_llbb = if let Some((_, target)) = destination {
                fx.llbb(target)
            } else {
                fx.unreachable_block()
            };
            let invokeret = bx.invoke(
                fn_ty,
                Some(&fn_abi),
                fn_ptr,
                &llargs,
                ret_llbb,
                unwind_block,
                self.funclet(fx),
            );
            if fx.mir[self.bb].is_cleanup {
                bx.do_not_inline(invokeret);
            }

            if let Some((ret_dest, target)) = destination {
                bx.switch_to_block(fx.llbb(target));
                fx.set_debug_loc(bx, self.terminator.source_info);
                for tmp in copied_constant_arguments {
                    bx.lifetime_end(tmp.llval, tmp.layout.size);
                }
                fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
            }
            MergingSucc::False
        } else {
            let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx));
            if fx.mir[self.bb].is_cleanup {
                // Cleanup is always the cold path. Don't inline
                // drop glue. Also, when there is a deeply-nested
                // struct, there are "symmetry" issues that cause
                // exponential inlining - see issue #41696.
                bx.do_not_inline(llret);
            }

            if let Some((ret_dest, target)) = destination {
                for tmp in copied_constant_arguments {
                    bx.lifetime_end(tmp.llval, tmp.layout.size);
                }
                fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
                self.funclet_br(fx, bx, target, mergeable_succ)
            } else {
                bx.unreachable();
                MergingSucc::False
            }
        }
    }

    /// Generates inline assembly with optional `destination` and `unwind`.
    fn do_inlineasm<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        template: &[InlineAsmTemplatePiece],
        operands: &[InlineAsmOperandRef<'tcx, Bx>],
        options: InlineAsmOptions,
        line_spans: &[Span],
        destination: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
        instance: Instance<'_>,
        mergeable_succ: bool,
    ) -> MergingSucc {
        let unwind_target = match unwind {
            mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)),
            mir::UnwindAction::Terminate => Some(fx.terminate_block()),
            mir::UnwindAction::Continue => None,
            mir::UnwindAction::Unreachable => None,
        };

        if let Some(cleanup) = unwind_target {
            let ret_llbb = if let Some(target) = destination {
                fx.llbb(target)
            } else {
                fx.unreachable_block()
            };

            bx.codegen_inline_asm(
                template,
                &operands,
                options,
                line_spans,
                instance,
                Some((ret_llbb, cleanup, self.funclet(fx))),
            );
            MergingSucc::False
        } else {
            bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None);

            if let Some(target) = destination {
                self.funclet_br(fx, bx, target, mergeable_succ)
            } else {
                bx.unreachable();
                MergingSucc::False
            }
        }
    }
}

/// Codegen implementations for some terminator variants.
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// Generates code for a `Resume` terminator.
    fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, bx: &mut Bx) {
        if let Some(funclet) = helper.funclet(self) {
            bx.cleanup_ret(funclet, None);
        } else {
            let slot = self.get_personality_slot(bx);
            let exn0 = slot.project_field(bx, 0);
            let exn0 = bx.load_operand(exn0).immediate();
            let exn1 = slot.project_field(bx, 1);
            let exn1 = bx.load_operand(exn1).immediate();
            slot.storage_dead(bx);

            bx.resume(exn0, exn1);
        }
    }

    fn codegen_switchint_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        discr: &mir::Operand<'tcx>,
        targets: &SwitchTargets,
    ) {
        let discr = self.codegen_operand(bx, &discr);
        let switch_ty = discr.layout.ty;
        let mut target_iter = targets.iter();
        if target_iter.len() == 1 {
            // If there are two targets (one conditional, one fallback), emit `br`
            // instead of `switch`.
            let (test_value, target) = target_iter.next().unwrap();
            let lltrue = helper.llbb_with_cleanup(self, target);
            let llfalse = helper.llbb_with_cleanup(self, targets.otherwise());
            if switch_ty == bx.tcx().types.bool {
                // Don't generate trivial icmps when switching on bool.
                match test_value {
                    0 => bx.cond_br(discr.immediate(), llfalse, lltrue),
                    1 => bx.cond_br(discr.immediate(), lltrue, llfalse),
                    _ => bug!(),
                }
            } else {
                let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
                let llval = bx.const_uint_big(switch_llty, test_value);
                let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
                bx.cond_br(cmp, lltrue, llfalse);
            }
        } else if self.cx.sess().opts.optimize == OptLevel::No
            && target_iter.len() == 2
            && self.mir[targets.otherwise()].is_empty_unreachable()
        {
            // In unoptimized builds, if there are two normal targets and the `otherwise` target is
            // an unreachable BB, emit `br` instead of `switch`. This leaves behind the unreachable
            // BB, which will usually (but not always) be dead code.
            //
            // Why only in unoptimized builds?
            // - In unoptimized builds LLVM uses FastISel which does not support switches, so it
            //   must fall back to the slower SelectionDAG isel. Therefore, using `br` gives
            //   significant compile time speedups for unoptimized builds.
            // - In optimized builds the above doesn't hold, and using `br` sometimes results in
            //   worse generated code because LLVM can no longer tell that the value being switched
            //   on can only have two values, e.g. 0 and 1.
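            //
            // Illustrative sketch (hypothetical MIR): `switchInt(_1) -> [0: bb1,
            // 7: bb2, otherwise: bb3]`, where `bb3` is empty and unreachable,
            // lowers here to `%cmp = icmp eq _1, 0` followed by
            // `br %cmp, bb1, bb2`, since `_1` can only ever be 0 or 7.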
            let (test_value1, target1) = target_iter.next().unwrap();
            let (_test_value2, target2) = target_iter.next().unwrap();
            let ll1 = helper.llbb_with_cleanup(self, target1);
            let ll2 = helper.llbb_with_cleanup(self, target2);
            let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
            let llval = bx.const_uint_big(switch_llty, test_value1);
            let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
            bx.cond_br(cmp, ll1, ll2);
        } else {
            bx.switch(
                discr.immediate(),
                helper.llbb_with_cleanup(self, targets.otherwise()),
                target_iter.map(|(value, target)| (value, helper.llbb_with_cleanup(self, target))),
            );
        }
    }

    fn codegen_return_terminator(&mut self, bx: &mut Bx) {
        // Call `va_end` if this is the definition of a C-variadic function.
        if self.fn_abi.c_variadic {
            // The `VaList` "spoofed" argument is just after all the real arguments.
            let va_list_arg_idx = self.fn_abi.args.len();
            match self.locals[mir::Local::new(1 + va_list_arg_idx)] {
                LocalRef::Place(va_list) => {
                    bx.va_end(va_list.llval);
                }
                _ => bug!("C-variadic function must have a `VaList` place"),
            }
        }
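
        // (Illustrative: for `unsafe extern "C" fn f(x: u32, ...)` the spoofed
        // `VaList` is the local just past the declared arguments, e.g. `_2`
        // when `fn_abi.args.len()` is 1, and that is what `va_end` is called on.)
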
        if self.fn_abi.ret.layout.abi.is_uninhabited() {
            // Functions with uninhabited return values are marked `noreturn`,
            // so we should make sure that we never actually return.
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            // `abort` does not terminate the block, so we still need to generate
            // an `unreachable` terminator after it.
            bx.unreachable();
            return;
        }

        let llval = match &self.fn_abi.ret.mode {
            PassMode::Ignore | PassMode::Indirect { .. } => {
                bx.ret_void();
                return;
            }

            PassMode::Direct(_) | PassMode::Pair(..) => {
                let op = self.codegen_consume(bx, mir::Place::return_place().as_ref());
                if let Ref(llval, _, align) = op.val {
                    bx.load(bx.backend_type(op.layout), llval, align)
                } else {
                    op.immediate_or_packed_pair(bx)
                }
            }

            PassMode::Cast(cast_ty, _) => {
                let op = match self.locals[mir::RETURN_PLACE] {
                    LocalRef::Operand(op) => op,
                    LocalRef::PendingOperand => bug!("use of return before def"),
                    LocalRef::Place(cg_place) => OperandRef {
                        val: Ref(cg_place.llval, None, cg_place.align),
                        layout: cg_place.layout,
                    },
                    LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
                };
                let llslot = match op.val {
                    Immediate(_) | Pair(..) => {
                        let scratch = PlaceRef::alloca(bx, self.fn_abi.ret.layout);
                        op.val.store(bx, scratch);
                        scratch.llval
                    }
                    Ref(llval, _, align) => {
                        assert_eq!(align, op.layout.align.abi, "return place is unaligned!");
                        llval
                    }
                };
                let ty = bx.cast_backend_type(cast_ty);
                let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
                bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
            }
        };
        bx.ret(llval);
    }

    #[tracing::instrument(level = "trace", skip(self, helper, bx))]
    fn codegen_drop_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        location: mir::Place<'tcx>,
        target: mir::BasicBlock,
        unwind: mir::UnwindAction,
        mergeable_succ: bool,
    ) -> MergingSucc {
        let ty = location.ty(self.mir, bx.tcx()).ty;
        let ty = self.monomorphize(ty);
        let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);

        if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
            // we don't actually need to drop anything.
            return helper.funclet_br(self, bx, target, mergeable_succ);
        }

        let place = self.codegen_place(bx, location.as_ref());
        let (args1, args2);
        let mut args = if let Some(llextra) = place.llextra {
            args2 = [place.llval, llextra];
            &args2[..]
        } else {
            args1 = [place.llval];
            &args1[..]
        };
        let (drop_fn, fn_abi) = match ty.kind() {
            // FIXME(eddyb) perhaps move some of this logic into
            // `Instance::resolve_drop_in_place`?
            ty::Dynamic(_, _, ty::Dyn) => {
                // IN THIS ARM, WE HAVE:
                // ty = *mut (dyn Trait)
                // which is: exists<T> ( *mut T, Vtable<T: Trait> )
                //
                // args = ( Data, Vtable )
                let virtual_drop = Instance {
                    def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
                    substs: drop_fn.substs,
                };
                debug!("ty = {:?}", ty);
                debug!("drop_fn = {:?}", drop_fn);
                debug!("args = {:?}", args);
                let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
                let vtable = args[1];
                // Truncate vtable off of args list
                args = &args[..1];
                (
                    meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
                        .get_fn(bx, vtable, ty, &fn_abi),
                    fn_abi,
                )
            }
            ty::Dynamic(_, _, ty::DynStar) => {
                // IN THIS ARM, WE HAVE:
                // ty = *mut (dyn* Trait)
                // which is: *mut exists<T: sizeof(T) == sizeof(usize)> (T, Vtable<T: Trait>)
                //
                // WE CAN CONVERT THIS INTO THE ABOVE LOGIC BY DOING
                //
                // data = &(*args[0]).0    // gives a pointer to Data above (really the same pointer)
                // vtable = (*args[0]).1   // loads the vtable out
                // (data, vtable)          // an equivalent Rust `*mut dyn Trait`
                //
                // SO THEN WE CAN USE THE ABOVE CODE.
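                //
                // (Illustrative: the only real difference from the `ty::Dyn` arm
                // above is that data and vtable start out behind a single
                // pointer, so the vtable is projected and loaded from field 1
                // rather than taken directly from `args[1]`.)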
                let virtual_drop = Instance {
                    def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
                    substs: drop_fn.substs,
                };
                debug!("ty = {:?}", ty);
                debug!("drop_fn = {:?}", drop_fn);
                debug!("args = {:?}", args);
                let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
                let meta_ptr = place.project_field(bx, 1);
                let meta = bx.load_operand(meta_ptr);
                // Truncate vtable off of args list
                args = &args[..1];
                debug!("args' = {:?}", args);
                (
                    meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
                        .get_fn(bx, meta.immediate(), ty, &fn_abi),
                    fn_abi,
                )
            }
            _ => (bx.get_fn_addr(drop_fn), bx.fn_abi_of_instance(drop_fn, ty::List::empty())),
        };
        helper.do_call(
            self,
            bx,
            fn_abi,
            drop_fn,
            args,
            Some((ReturnDest::Nothing, target)),
            unwind,
            &[],
            mergeable_succ,
        )
    }

    fn codegen_assert_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        terminator: &mir::Terminator<'tcx>,
        cond: &mir::Operand<'tcx>,
        expected: bool,
        msg: &mir::AssertMessage<'tcx>,
        target: mir::BasicBlock,
        unwind: mir::UnwindAction,
        mergeable_succ: bool,
    ) -> MergingSucc {
        let span = terminator.source_info.span;
        let cond = self.codegen_operand(bx, cond).immediate();
        let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);

        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        if !bx.cx().check_overflow() && msg.is_optional_overflow_check() {
            const_cond = Some(expected);
        }

        // Don't codegen the panic block if success is known.
        if const_cond == Some(expected) {
            return helper.funclet_br(self, bx, target, mergeable_succ);
        }

        // Pass the condition through llvm.expect for branch hinting.
        let cond = bx.expect(cond, expected);

        // Create the failure block and the conditional branch to it.
        let lltarget = helper.llbb_with_cleanup(self, target);
        let panic_block = bx.append_sibling_block("panic");
        if expected {
            bx.cond_br(cond, lltarget, panic_block);
        } else {
            bx.cond_br(cond, panic_block, lltarget);
        }

        // After this point, bx is the block for the call to panic.
        bx.switch_to_block(panic_block);
        self.set_debug_loc(bx, terminator.source_info);

        // Get the location information.
        let location = self.get_caller_location(bx, terminator.source_info).immediate();

        // Put together the arguments to the panic entry point.
        let (lang_item, args) = match msg {
            AssertKind::BoundsCheck { ref len, ref index } => {
                let len = self.codegen_operand(bx, len).immediate();
                let index = self.codegen_operand(bx, index).immediate();
                // It's `fn panic_bounds_check(index: usize, len: usize)`,
                // and `#[track_caller]` adds an implicit third argument.
                (LangItem::PanicBoundsCheck, vec![index, len, location])
            }
            AssertKind::MisalignedPointerDereference { ref required, ref found } => {
                let required = self.codegen_operand(bx, required).immediate();
                let found = self.codegen_operand(bx, found).immediate();
                // It's `fn panic_misaligned_pointer_dereference(required: usize, found: usize)`,
                // and `#[track_caller]` adds an implicit third argument.
                (LangItem::PanicMisalignedPointerDereference, vec![required, found, location])
            }
            _ => {
                let msg = bx.const_str(msg.description());
                // It's `pub fn panic(expr: &str)`, with the wide reference being passed
                // as two arguments, and `#[track_caller]` adds an implicit third argument.
                (LangItem::Panic, vec![msg.0, msg.1, location])
            }
        };

        let (fn_abi, llfn) = common::build_langcall(bx, Some(span), lang_item);

        // Codegen the actual panic invoke/call.
        let merging_succ = helper.do_call(self, bx, fn_abi, llfn, &args, None, unwind, &[], false);
        assert_eq!(merging_succ, MergingSucc::False);
        MergingSucc::False
    }

    fn codegen_terminate_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        terminator: &mir::Terminator<'tcx>,
    ) {
        let span = terminator.source_info.span;
        self.set_debug_loc(bx, terminator.source_info);

        // Obtain the panic entry point.
        let (fn_abi, llfn) = common::build_langcall(bx, Some(span), LangItem::PanicCannotUnwind);

        // Codegen the actual panic invoke/call.
        let merging_succ = helper.do_call(
            self,
            bx,
            fn_abi,
            llfn,
            &[],
            None,
            mir::UnwindAction::Unreachable,
            &[],
            false,
        );
        assert_eq!(merging_succ, MergingSucc::False);
    }

    /// Returns `Some` if this is indeed a panic intrinsic and codegen is done.
    fn codegen_panic_intrinsic(
        &mut self,
        helper: &TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        intrinsic: Option<Symbol>,
        instance: Option<Instance<'tcx>>,
        source_info: mir::SourceInfo,
        target: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
        mergeable_succ: bool,
    ) -> Option<MergingSucc> {
        // Emit a panic or a no-op for `assert_*` intrinsics.
        // These are intrinsics that compile to panics so that we can get a message
        // which mentions the offending type, even from a const context.
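        // (Illustrative, assuming the current intrinsic names: `assert_zero_valid::<&i32>()`
        // fails its validity check, since a reference may never be zero, and so
        // lowers to a `panic_nounwind` call whose message names `&i32`, rather
        // than to a real function call.)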
        let panic_intrinsic = intrinsic.and_then(|s| ValidityRequirement::from_intrinsic(s));
        if let Some(requirement) = panic_intrinsic {
            let ty = instance.unwrap().substs.type_at(0);

            let do_panic = !bx
                .tcx()
                .check_validity_requirement((requirement, bx.param_env().and(ty)))
                .expect("expect to have layout during codegen");

            let layout = bx.layout_of(ty);

            Some(if do_panic {
                let msg_str = with_no_visible_paths!({
                    with_no_trimmed_paths!({
                        if layout.abi.is_uninhabited() {
                            // Use this error even for the other intrinsics as it is more precise.
                            format!("attempted to instantiate uninhabited type `{}`", ty)
                        } else if requirement == ValidityRequirement::Zero {
                            format!("attempted to zero-initialize type `{}`, which is invalid", ty)
                        } else {
                            format!(
                                "attempted to leave type `{}` uninitialized, which is invalid",
                                ty
                            )
                        }
                    })
                });
                let msg = bx.const_str(&msg_str);

                // Obtain the panic entry point.
                let (fn_abi, llfn) =
                    common::build_langcall(bx, Some(source_info.span), LangItem::PanicNounwind);

                // Codegen the actual panic invoke/call.
                helper.do_call(
                    self,
                    bx,
                    fn_abi,
                    llfn,
                    &[msg.0, msg.1],
                    target.as_ref().map(|bb| (ReturnDest::Nothing, *bb)),
                    unwind,
                    &[],
                    mergeable_succ,
                )
            } else {
                // Do nothing.
                let target = target.unwrap();
                helper.funclet_br(self, bx, target, mergeable_succ)
            })
        } else {
            None
        }
    }

    fn codegen_call_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        terminator: &mir::Terminator<'tcx>,
        func: &mir::Operand<'tcx>,
        args: &[mir::Operand<'tcx>],
        destination: mir::Place<'tcx>,
        target: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
        fn_span: Span,
        mergeable_succ: bool,
    ) -> MergingSucc {
        let source_info = terminator.source_info;
        let span = source_info.span;

        // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
        let callee = self.codegen_operand(bx, func);

        let (instance, mut llfn) = match *callee.layout.ty.kind() {
            ty::FnDef(def_id, substs) => (
                Some(
                    ty::Instance::expect_resolve(
                        bx.tcx(),
                        ty::ParamEnv::reveal_all(),
                        def_id,
                        substs,
                    )
                    .polymorphize(bx.tcx()),
                ),
                None,
            ),
            ty::FnPtr(_) => (None, Some(callee.immediate())),
            _ => bug!("{} is not callable", callee.layout.ty),
        };

        let def = instance.map(|i| i.def);

        if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
            // Empty drop glue; a no-op.
            let target = target.unwrap();
            return helper.funclet_br(self, bx, target, mergeable_succ);
        }

        // FIXME(eddyb) avoid computing this if possible, when `instance` is
        // available - right now `sig` is only needed for getting the `abi`
        // and figuring out how many extra args were passed to a C-variadic `fn`.
        let sig = callee.layout.ty.fn_sig(bx.tcx());
        let abi = sig.abi();

        // Handle intrinsics old codegen wants Expr's for, ourselves.
        let intrinsic = match def {
            Some(ty::InstanceDef::Intrinsic(def_id)) => Some(bx.tcx().item_name(def_id)),
            _ => None,
        };

        let extra_args = &args[sig.inputs().skip_binder().len()..];
        let extra_args = bx.tcx().mk_type_list_from_iter(extra_args.iter().map(|op_arg| {
            let op_ty = op_arg.ty(self.mir, bx.tcx());
            self.monomorphize(op_ty)
        }));

        let fn_abi = match instance {
            Some(instance) => bx.fn_abi_of_instance(instance, extra_args),
            None => bx.fn_abi_of_fn_ptr(sig, extra_args),
        };

        if let Some(merging_succ) = self.codegen_panic_intrinsic(
            &helper,
            bx,
            intrinsic,
            instance,
            source_info,
            target,
            unwind,
            mergeable_succ,
        ) {
            return merging_succ;
        }

        // The arguments we'll be passing. Plus one to account for outptr, if used.
        let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
        let mut llargs = Vec::with_capacity(arg_count);

        // Prepare the return value destination
        let ret_dest = if target.is_some() {
            let is_intrinsic = intrinsic.is_some();
            self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
        } else {
            ReturnDest::Nothing
        };

        if intrinsic == Some(sym::caller_location) {
            return if let Some(target) = target {
                let location =
                    self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });

                if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
                    location.val.store(bx, tmp);
                }
                self.store_return(bx, ret_dest, &fn_abi.ret, location.immediate());
                helper.funclet_br(self, bx, target, mergeable_succ)
            } else {
                MergingSucc::False
            };
        }

        match intrinsic {
            None | Some(sym::drop_in_place) => {}
            Some(intrinsic) => {
                let dest = match ret_dest {
                    _ if fn_abi.ret.is_indirect() => llargs[0],
                    ReturnDest::Nothing => {
                        bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
                    }
                    ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
                    ReturnDest::DirectOperand(_) => {
                        bug!("Cannot use direct operand with an intrinsic call")
                    }
                };

                let args: Vec<_> = args
                    .iter()
                    .enumerate()
                    .map(|(i, arg)| {
                        // The indices passed to simd_shuffle* in the
                        // third argument must be constant. This is
                        // checked by const-qualification, which also
                        // promotes any complex rvalues to constants.
                        if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
                            if let mir::Operand::Constant(constant) = arg {
                                let c = self.eval_mir_constant(constant);
                                let (llval, ty) = self.simd_shuffle_indices(
                                    &bx,
                                    constant.span,
                                    self.monomorphize(constant.ty()),
                                    c,
                                );
                                return OperandRef {
                                    val: Immediate(llval),
                                    layout: bx.layout_of(ty),
                                };
                            } else {
                                span_bug!(span, "shuffle indices must be constant");
                            }
                        }

                        self.codegen_operand(bx, arg)
                    })
                    .collect();

                Self::codegen_intrinsic_call(
                    bx,
                    *instance.as_ref().unwrap(),
                    &fn_abi,
                    &args,
                    dest,
                    span,
                );

                if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
                    self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
                }

                return if let Some(target) = target {
                    helper.funclet_br(self, bx, target, mergeable_succ)
                } else {
                    bx.unreachable();
                    MergingSucc::False
                };
            }
        }

        // Split the rust-call tupled arguments off.
        let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
            let (tup, args) = args.split_last().unwrap();
            (args, Some(tup))
        } else {
            (args, None)
        };
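
        // (Illustrative: for a closure invoked as `f(a, b)` through `Fn::call`,
        // the MIR passes `(a, b)` as a single tuple operand; `untuple` holds
        // that tuple, and its fields are flattened into individual ABI
        // arguments by `codegen_arguments_untupled` below.)
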
        let mut copied_constant_arguments = vec![];
        'make_args: for (i, arg) in first_args.iter().enumerate() {
            let mut op = self.codegen_operand(bx, arg);

            if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
                match op.val {
                    Pair(data_ptr, meta) => {
                        // In the case of Rc<Self>, we need to explicitly pass a
                        // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
                        // that is understood elsewhere in the compiler as a method on
                        // `dyn Trait`.
                        // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
                        // we get a value of a built-in pointer type.
                        //
                        // This is also relevant for `Pin<&mut Self>`, where we need to peel the `Pin`.
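                        //
                        // (Illustrative: for a receiver of type `Pin<&mut dyn Trait>`,
                        // the loop below peels `Pin`, whose only non-ZST field is
                        // the `&mut dyn Trait` reference, and stops at that
                        // built-in pointer type.)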
                        'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
                            && !op.layout.ty.is_ref()
                        {
                            for i in 0..op.layout.fields.count() {
                                let field = op.extract_field(bx, i);
                                if !field.layout.is_zst() {
                                    // we found the one non-zero-sized field that is allowed
                                    // now find *its* non-zero-sized field, or stop if it's a
                                    // pointer
                                    op = field;
                                    continue 'descend_newtypes;
                                }
                            }

                            span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
                        }

                        // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
                        // data pointer and vtable. Look up the method in the vtable, and pass
                        // the data pointer as the first argument
                        llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
                            bx,
                            meta,
                            op.layout.ty,
                            &fn_abi,
                        ));
                        llargs.push(data_ptr);
                        continue 'make_args;
                    }
                    Ref(data_ptr, Some(meta), _) => {
                        // by-value dynamic dispatch
                        llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
                            bx,
                            meta,
                            op.layout.ty,
                            &fn_abi,
                        ));
                        llargs.push(data_ptr);
                        continue;
                    }
                    Immediate(_) => {
                        // See comment above explaining why we peel these newtypes
                        'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
                            && !op.layout.ty.is_ref()
                        {
                            for i in 0..op.layout.fields.count() {
                                let field = op.extract_field(bx, i);
                                if !field.layout.is_zst() {
                                    // we found the one non-zero-sized field that is allowed
                                    // now find *its* non-zero-sized field, or stop if it's a
                                    // pointer
                                    op = field;
                                    continue 'descend_newtypes;
                                }
                            }

                            span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
                        }

                        // Make sure that we've actually unwrapped the rcvr down
                        // to a pointer or ref to `dyn* Trait`.
                        if !op.layout.ty.builtin_deref(true).unwrap().ty.is_dyn_star() {
                            span_bug!(span, "can't codegen a virtual call on {:#?}", op);
                        }
                        let place = op.deref(bx.cx());
                        let data_ptr = place.project_field(bx, 0);
                        let meta_ptr = place.project_field(bx, 1);
                        let meta = bx.load_operand(meta_ptr);
                        llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
                            bx,
                            meta.immediate(),
                            op.layout.ty,
                            &fn_abi,
                        ));
                        llargs.push(data_ptr.llval);
                        continue;
                    }
                    _ => {
                        span_bug!(span, "can't codegen a virtual call on {:#?}", op);
                    }
                }
            }

            // The callee needs to own the argument memory if we pass it
            // by-ref, so make a local copy of non-immediate constants.
            match (arg, op.val) {
                (&mir::Operand::Copy(_), Ref(_, None, _))
                | (&mir::Operand::Constant(_), Ref(_, None, _)) => {
                    let tmp = PlaceRef::alloca(bx, op.layout);
                    bx.lifetime_start(tmp.llval, tmp.layout.size);
                    op.val.store(bx, tmp);
                    op.val = Ref(tmp.llval, None, tmp.align);
                    copied_constant_arguments.push(tmp);
                }
                _ => {}
            }

            self.codegen_argument(bx, op, &mut llargs, &fn_abi.args[i]);
        }
        let num_untupled = untuple.map(|tup| {
            self.codegen_arguments_untupled(bx, tup, &mut llargs, &fn_abi.args[first_args.len()..])
        });

        let needs_location =
            instance.map_or(false, |i| i.def.requires_caller_location(self.cx.tcx()));
        if needs_location {
            let mir_args = if let Some(num_untupled) = num_untupled {
                first_args.len() + num_untupled
            } else {
                args.len()
            };
            assert_eq!(
                fn_abi.args.len(),
                mir_args + 1,
                "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR: {:?} {:?} {:?}",
                instance,
                fn_span,
                fn_abi,
            );
            let location =
                self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
            debug!(
                "codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
                terminator, location, fn_span
            );

            let last_arg = fn_abi.args.last().unwrap();
            self.codegen_argument(bx, location, &mut llargs, last_arg);
        }

        let (is_indirect_call, fn_ptr) = match (llfn, instance) {
            (Some(llfn), _) => (true, llfn),
            (None, Some(instance)) => (false, bx.get_fn_addr(instance)),
            _ => span_bug!(span, "no llfn for call"),
        };

        // For backends that support CFI using type membership (i.e., testing whether a given
        // pointer is associated with a type identifier).
        if bx.tcx().sess.is_sanitizer_cfi_enabled() && is_indirect_call {
            // Emit type metadata and checks.
            // FIXME(rcvalle): Add support for generalized identifiers.
            // FIXME(rcvalle): Create distinct unnamed MDNodes for internal identifiers.
            let typeid = typeid_for_fnabi(bx.tcx(), fn_abi);
            let typeid_metadata = self.cx.typeid_metadata(typeid);

            // Test whether the function pointer is associated with the type identifier.
            let cond = bx.type_test(fn_ptr, typeid_metadata);
            let bb_pass = bx.append_sibling_block("type_test.pass");
            let bb_fail = bx.append_sibling_block("type_test.fail");
            bx.cond_br(cond, bb_pass, bb_fail);

            bx.switch_to_block(bb_pass);
            let merging_succ = helper.do_call(
                self,
                bx,
                fn_abi,
                fn_ptr,
                &llargs,
                target.as_ref().map(|&target| (ret_dest, target)),
                unwind,
                &copied_constant_arguments,
                false,
            );
            assert_eq!(merging_succ, MergingSucc::False);

            bx.switch_to_block(bb_fail);
            bx.abort();
            bx.unreachable();

            return MergingSucc::False;
        }

        helper.do_call(
            self,
            bx,
            fn_abi,
            fn_ptr,
            &llargs,
            target.as_ref().map(|&target| (ret_dest, target)),
            unwind,
            &copied_constant_arguments,
            mergeable_succ,
        )
    }

    fn codegen_asm_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        terminator: &mir::Terminator<'tcx>,
        template: &[ast::InlineAsmTemplatePiece],
        operands: &[mir::InlineAsmOperand<'tcx>],
        options: ast::InlineAsmOptions,
        line_spans: &[Span],
        destination: Option<mir::BasicBlock>,
        unwind: mir::UnwindAction,
        instance: Instance<'_>,
        mergeable_succ: bool,
    ) -> MergingSucc {
        let span = terminator.source_info.span;

        let operands: Vec<_> = operands
            .iter()
            .map(|op| match *op {
                mir::InlineAsmOperand::In { reg, ref value } => {
                    let value = self.codegen_operand(bx, value);
                    InlineAsmOperandRef::In { reg, value }
                }
                mir::InlineAsmOperand::Out { reg, late, ref place } => {
                    let place = place.map(|place| self.codegen_place(bx, place.as_ref()));
                    InlineAsmOperandRef::Out { reg, late, place }
                }
                mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
                    let in_value = self.codegen_operand(bx, in_value);
                    let out_place =
                        out_place.map(|out_place| self.codegen_place(bx, out_place.as_ref()));
                    InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
                }
                mir::InlineAsmOperand::Const { ref value } => {
                    let const_value = self
                        .eval_mir_constant(value)
                        .unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved"));
                    let string = common::asm_const_to_str(
                        bx.tcx(),
                        span,
                        const_value,
                        bx.layout_of(value.ty()),
                    );
                    InlineAsmOperandRef::Const { string }
                }
                mir::InlineAsmOperand::SymFn { ref value } => {
                    let literal = self.monomorphize(value.literal);
                    if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
                        let instance = ty::Instance::resolve_for_fn_ptr(
                            bx.tcx(),
                            ty::ParamEnv::reveal_all(),
                            def_id,
                            substs,
                        )
                        .unwrap();
                        InlineAsmOperandRef::SymFn { instance }
                    } else {
                        span_bug!(span, "invalid type for asm sym (fn)");
                    }
                }
                mir::InlineAsmOperand::SymStatic { def_id } => {
                    InlineAsmOperandRef::SymStatic { def_id }
                }
            })
            .collect();

        helper.do_inlineasm(
            self,
            bx,
            template,
            &operands,
            options,
            line_spans,
            destination,
            unwind,
            instance,
            mergeable_succ,
        )
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_block(&mut self, mut bb: mir::BasicBlock) {
        let llbb = match self.try_llbb(bb) {
            Some(llbb) => llbb,
            None => return,
        };
        let bx = &mut Bx::build(self.cx, llbb);
        let mir = self.mir;

        // MIR basic blocks stop at any function call. This may not be the case
        // for the backend's basic blocks, in which case we might be able to
        // combine multiple MIR basic blocks into a single backend basic block.
        loop {
            let data = &mir[bb];

            debug!("codegen_block({:?}={:?})", bb, data);

            for statement in &data.statements {
                self.codegen_statement(bx, statement);
            }

            let merging_succ = self.codegen_terminator(bx, bb, data.terminator());
            if let MergingSucc::False = merging_succ {
                break;
            }

            // We are merging the successor into the produced backend basic
            // block. Record that the successor should be skipped when it is
            // reached.
            //
            // Note: we must not have already generated code for the successor.
            // This is implicitly ensured by the reverse postorder traversal,
            // and the assertion explicitly guarantees that.
            let mut successors = data.terminator().successors();
            let succ = successors.next().unwrap();
            assert!(matches!(self.cached_llbbs[succ], CachedLlbb::None));
            self.cached_llbbs[succ] = CachedLlbb::Skip;
            bb = succ;
        }
    }

    fn codegen_terminator(
        &mut self,
        bx: &mut Bx,
        bb: mir::BasicBlock,
        terminator: &'tcx mir::Terminator<'tcx>,
    ) -> MergingSucc {
        debug!("codegen_terminator: {:?}", terminator);

        let helper = TerminatorCodegenHelper { bb, terminator };

        let mergeable_succ = || {
            // Note: any call to `switch_to_block` will invalidate a `true` value
            // of `mergeable_succ`.
            let mut successors = terminator.successors();
            if let Some(succ) = successors.next()
                && successors.next().is_none()
                && let &[succ_pred] = self.mir.basic_blocks.predecessors()[succ].as_slice()
            {
                // bb has a single successor, and bb is its only predecessor. This
                // makes it a candidate for merging.
                assert_eq!(succ_pred, bb);
                true
            } else {
                false
            }
        };

        self.set_debug_loc(bx, terminator.source_info);
        match terminator.kind {
            mir::TerminatorKind::Resume => {
                self.codegen_resume_terminator(helper, bx);
                MergingSucc::False
            }

            mir::TerminatorKind::Terminate => {
                self.codegen_terminate_terminator(helper, bx, terminator);
                MergingSucc::False
            }

            mir::TerminatorKind::Goto { target } => {
                helper.funclet_br(self, bx, target, mergeable_succ())
            }

            mir::TerminatorKind::SwitchInt { ref discr, ref targets } => {
                self.codegen_switchint_terminator(helper, bx, discr, targets);
                MergingSucc::False
            }

            mir::TerminatorKind::Return => {
                self.codegen_return_terminator(bx);
                MergingSucc::False
            }

            mir::TerminatorKind::Unreachable => {
                bx.unreachable();
                MergingSucc::False
            }

            mir::TerminatorKind::Drop { place, target, unwind } => {
                self.codegen_drop_terminator(helper, bx, place, target, unwind, mergeable_succ())
            }

            mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, unwind } => self
                .codegen_assert_terminator(
                    helper,
                    bx,
                    terminator,
                    cond,
                    expected,
                    msg,
                    target,
                    unwind,
                    mergeable_succ(),
                ),

            mir::TerminatorKind::Call {
                ref func,
                ref args,
                destination,
                target,
                unwind,
                fn_span,
                ..
            } => self.codegen_call_terminator(
                helper,
                bx,
                terminator,
                func,
                args,
                destination,
                target,
                unwind,
                fn_span,
                mergeable_succ(),
            ),

            mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => {
                bug!("generator ops in codegen")
            }
            mir::TerminatorKind::FalseEdge { .. } | mir::TerminatorKind::FalseUnwind { .. } => {
                bug!("borrowck false edges in codegen")
            }

            mir::TerminatorKind::InlineAsm {
                template,
                ref operands,
                options,
                line_spans,
                destination,
                unwind,
            } => self.codegen_asm_terminator(
                helper,
                bx,
                terminator,
                template,
                operands,
                options,
                line_spans,
                destination,
                unwind,
                self.instance,
                mergeable_succ(),
            ),
        }
    }

    fn codegen_argument(
        &mut self,
        bx: &mut Bx,
        op: OperandRef<'tcx, Bx::Value>,
        llargs: &mut Vec<Bx::Value>,
        arg: &ArgAbi<'tcx, Ty<'tcx>>,
    ) {
        match arg.mode {
            PassMode::Ignore => return,
            PassMode::Cast(_, true) => {
                // Fill padding with undef value, where applicable.
                llargs.push(bx.const_undef(bx.reg_backend_type(&Reg::i32())));
            }
            PassMode::Pair(..) => match op.val {
                Pair(a, b) => {
                    llargs.push(a);
                    llargs.push(b);
                    return;
                }
                _ => bug!("codegen_argument: {:?} invalid for pair argument", op),
            },
            PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => match op.val {
                Ref(a, Some(b), _) => {
                    llargs.push(a);
                    llargs.push(b);
                    return;
                }
                _ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op),
            },
            _ => {}
        }

        // Force by-ref if we have to load through a cast pointer.
        let (mut llval, align, by_ref) = match op.val {
            Immediate(_) | Pair(..) => match arg.mode {
                PassMode::Indirect { .. } | PassMode::Cast(..) => {
                    let scratch = PlaceRef::alloca(bx, arg.layout);
                    op.val.store(bx, scratch);
                    (scratch.llval, scratch.align, true)
                }
                _ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
            },
            Ref(llval, _, align) => {
                if arg.is_indirect() && align < arg.layout.align.abi {
                    // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
                    // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
                    // have scary latent bugs around.

                    let scratch = PlaceRef::alloca(bx, arg.layout);
                    base::memcpy_ty(
                        bx,
                        scratch.llval,
                        scratch.align,
                        llval,
                        align,
                        op.layout,
                        MemFlags::empty(),
                    );
                    (scratch.llval, scratch.align, true)
                } else {
                    (llval, align, true)
                }
            }
        };

        if by_ref && !arg.is_indirect() {
            // Have to load the argument, maybe while casting it.
            if let PassMode::Cast(ty, _) = &arg.mode {
                let llty = bx.cast_backend_type(ty);
                let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
                llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
            } else {
                // We can't use `PlaceRef::load` here because the argument
                // may have a type we don't treat as immediate, but the ABI
                // used for this call is passing it by-value. In that case,
                // the load would just produce `OperandValue::Ref` instead
                // of the `OperandValue::Immediate` we need for the call.
                llval = bx.load(bx.backend_type(arg.layout), llval, align);
                if let abi::Abi::Scalar(scalar) = arg.layout.abi {
                    if scalar.is_bool() {
                        bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
                    }
                }
                // We store bools as `i8` so we need to truncate to `i1`.
                llval = bx.to_immediate(llval, arg.layout);
            }
        }

        llargs.push(llval);
    }

    fn codegen_arguments_untupled(
        &mut self,
        bx: &mut Bx,
        operand: &mir::Operand<'tcx>,
        llargs: &mut Vec<Bx::Value>,
        args: &[ArgAbi<'tcx, Ty<'tcx>>],
    ) -> usize {
        let tuple = self.codegen_operand(bx, operand);

        // Handle both by-ref and immediate tuples.
        if let Ref(llval, None, align) = tuple.val {
            let tuple_ptr = PlaceRef::new_sized_aligned(llval, tuple.layout, align);
            for i in 0..tuple.layout.fields.count() {
                let field_ptr = tuple_ptr.project_field(bx, i);
                let field = bx.load_operand(field_ptr);
                self.codegen_argument(bx, field, llargs, &args[i]);
            }
        } else if let Ref(_, Some(_), _) = tuple.val {
            bug!("closure arguments must be sized")
        } else {
            // If the tuple is immediate, the elements are as well.
            for i in 0..tuple.layout.fields.count() {
                let op = tuple.extract_field(bx, i);
                self.codegen_argument(bx, op, llargs, &args[i]);
            }
        }
        tuple.layout.fields.count()
    }

    fn get_caller_location(
        &mut self,
        bx: &mut Bx,
        mut source_info: mir::SourceInfo,
    ) -> OperandRef<'tcx, Bx::Value> {
        let tcx = bx.tcx();
        let mut span_to_caller_location = |mut span: Span| {
            // Remove `Inlined` marks as they pollute `expansion_cause`.
            while span.is_inlined() {
                span.remove_mark();
            }
            let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
            let caller = tcx.sess.source_map().lookup_char_pos(topmost.lo());
            let const_loc = tcx.const_caller_location((
                Symbol::intern(&caller.file.name.prefer_remapped().to_string_lossy()),
                caller.line as u32,
                caller.col_display as u32 + 1,
            ));
            OperandRef::from_const(bx, const_loc, bx.tcx().caller_location_ty())
        };

        // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
        // If so, the starting `source_info.span` is in the innermost inlined
        // function, and will be replaced with outer callsite spans as long
        // as the inlined functions were `#[track_caller]`.
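        //
        // (Illustrative: if `#[track_caller] fn a()` was MIR-inlined into `fn b()`,
        // a statement from `a` has a scope whose `inlined` field records `a`'s
        // instance and the call-site span in `b`; the loop below substitutes
        // that call-site span, so the reported `Location` points into `b`.)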
        loop {
            let scope_data = &self.mir.source_scopes[source_info.scope];

            if let Some((callee, callsite_span)) = scope_data.inlined {
                // Stop inside the most nested non-`#[track_caller]` function,
                // before ever reaching its caller (which is irrelevant).
                if !callee.def.requires_caller_location(tcx) {
                    return span_to_caller_location(source_info.span);
                }
                source_info.span = callsite_span;
            }

            // Skip past all of the parents with `inlined: None`.
            match scope_data.inlined_parent_scope {
                Some(parent) => source_info.scope = parent,
                None => break,
            }
        }

        // No inlined `SourceScope`s, or all of them were `#[track_caller]`.
        self.caller_location.unwrap_or_else(|| span_to_caller_location(source_info.span))
    }

    fn get_personality_slot(&mut self, bx: &mut Bx) -> PlaceRef<'tcx, Bx::Value> {
        let cx = bx.cx();
        if let Some(slot) = self.personality_slot {
            slot
        } else {
            let layout = cx.layout_of(
                cx.tcx().mk_tup(&[cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32]),
            );
            let slot = PlaceRef::alloca(bx, layout);
            self.personality_slot = Some(slot);
            slot
        }
    }

    /// Returns the landing/cleanup pad wrapper around the given basic block.
    // FIXME(eddyb) rename this to `eh_pad_for`.
    fn landing_pad_for(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
        if let Some(landing_pad) = self.landing_pads[bb] {
            return landing_pad;
        }

        let landing_pad = self.landing_pad_for_uncached(bb);
        self.landing_pads[bb] = Some(landing_pad);
        landing_pad
    }

    // FIXME(eddyb) rename this to `eh_pad_for_uncached`.
    fn landing_pad_for_uncached(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
        let llbb = self.llbb(bb);
        if base::wants_msvc_seh(self.cx.sess()) {
            let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
            let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
            let funclet = cleanup_bx.cleanup_pad(None, &[]);
            cleanup_bx.br(llbb);
            self.funclets[bb] = Some(funclet);
            cleanup_bb
        } else {
            let cleanup_llbb = Bx::append_block(self.cx, self.llfn, "cleanup");
            let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);

            let llpersonality = self.cx.eh_personality();
            let (exn0, exn1) = cleanup_bx.cleanup_landing_pad(llpersonality);

            let slot = self.get_personality_slot(&mut cleanup_bx);
            slot.storage_live(&mut cleanup_bx);
            Pair(exn0, exn1).store(&mut cleanup_bx, slot);

            cleanup_bx.br(llbb);
            cleanup_llbb
        }
    }

    fn unreachable_block(&mut self) -> Bx::BasicBlock {
        self.unreachable_block.unwrap_or_else(|| {
            let llbb = Bx::append_block(self.cx, self.llfn, "unreachable");
            let mut bx = Bx::build(self.cx, llbb);
            bx.unreachable();
            self.unreachable_block = Some(llbb);
            llbb
        })
    }

    fn terminate_block(&mut self) -> Bx::BasicBlock {
        self.terminate_block.unwrap_or_else(|| {
            let funclet;
            let llbb;
            let mut bx;
            if base::wants_msvc_seh(self.cx.sess()) {
                // This is a basic block that we're aborting the program for,
                // notably in an `extern` function. These basic blocks are inserted
                // so that we assert that `extern` functions do indeed not panic,
                // and if they do we abort the process.
                //
                // On MSVC these are tricky though (where we're doing funclets). If
                // we were to do a cleanuppad (like below) the normal functions like
                // `longjmp` would trigger the abort logic, terminating the
                // program. Instead we insert the equivalent of `catch(...)` for C++
                // which magically doesn't trigger when `longjmp` flies over this
                // block.
                //
                // Lots more discussion can be found on #48251 but this codegen is
                // modeled after clang's for:
                //
                //      try {
                //          foo();
                //      } catch (...) {
                //          bar();
                //      }
                llbb = Bx::append_block(self.cx, self.llfn, "cs_terminate");
                let cp_llbb = Bx::append_block(self.cx, self.llfn, "cp_terminate");

                let mut cs_bx = Bx::build(self.cx, llbb);
                let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);

                // The "null" here is actually a RTTI type descriptor for the
                // C++ personality function, but `catch (...)` has no type so
                // it's null. The 64 here is actually a bitfield which
                // represents that this is a catch-all block.
                bx = Bx::build(self.cx, cp_llbb);
                let null =
                    bx.const_null(bx.type_i8p_ext(bx.cx().data_layout().instruction_address_space));
                let sixty_four = bx.const_i32(64);
                funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null]));
            } else {
                llbb = Bx::append_block(self.cx, self.llfn, "terminate");
                bx = Bx::build(self.cx, llbb);

                let llpersonality = self.cx.eh_personality();
                bx.cleanup_landing_pad(llpersonality);

                funclet = None;
            }

            self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));

            let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicCannotUnwind);
            let fn_ty = bx.fn_decl_backend_type(&fn_abi);

            let llret = bx.call(fn_ty, Some(&fn_abi), fn_ptr, &[], funclet.as_ref());
            bx.do_not_inline(llret);

            bx.unreachable();

            self.terminate_block = Some(llbb);
            llbb
        })
    }

    /// Get the backend `BasicBlock` for a MIR `BasicBlock`, either already
    /// cached in `self.cached_llbbs`, or created on demand (and cached).
    // FIXME(eddyb) rename `llbb` and other `ll`-prefixed things to use a
    // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbb`).
    pub fn llbb(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
        self.try_llbb(bb).unwrap()
    }

    /// Like `llbb`, but may fail if the basic block should be skipped.
    pub fn try_llbb(&mut self, bb: mir::BasicBlock) -> Option<Bx::BasicBlock> {
        match self.cached_llbbs[bb] {
            CachedLlbb::None => {
                // FIXME(eddyb) only name the block if `fewer_names` is `false`.
                let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb));
                self.cached_llbbs[bb] = CachedLlbb::Some(llbb);
                Some(llbb)
            }
            CachedLlbb::Some(llbb) => Some(llbb),
            CachedLlbb::Skip => None,
        }
    }

    fn make_return_dest(
        &mut self,
        bx: &mut Bx,
        dest: mir::Place<'tcx>,
        fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
        llargs: &mut Vec<Bx::Value>,
        is_intrinsic: bool,
    ) -> ReturnDest<'tcx, Bx::Value> {
        // If the return is ignored, we can just return a do-nothing `ReturnDest`.
        if fn_ret.is_ignore() {
            return ReturnDest::Nothing;
        }
        let dest = if let Some(index) = dest.as_local() {
            match self.locals[index] {
                LocalRef::Place(dest) => dest,
                LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
                LocalRef::PendingOperand => {
                    // Handle temporary places, specifically `Operand` ones, as
                    // they don't have `alloca`s.
                    return if fn_ret.is_indirect() {
                        // Odd, but possible, case: we have an operand temporary,
                        // but the calling convention has an indirect return.
                        let tmp = PlaceRef::alloca(bx, fn_ret.layout);
                        tmp.storage_live(bx);
                        llargs.push(tmp.llval);
                        ReturnDest::IndirectOperand(tmp, index)
                    } else if is_intrinsic {
                        // Currently, intrinsics always need a location to store
                        // the result, so we create a temporary `alloca` for the
                        // result.
                        let tmp = PlaceRef::alloca(bx, fn_ret.layout);
                        tmp.storage_live(bx);
                        ReturnDest::IndirectOperand(tmp, index)
                    } else {
                        ReturnDest::DirectOperand(index)
                    };
                }
                LocalRef::Operand(_) => {
                    bug!("place local already assigned to");
                }
            }
        } else {
            self.codegen_place(
                bx,
                mir::PlaceRef { local: dest.local, projection: &dest.projection },
            )
        };
        if fn_ret.is_indirect() {
            if dest.align < dest.layout.align.abi {
                // Currently, MIR code generation does not create calls
                // that store directly to fields of packed structs (in
                // fact, the calls it creates write only to temps).
                //
                // If someone changes that, please update this code path
                // to create a temporary.
                span_bug!(self.mir.span, "can't directly store to unaligned value");
            }
            llargs.push(dest.llval);
            ReturnDest::Nothing
        } else {
            ReturnDest::Store(dest)
        }
    }

    // Stores the return value of a function call into its final location.
    fn store_return(
        &mut self,
        bx: &mut Bx,
        dest: ReturnDest<'tcx, Bx::Value>,
        ret_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        llval: Bx::Value,
    ) {
        use self::ReturnDest::*;

        match dest {
            Nothing => (),
            Store(dst) => bx.store_arg(&ret_abi, llval, dst),
            IndirectOperand(tmp, index) => {
                let op = bx.load_operand(tmp);
                tmp.storage_dead(bx);
                self.locals[index] = LocalRef::Operand(op);
                self.debug_introduce_local(bx, index);
            }
            DirectOperand(index) => {
                // If there is a cast, we have to store and reload.
                let op = if let PassMode::Cast(..) = ret_abi.mode {
                    let tmp = PlaceRef::alloca(bx, ret_abi.layout);
                    tmp.storage_live(bx);
                    bx.store_arg(&ret_abi, llval, tmp);
                    let op = bx.load_operand(tmp);
                    tmp.storage_dead(bx);
                    op
                } else {
                    OperandRef::from_immediate_or_packed_pair(bx, llval, ret_abi.layout)
                };
                self.locals[index] = LocalRef::Operand(op);
                self.debug_introduce_local(bx, index);
            }
        }
    }
}

enum ReturnDest<'tcx, V> {
    // Do nothing; the return value is indirect or ignored.
    Nothing,
    // Store the return value to the pointer.
    Store(PlaceRef<'tcx, V>),
    // Store an indirect return value to an operand local place.
    IndirectOperand(PlaceRef<'tcx, V>, mir::Local),
    // Store a direct return value to an operand local place.
    DirectOperand(mir::Local),
}