use super::operand::OperandRef;
use super::operand::OperandValue::{Immediate, Pair, Ref};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate};
use crate::meth;
use crate::traits::*;
use crate::MemFlags;

use rustc_ast as ast;
use rustc_hir::lang_items::LangItem;
use rustc_index::vec::Idx;
use rustc_middle::mir::interpret::ConstValue;
use rustc_middle::mir::AssertKind;
use rustc_middle::mir::{self, SwitchTargets};
use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
use rustc_span::source_map::Span;
use rustc_span::{sym, Symbol};
use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
use rustc_target::abi::{self, LayoutOf};
use rustc_target::spec::abi::Abi;

/// Used by `FunctionCx::codegen_terminator` for emitting common patterns
/// e.g., creating a basic block, calling a function, etc.
struct TerminatorCodegenHelper<'tcx> {
    bb: mir::BasicBlock,
    terminator: &'tcx mir::Terminator<'tcx>,
    funclet_bb: Option<mir::BasicBlock>,
}

impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
    /// Returns the associated funclet from `FunctionCx::funclets` for the
    /// `funclet_bb` member if it is not `None`.
    fn funclet<'b, Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &'b mut FunctionCx<'a, 'tcx, Bx>,
    ) -> Option<&'b Bx::Funclet> {
        match self.funclet_bb {
            Some(funcl) => fx.funclets[funcl].as_ref(),
            None => None,
        }
    }

    /// Gets the basic block for `target`, and a flag indicating whether the
    /// edge into it crosses a cleanup-funclet boundary (see `llblock`).
    fn lltarget<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        target: mir::BasicBlock,
    ) -> (Bx::BasicBlock, bool) {
        let span = self.terminator.source_info.span;
        let lltarget = fx.blocks[target];
        let target_funclet = fx.cleanup_kinds[target].funclet_bb(target);
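        // Four cases: the same (or no) funclet on both ends is a plain
        // branch; jumping *into* cleanup needs a landing pad on GNU-style
        // unwinding, or a `cleanupret` trampoline under MSVC SEH (signalled
        // by returning `true`); jumping *out* of cleanup should never happen.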
        match (self.funclet_bb, target_funclet) {
            (None, None) => (lltarget, false),
            (Some(f), Some(t_f)) if f == t_f || !base::wants_msvc_seh(fx.cx.tcx().sess) => {
                (lltarget, false)
            }
            // jump *into* cleanup - need a landing pad if GNU
            (None, Some(_)) => (fx.landing_pad_to(target), false),
            (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", self.terminator),
            (Some(_), Some(_)) => (fx.landing_pad_to(target), true),
        }
    }

    /// Create a basic block.
    fn llblock<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        target: mir::BasicBlock,
    ) -> Bx::BasicBlock {
        let (lltarget, is_cleanupret) = self.lltarget(fx, target);
        if is_cleanupret {
            // MSVC cross-funclet jump - need a trampoline

            debug!("llblock: creating cleanup trampoline for {:?}", target);
            let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
            let mut trampoline = fx.new_block(name);
            trampoline.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
            trampoline.llbb()
        } else {
            lltarget
        }
    }

    fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        target: mir::BasicBlock,
    ) {
        let (lltarget, is_cleanupret) = self.lltarget(fx, target);
        if is_cleanupret {
            // micro-optimization: generate a `ret` rather than a jump
            // to a trampoline.
            bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
        } else {
            bx.br(lltarget);
        }
    }

    /// Call `fn_ptr` of `fn_abi` with the arguments `llargs`, the optional
    /// return destination `destination` and the cleanup function `cleanup`.
    fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        fx: &mut FunctionCx<'a, 'tcx, Bx>,
        bx: &mut Bx,
        fn_abi: FnAbi<'tcx, Ty<'tcx>>,
        fn_ptr: Bx::Value,
        llargs: &[Bx::Value],
        destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
        cleanup: Option<mir::BasicBlock>,
    ) {
        // If there is a cleanup block and the function we're calling can unwind, then
        // do an invoke, otherwise do a call.
        if let Some(cleanup) = cleanup.filter(|_| fn_abi.can_unwind) {
            let ret_bx = if let Some((_, target)) = destination {
                fx.blocks[target]
            } else {
                fx.unreachable_block()
            };
            let invokeret =
                bx.invoke(fn_ptr, &llargs, ret_bx, self.llblock(fx, cleanup), self.funclet(fx));
            bx.apply_attrs_callsite(&fn_abi, invokeret);

            if let Some((ret_dest, target)) = destination {
                let mut ret_bx = fx.build_block(target);
                fx.set_debug_loc(&mut ret_bx, self.terminator.source_info);
                fx.store_return(&mut ret_bx, ret_dest, &fn_abi.ret, invokeret);
            }
        } else {
            let llret = bx.call(fn_ptr, &llargs, self.funclet(fx));
            bx.apply_attrs_callsite(&fn_abi, llret);
            if fx.mir[self.bb].is_cleanup {
                // Cleanup is always the cold path. Don't inline
                // drop glue. Also, when there is a deeply-nested
                // struct, there are "symmetry" issues that cause
                // exponential inlining - see issue #41696.
                bx.do_not_inline(llret);
            }

            if let Some((ret_dest, target)) = destination {
                fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
                self.funclet_br(fx, bx, target);
            } else {
                bx.unreachable();
            }
        }
    }

    // Generate sideeffect intrinsic if jumping to any of the targets can form
    // a loop.
    fn maybe_sideeffect<Bx: BuilderMethods<'a, 'tcx>>(
        &self,
        mir: &'tcx mir::Body<'tcx>,
        bx: &mut Bx,
        targets: &[mir::BasicBlock],
    ) {
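        // `llvm.sideeffect` exists to defeat LLVM's forward-progress
        // assumption: a backwards jump with no other observable effects can
        // otherwise be miscompiled (see the `Goto` arm of
        // `codegen_terminator` below).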
        if bx.tcx().sess.opts.debugging_opts.insert_sideeffect {
            if targets.iter().any(|&target| {
                target <= self.bb
                    && target.start_location().is_predecessor_of(self.bb.start_location(), mir)
            }) {
                bx.sideeffect(false);
            }
        }
    }
}

/// Codegen implementations for some terminator variants.
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// Generates code for a `Resume` terminator.
    fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, mut bx: Bx) {
        if let Some(funclet) = helper.funclet(self) {
            bx.cleanup_ret(funclet, None);
        } else {
            let slot = self.get_personality_slot(&mut bx);
            let lp0 = slot.project_field(&mut bx, 0);
            let lp0 = bx.load_operand(lp0).immediate();
            let lp1 = slot.project_field(&mut bx, 1);
            let lp1 = bx.load_operand(lp1).immediate();
            slot.storage_dead(&mut bx);
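
            // Reassemble the two saved halves into the aggregate value that
            // `resume` expects: the same pair the original `landingpad`
            // produced (see `landing_pad_uncached`).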
            let mut lp = bx.const_undef(self.landing_pad_type());
            lp = bx.insert_value(lp, lp0, 0);
            lp = bx.insert_value(lp, lp1, 1);
            bx.resume(lp);
        }
    }

    fn codegen_switchint_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        mut bx: Bx,
        discr: &mir::Operand<'tcx>,
        switch_ty: Ty<'tcx>,
        targets: &SwitchTargets,
    ) {
        let discr = self.codegen_operand(&mut bx, &discr);
        // `switch_ty` is redundant, sanity-check that.
        assert_eq!(discr.layout.ty, switch_ty);
        helper.maybe_sideeffect(self.mir, &mut bx, targets.all_targets());

        let mut target_iter = targets.iter();
        if target_iter.len() == 1 {
            // If there are two targets (one conditional, one fallback), emit br instead of switch
            let (test_value, target) = target_iter.next().unwrap();
            let lltrue = helper.llblock(self, target);
            let llfalse = helper.llblock(self, targets.otherwise());
            if switch_ty == bx.tcx().types.bool {
                // Don't generate trivial icmps when switching on bool.
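                // `test_value` is the discriminant value that jumps to
                // `target`, so testing against `0` (false) routes a true
                // discriminant to the `otherwise` block, and vice versa.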
                match test_value {
                    0 => bx.cond_br(discr.immediate(), llfalse, lltrue),
                    1 => bx.cond_br(discr.immediate(), lltrue, llfalse),
                    _ => bug!(),
                }
            } else {
                let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
                let llval = bx.const_uint_big(switch_llty, test_value);
                let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
                bx.cond_br(cmp, lltrue, llfalse);
            }
        } else {
            bx.switch(
                discr.immediate(),
                helper.llblock(self, targets.otherwise()),
                target_iter.map(|(value, target)| (value, helper.llblock(self, target))),
            );
        }
    }

    fn codegen_return_terminator(&mut self, mut bx: Bx) {
        // Call `va_end` if this is the definition of a C-variadic function.
        if self.fn_abi.c_variadic {
            // The `VaList` "spoofed" argument is just after all the real arguments.
            let va_list_arg_idx = self.fn_abi.args.len();
            match self.locals[mir::Local::new(1 + va_list_arg_idx)] {
                LocalRef::Place(va_list) => {
                    bx.va_end(va_list.llval);
                }
                _ => bug!("C-variadic function must have a `VaList` place"),
            }
        }
        if self.fn_abi.ret.layout.abi.is_uninhabited() {
            // Functions with uninhabited return values are marked `noreturn`,
            // so we should make sure that we never actually do.
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            // `abort` does not terminate the block, so we still need to generate
            // an `unreachable` terminator after it.
            bx.unreachable();
            return;
        }
        let llval = match self.fn_abi.ret.mode {
            PassMode::Ignore | PassMode::Indirect(..) => {
                bx.ret_void();
                return;
            }

            PassMode::Direct(_) | PassMode::Pair(..) => {
                let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
                if let Ref(llval, _, align) = op.val {
                    bx.load(llval, align)
                } else {
                    op.immediate_or_packed_pair(&mut bx)
                }
            }

            PassMode::Cast(cast_ty) => {
                let op = match self.locals[mir::RETURN_PLACE] {
                    LocalRef::Operand(Some(op)) => op,
                    LocalRef::Operand(None) => bug!("use of return before def"),
                    LocalRef::Place(cg_place) => OperandRef {
                        val: Ref(cg_place.llval, None, cg_place.align),
                        layout: cg_place.layout,
                    },
                    LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
                };
                let llslot = match op.val {
                    Immediate(_) | Pair(..) => {
                        let scratch = PlaceRef::alloca(&mut bx, self.fn_abi.ret.layout);
                        op.val.store(&mut bx, scratch);
                        scratch.llval
                    }
                    Ref(llval, _, align) => {
                        assert_eq!(align, op.layout.align.abi, "return place is unaligned!");
                        llval
                    }
                };
                let addr = bx.pointercast(llslot, bx.type_ptr_to(bx.cast_backend_type(&cast_ty)));
                bx.load(addr, self.fn_abi.ret.layout.align.abi)
            }
        };
        bx.ret(llval);
    }

    fn codegen_drop_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        mut bx: Bx,
        location: mir::Place<'tcx>,
        target: mir::BasicBlock,
        unwind: Option<mir::BasicBlock>,
    ) {
        let ty = location.ty(self.mir, bx.tcx()).ty;
        let ty = self.monomorphize(&ty);
        let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);

        if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
            // we don't actually need to drop anything.
            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
            helper.funclet_br(self, &mut bx, target);
            return;
        }

        let place = self.codegen_place(&mut bx, location.as_ref());
        let (args1, args2);
        let mut args = if let Some(llextra) = place.llextra {
            args2 = [place.llval, llextra];
            &args2[..]
        } else {
            args1 = [place.llval];
            &args1[..]
        };
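        // For a `dyn` receiver the concrete drop glue is only known at run
        // time: it is fetched from the `meth::DESTRUCTOR` slot of the vtable
        // (the metadata half of the pointer pair above). Everything else
        // calls the monomorphized `drop_in_place` instance directly.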
        let (drop_fn, fn_abi) = match ty.kind() {
            // FIXME(eddyb) perhaps move some of this logic into
            // `Instance::resolve_drop_in_place`?
            ty::Dynamic(..) => {
                let virtual_drop = Instance {
                    def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
                    substs: drop_fn.substs,
                };
                let fn_abi = FnAbi::of_instance(&bx, virtual_drop, &[]);
                let vtable = args[1];
                args = &args[..1];
                (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_abi), fn_abi)
            }
            _ => (bx.get_fn_addr(drop_fn), FnAbi::of_instance(&bx, drop_fn, &[])),
        };
        helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
        helper.do_call(
            self,
            &mut bx,
            fn_abi,
            drop_fn,
            args,
            Some((ReturnDest::Nothing, target)),
            unwind,
        );
    }

    fn codegen_assert_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        mut bx: Bx,
        terminator: &mir::Terminator<'tcx>,
        cond: &mir::Operand<'tcx>,
        expected: bool,
        msg: &mir::AssertMessage<'tcx>,
        target: mir::BasicBlock,
        cleanup: Option<mir::BasicBlock>,
    ) {
        let span = terminator.source_info.span;
        let cond = self.codegen_operand(&mut bx, cond).immediate();
        let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);

        // This case can currently arise only from functions marked
        // with #[rustc_inherit_overflow_checks] and inlined from
        // another crate (mostly core::num generic/#[inline] fns),
        // while the current crate doesn't use overflow checks.
        // NOTE: Unlike binops, negation doesn't have its own
        // checked operation, just a comparison with the minimum
        // value, so we have to check for the assert message.
        if !bx.check_overflow() {
            if let AssertKind::OverflowNeg(_) = *msg {
                const_cond = Some(expected);
            }
        }

        // Don't codegen the panic block if success is known.
        if const_cond == Some(expected) {
            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
            helper.funclet_br(self, &mut bx, target);
            return;
        }

        // Pass the condition through llvm.expect for branch hinting.
        let cond = bx.expect(cond, expected);

        // Create the failure block and the conditional branch to it.
        let lltarget = helper.llblock(self, target);
        let panic_block = self.new_block("panic");
        helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
        if expected {
            bx.cond_br(cond, lltarget, panic_block.llbb());
        } else {
            bx.cond_br(cond, panic_block.llbb(), lltarget);
        }

        // After this point, bx is the block for the call to panic.
        bx = panic_block;
        self.set_debug_loc(&mut bx, terminator.source_info);

        // Get the location information.
        let location = self.get_caller_location(&mut bx, terminator.source_info).immediate();

        // Put together the arguments to the panic entry point.
        let (lang_item, args) = match msg {
            AssertKind::BoundsCheck { ref len, ref index } => {
                let len = self.codegen_operand(&mut bx, len).immediate();
                let index = self.codegen_operand(&mut bx, index).immediate();
                // It's `fn panic_bounds_check(index: usize, len: usize)`,
                // and `#[track_caller]` adds an implicit third argument.
                (LangItem::PanicBoundsCheck, vec![index, len, location])
            }
            _ => {
                let msg_str = Symbol::intern(msg.description());
                let msg = bx.const_str(msg_str);
                // It's `pub fn panic(expr: &str)`, with the wide reference being passed
                // as two arguments, and `#[track_caller]` adds an implicit third argument.
                (LangItem::Panic, vec![msg.0, msg.1, location])
            }
        };

        // Obtain the panic entry point.
        let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
        let instance = ty::Instance::mono(bx.tcx(), def_id);
        let fn_abi = FnAbi::of_instance(&bx, instance, &[]);
        let llfn = bx.get_fn_addr(instance);

        // Codegen the actual panic invoke/call.
        helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup);
    }

    /// Returns `true` if this is indeed a panic intrinsic and codegen is done.
    fn codegen_panic_intrinsic(
        &mut self,
        helper: &TerminatorCodegenHelper<'tcx>,
        bx: &mut Bx,
        intrinsic: Option<Symbol>,
        instance: Option<Instance<'tcx>>,
        source_info: mir::SourceInfo,
        destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>,
        cleanup: Option<mir::BasicBlock>,
    ) -> bool {
        // Emit a panic or a no-op for `assert_*` intrinsics.
        // These are intrinsics that compile to panics so that we can get a message
        // which mentions the offending type, even from a const context.
        #[derive(Debug, PartialEq)]
        enum AssertIntrinsic {
            Inhabited,
            ZeroValid,
            UninitValid,
        }
        let panic_intrinsic = intrinsic.and_then(|i| match i {
            sym::assert_inhabited => Some(AssertIntrinsic::Inhabited),
            sym::assert_zero_valid => Some(AssertIntrinsic::ZeroValid),
            sym::assert_uninit_valid => Some(AssertIntrinsic::UninitValid),
            _ => None,
        });
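        // `might_permit_raw_init` below decides, for the monomorphized type,
        // whether a zeroed or uninitialized value would definitely be
        // invalid; only then is a panic emitted, otherwise the intrinsic
        // lowers to a plain branch to the destination block.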
        if let Some(intrinsic) = panic_intrinsic {
            use AssertIntrinsic::*;
            let ty = instance.unwrap().substs.type_at(0);
            let layout = bx.layout_of(ty);
            let do_panic = match intrinsic {
                Inhabited => layout.abi.is_uninhabited(),
                // We unwrap as the error type is `!`.
                ZeroValid => !layout.might_permit_raw_init(bx, /*zero:*/ true).unwrap(),
                // We unwrap as the error type is `!`.
                UninitValid => !layout.might_permit_raw_init(bx, /*zero:*/ false).unwrap(),
            };
            if do_panic {
                let msg_str = with_no_trimmed_paths(|| {
                    if layout.abi.is_uninhabited() {
                        // Use this error even for the other intrinsics as it is more precise.
                        format!("attempted to instantiate uninhabited type `{}`", ty)
                    } else if intrinsic == ZeroValid {
                        format!("attempted to zero-initialize type `{}`, which is invalid", ty)
                    } else {
                        format!("attempted to leave type `{}` uninitialized, which is invalid", ty)
                    }
                });
                let msg = bx.const_str(Symbol::intern(&msg_str));
                let location = self.get_caller_location(bx, source_info).immediate();

                // Obtain the panic entry point.
                // FIXME: dedup this with `codegen_assert_terminator` above.
                let def_id =
                    common::langcall(bx.tcx(), Some(source_info.span), "", LangItem::Panic);
                let instance = ty::Instance::mono(bx.tcx(), def_id);
                let fn_abi = FnAbi::of_instance(bx, instance, &[]);
                let llfn = bx.get_fn_addr(instance);

                if let Some((_, target)) = destination.as_ref() {
                    helper.maybe_sideeffect(self.mir, bx, &[*target]);
                }
                // Codegen the actual panic invoke/call.
                helper.do_call(
                    self,
                    bx,
                    fn_abi,
                    llfn,
                    &[msg.0, msg.1, location],
                    destination.as_ref().map(|(_, bb)| (ReturnDest::Nothing, *bb)),
                    cleanup,
                );
            } else {
                // a NOP
                let target = destination.as_ref().unwrap().1;
                helper.maybe_sideeffect(self.mir, bx, &[target]);
                helper.funclet_br(self, bx, target)
            }
            true
        } else {
            false
        }
    }

    fn codegen_call_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        mut bx: Bx,
        terminator: &mir::Terminator<'tcx>,
        func: &mir::Operand<'tcx>,
        args: &Vec<mir::Operand<'tcx>>,
        destination: &Option<(mir::Place<'tcx>, mir::BasicBlock)>,
        cleanup: Option<mir::BasicBlock>,
        fn_span: Span,
    ) {
        let source_info = terminator.source_info;
        let span = source_info.span;

        // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
        let callee = self.codegen_operand(&mut bx, func);

        let (instance, mut llfn) = match *callee.layout.ty.kind() {
            ty::FnDef(def_id, substs) => (
                Some(
                    ty::Instance::resolve(bx.tcx(), ty::ParamEnv::reveal_all(), def_id, substs)
                        .unwrap()
                        .unwrap()
                        .polymorphize(bx.tcx()),
                ),
                None,
            ),
            ty::FnPtr(_) => (None, Some(callee.immediate())),
            _ => bug!("{} is not callable", callee.layout.ty),
        };
        let def = instance.map(|i| i.def);

        if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
            // Empty drop glue; a no-op.
            let &(_, target) = destination.as_ref().unwrap();
            helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
            helper.funclet_br(self, &mut bx, target);
            return;
        }

        // FIXME(eddyb) avoid computing this if possible, when `instance` is
        // available - right now `sig` is only needed for getting the `abi`
        // and figuring out how many extra args were passed to a C-variadic `fn`.
        let sig = callee.layout.ty.fn_sig(bx.tcx());
        let abi = sig.abi();

        // Handle intrinsics old codegen wants Expr's for, ourselves.
        let intrinsic = match def {
            Some(ty::InstanceDef::Intrinsic(def_id)) => Some(bx.tcx().item_name(def_id)),
            _ => None,
        };

        let extra_args = &args[sig.inputs().skip_binder().len()..];
        let extra_args = extra_args
            .iter()
            .map(|op_arg| {
                let op_ty = op_arg.ty(self.mir, bx.tcx());
                self.monomorphize(&op_ty)
            })
            .collect::<Vec<_>>();

        let fn_abi = match instance {
            Some(instance) => FnAbi::of_instance(&bx, instance, &extra_args),
            None => FnAbi::of_fn_ptr(&bx, sig, &extra_args),
        };

        if intrinsic == Some(sym::transmute) {
            if let Some(destination_ref) = destination.as_ref() {
                let &(dest, target) = destination_ref;
                self.codegen_transmute(&mut bx, &args[0], dest);
                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
                helper.funclet_br(self, &mut bx, target);
            } else {
                // If we are trying to transmute to an uninhabited type,
                // it is likely there is no allotted destination. In fact,
                // transmuting to an uninhabited type is UB, which means
                // we can do what we like. Here, we declare that transmuting
                // into an uninhabited type is impossible, so anything following
                // it must be unreachable.
                assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited);
                bx.unreachable();
            }
            return;
        }

        if self.codegen_panic_intrinsic(
            &helper,
            &mut bx,
            intrinsic,
            instance,
            source_info,
            destination,
            cleanup,
        ) {
            return;
        }

        // The arguments we'll be passing. Plus one to account for outptr, if used.
        let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
        let mut llargs = Vec::with_capacity(arg_count);

        // Prepare the return value destination
        let ret_dest = if let Some((dest, _)) = *destination {
            let is_intrinsic = intrinsic.is_some();
            self.make_return_dest(&mut bx, dest, &fn_abi.ret, &mut llargs, is_intrinsic)
        } else {
            ReturnDest::Nothing
        };

        if intrinsic == Some(sym::caller_location) {
            if let Some((_, target)) = destination.as_ref() {
                let location = self
                    .get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });

                if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
                    location.val.store(&mut bx, tmp);
                }
                self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());

                helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
                helper.funclet_br(self, &mut bx, *target);
            }
            return;
        }

        if intrinsic.is_some() && intrinsic != Some(sym::drop_in_place) {
            let intrinsic = intrinsic.unwrap();
            let dest = match ret_dest {
                _ if fn_abi.ret.is_indirect() => llargs[0],
                ReturnDest::Nothing => {
                    bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
                }
                ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
                ReturnDest::DirectOperand(_) => {
                    bug!("Cannot use direct operand with an intrinsic call")
                }
            };

            let args: Vec<_> = args
                .iter()
                .enumerate()
                .map(|(i, arg)| {
                    // The indices passed to simd_shuffle* in the
                    // third argument must be constant. This is
                    // checked by const-qualification, which also
                    // promotes any complex rvalues to constants.
                    if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
                        if let mir::Operand::Constant(constant) = arg {
                            let c = self.eval_mir_constant(constant);
                            let (llval, ty) = self.simd_shuffle_indices(
                                &bx,
                                constant.span,
                                constant.literal.ty,
                                c,
                            );
                            return OperandRef { val: Immediate(llval), layout: bx.layout_of(ty) };
                        } else {
                            span_bug!(span, "shuffle indices must be constant");
                        }
                    }

                    self.codegen_operand(&mut bx, arg)
                })
                .collect();

            Self::codegen_intrinsic_call(
                &mut bx,
                *instance.as_ref().unwrap(),
                &fn_abi,
                &args,
                dest,
                span,
            );

            if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
                self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
            }

            if let Some((_, target)) = *destination {
                helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
                helper.funclet_br(self, &mut bx, target);
            } else {
                bx.unreachable();
            }

            return;
        }

        // Split the rust-call tupled arguments off.
        let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
            let (tup, args) = args.split_last().unwrap();
            (args, Some(tup))
        } else {
            (&args[..], None)
        };

        'make_args: for (i, arg) in first_args.iter().enumerate() {
            let mut op = self.codegen_operand(&mut bx, arg);

            if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
                if let Pair(..) = op.val {
                    // In the case of Rc<Self>, we need to explicitly pass a
                    // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
                    // that is understood elsewhere in the compiler as a method on
                    // `dyn Trait`.
                    // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
                    // we get a value of a built-in pointer type
                    'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
                        && !op.layout.ty.is_region_ptr()
                    {
                        for i in 0..op.layout.fields.count() {
                            let field = op.extract_field(&mut bx, i);
                            if !field.layout.is_zst() {
                                // we found the one non-zero-sized field that is allowed
                                // now find *its* non-zero-sized field, or stop if it's a
                                // pointer
                                op = field;
                                continue 'descend_newtypes;
                            }
                        }

                        span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
                    }

                    // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
                    // data pointer and vtable. Look up the method in the vtable, and pass
                    // the data pointer as the first argument
                    match op.val {
                        Pair(data_ptr, meta) => {
                            llfn = Some(
                                meth::VirtualIndex::from_index(idx).get_fn(&mut bx, meta, &fn_abi),
                            );
                            llargs.push(data_ptr);
                            continue 'make_args;
                        }
                        other => bug!("expected a Pair, got {:?}", other),
                    }
                } else if let Ref(data_ptr, Some(meta), _) = op.val {
                    // by-value dynamic dispatch
                    llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(&mut bx, meta, &fn_abi));
                    llargs.push(data_ptr);
                    continue;
                } else {
                    span_bug!(span, "can't codegen a virtual call on {:?}", op);
                }
            }

            // The callee needs to own the argument memory if we pass it
            // by-ref, so make a local copy of non-immediate constants.
            match (arg, op.val) {
                (&mir::Operand::Copy(_), Ref(_, None, _))
                | (&mir::Operand::Constant(_), Ref(_, None, _)) => {
                    let tmp = PlaceRef::alloca(&mut bx, op.layout);
                    op.val.store(&mut bx, tmp);
                    op.val = Ref(tmp.llval, None, tmp.align);
                }
                _ => {}
            }

            self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]);
        }
        if let Some(tup) = untuple {
            self.codegen_arguments_untupled(
                &mut bx,
                tup,
                &mut llargs,
                &fn_abi.args[first_args.len()..],
            )
        }

        let needs_location =
            instance.map_or(false, |i| i.def.requires_caller_location(self.cx.tcx()));
        if needs_location {
            assert_eq!(
                fn_abi.args.len(),
                args.len() + 1,
                "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR",
            );
            let location =
                self.get_caller_location(&mut bx, mir::SourceInfo { span: fn_span, ..source_info });
            debug!(
                "codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
                terminator, location, fn_span
            );

            let last_arg = fn_abi.args.last().unwrap();
            self.codegen_argument(&mut bx, location, &mut llargs, last_arg);
        }

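        // `llfn` is set only on the indirect paths above (fn pointers and
        // virtual methods); for direct calls the address comes from the
        // monomorphized `instance`.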
        let fn_ptr = match (llfn, instance) {
            (Some(llfn), _) => llfn,
            (None, Some(instance)) => bx.get_fn_addr(instance),
            _ => span_bug!(span, "no llfn for call"),
        };

        if let Some((_, target)) = destination.as_ref() {
            helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
        }
        helper.do_call(
            self,
            &mut bx,
            fn_abi,
            fn_ptr,
            &llargs,
            destination.as_ref().map(|&(_, target)| (ret_dest, target)),
            cleanup,
        );
    }

    fn codegen_asm_terminator(
        &mut self,
        helper: TerminatorCodegenHelper<'tcx>,
        mut bx: Bx,
        terminator: &mir::Terminator<'tcx>,
        template: &[ast::InlineAsmTemplatePiece],
        operands: &[mir::InlineAsmOperand<'tcx>],
        options: ast::InlineAsmOptions,
        line_spans: &[Span],
        destination: Option<mir::BasicBlock>,
    ) {
        let span = terminator.source_info.span;

        let operands: Vec<_> = operands
            .iter()
            .map(|op| match *op {
                mir::InlineAsmOperand::In { reg, ref value } => {
                    let value = self.codegen_operand(&mut bx, value);
                    InlineAsmOperandRef::In { reg, value }
                }
                mir::InlineAsmOperand::Out { reg, late, ref place } => {
                    let place = place.map(|place| self.codegen_place(&mut bx, place.as_ref()));
                    InlineAsmOperandRef::Out { reg, late, place }
                }
                mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
                    let in_value = self.codegen_operand(&mut bx, in_value);
                    let out_place =
                        out_place.map(|out_place| self.codegen_place(&mut bx, out_place.as_ref()));
                    InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
                }
                mir::InlineAsmOperand::Const { ref value } => {
                    if let mir::Operand::Constant(constant) = value {
                        let const_value = self
                            .eval_mir_constant(constant)
                            .unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved"));
                        let ty = constant.literal.ty;
                        let size = bx.layout_of(ty).size;
                        let scalar = match const_value {
                            ConstValue::Scalar(s) => s,
                            _ => span_bug!(
                                span,
                                "expected Scalar for promoted asm const, but got {:#?}",
                                const_value
                            ),
                        };
                        let value = scalar.assert_bits(size);
                        let string = match ty.kind() {
                            ty::Uint(_) => value.to_string(),
                            ty::Int(int_ty) => {
                                match int_ty.normalize(bx.tcx().sess.target.pointer_width) {
                                    ast::IntTy::I8 => (value as i8).to_string(),
                                    ast::IntTy::I16 => (value as i16).to_string(),
                                    ast::IntTy::I32 => (value as i32).to_string(),
                                    ast::IntTy::I64 => (value as i64).to_string(),
                                    ast::IntTy::I128 => (value as i128).to_string(),
                                    ast::IntTy::Isize => unreachable!(),
                                }
                            }
                            ty::Float(ast::FloatTy::F32) => {
                                f32::from_bits(value as u32).to_string()
                            }
                            ty::Float(ast::FloatTy::F64) => {
                                f64::from_bits(value as u64).to_string()
                            }
                            _ => span_bug!(span, "asm const has bad type {}", ty),
                        };
                        InlineAsmOperandRef::Const { string }
                    } else {
                        span_bug!(span, "asm const is not a constant");
                    }
                }
                mir::InlineAsmOperand::SymFn { ref value } => {
                    let literal = self.monomorphize(&value.literal);
                    if let ty::FnDef(def_id, substs) = *literal.ty.kind() {
                        let instance = ty::Instance::resolve_for_fn_ptr(
                            bx.tcx(),
                            ty::ParamEnv::reveal_all(),
                            def_id,
                            substs,
                        )
                        .unwrap();
                        InlineAsmOperandRef::SymFn { instance }
                    } else {
                        span_bug!(span, "invalid type for asm sym (fn)");
                    }
                }
                mir::InlineAsmOperand::SymStatic { def_id } => {
                    InlineAsmOperandRef::SymStatic { def_id }
                }
            })
            .collect();

        bx.codegen_inline_asm(template, &operands, options, line_spans);

        if let Some(target) = destination {
            helper.funclet_br(self, &mut bx, target);
        } else {
            bx.unreachable();
        }
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
        let mut bx = self.build_block(bb);
        let mir = self.mir;
        let data = &mir[bb];

        debug!("codegen_block({:?}={:?})", bb, data);

        for statement in &data.statements {
            bx = self.codegen_statement(bx, statement);
        }

        self.codegen_terminator(bx, bb, data.terminator());
    }

    fn codegen_terminator(
        &mut self,
        mut bx: Bx,
        bb: mir::BasicBlock,
        terminator: &'tcx mir::Terminator<'tcx>,
    ) {
        debug!("codegen_terminator: {:?}", terminator);

        // Create the cleanup bundle, if needed.
        let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
        let helper = TerminatorCodegenHelper { bb, terminator, funclet_bb };

        self.set_debug_loc(&mut bx, terminator.source_info);
        match terminator.kind {
            mir::TerminatorKind::Resume => self.codegen_resume_terminator(helper, bx),

            mir::TerminatorKind::Abort => {
                bx.abort();
                // `abort` does not terminate the block, so we still need to generate
                // an `unreachable` terminator after it.
                bx.unreachable();
            }

            mir::TerminatorKind::Goto { target } => {
                if bb == target {
                    // This is an unconditional branch back to this same basic
                    // block. That means we have something like a `loop {}`
                    // statement. Currently LLVM miscompiles this because it
                    // assumes forward progress. We want to prevent this in all
                    // cases, but that has a fairly high cost to compile times
                    // currently. Instead, try to handle this specific case
                    // which comes up commonly in practice (e.g., in embedded
                    // code).
                    //
                    // The `true` here means we insert side effects regardless
                    // of -Zinsert-sideeffect being passed on unconditional
                    // branching to the same basic block.
                    bx.sideeffect(true);
                } else {
                    helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
                }
                helper.funclet_br(self, &mut bx, target);
            }

            mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref targets } => {
                self.codegen_switchint_terminator(helper, bx, discr, switch_ty, targets);
            }

            mir::TerminatorKind::Return => {
                self.codegen_return_terminator(bx);
            }

            mir::TerminatorKind::Unreachable => {
                bx.unreachable();
            }

            mir::TerminatorKind::Drop { place, target, unwind } => {
                self.codegen_drop_terminator(helper, bx, place, target, unwind);
            }

            mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
                self.codegen_assert_terminator(
                    helper, bx, terminator, cond, expected, msg, target, cleanup,
                );
            }

            mir::TerminatorKind::DropAndReplace { .. } => {
                bug!("undesugared DropAndReplace in codegen: {:?}", terminator);
            }

            mir::TerminatorKind::Call {
                ref func,
                ref args,
                ref destination,
                cleanup,
                from_hir_call: _,
                fn_span,
            } => {
                self.codegen_call_terminator(
                    helper,
                    bx,
                    terminator,
                    func,
                    args,
                    destination,
                    cleanup,
                    fn_span,
                );
            }
            mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => {
                bug!("generator ops in codegen")
            }
            mir::TerminatorKind::FalseEdge { .. } | mir::TerminatorKind::FalseUnwind { .. } => {
                bug!("borrowck false edges in codegen")
            }

            mir::TerminatorKind::InlineAsm {
                template,
                ref operands,
                options,
                line_spans,
                destination,
            } => {
                self.codegen_asm_terminator(
                    helper,
                    bx,
                    terminator,
                    template,
                    operands,
                    options,
                    line_spans,
                    destination,
                );
            }
        }
    }

    fn codegen_argument(
        &mut self,
        bx: &mut Bx,
        op: OperandRef<'tcx, Bx::Value>,
        llargs: &mut Vec<Bx::Value>,
        arg: &ArgAbi<'tcx, Ty<'tcx>>,
    ) {
        // Fill padding with undef value, where applicable.
        if let Some(ty) = arg.pad {
            llargs.push(bx.const_undef(bx.reg_backend_type(&ty)))
        }

        if arg.is_ignore() {
            return;
        }

        if let PassMode::Pair(..) = arg.mode {
            match op.val {
                Pair(a, b) => {
                    llargs.push(a);
                    llargs.push(b);
                    return;
                }
                _ => bug!("codegen_argument: {:?} invalid for pair argument", op),
            }
        } else if arg.is_unsized_indirect() {
            match op.val {
                Ref(a, Some(b), _) => {
                    llargs.push(a);
                    llargs.push(b);
                    return;
                }
                _ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op),
            }
        }

        // Force by-ref if we have to load through a cast pointer.
        let (mut llval, align, by_ref) = match op.val {
            Immediate(_) | Pair(..) => match arg.mode {
                PassMode::Indirect(..) | PassMode::Cast(_) => {
                    let scratch = PlaceRef::alloca(bx, arg.layout);
                    op.val.store(bx, scratch);
                    (scratch.llval, scratch.align, true)
                }
                _ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
            },
            Ref(llval, _, align) => {
                if arg.is_indirect() && align < arg.layout.align.abi {
                    // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
                    // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
                    // have scary latent bugs around.

                    let scratch = PlaceRef::alloca(bx, arg.layout);
                    base::memcpy_ty(
                        bx,
                        scratch.llval,
                        scratch.align,
                        llval,
                        align,
                        op.layout,
                        MemFlags::empty(),
                    );
                    (scratch.llval, scratch.align, true)
                } else {
                    (llval, align, true)
                }
            }
        };

        if by_ref && !arg.is_indirect() {
            // Have to load the argument, maybe while casting it.
            if let PassMode::Cast(ty) = arg.mode {
                let addr = bx.pointercast(llval, bx.type_ptr_to(bx.cast_backend_type(&ty)));
                llval = bx.load(addr, align.min(arg.layout.align.abi));
            } else {
                // We can't use `PlaceRef::load` here because the argument
                // may have a type we don't treat as immediate, but the ABI
                // used for this call is passing it by-value. In that case,
                // the load would just produce `OperandValue::Ref` instead
                // of the `OperandValue::Immediate` we need for the call.
                llval = bx.load(llval, align);
                if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
                    if scalar.is_bool() {
                        bx.range_metadata(llval, 0..2);
                    }
                }
                // We store bools as `i8` so we need to truncate to `i1`.
                llval = bx.to_immediate(llval, arg.layout);
            }
        }

        llargs.push(llval);
    }

    fn codegen_arguments_untupled(
        &mut self,
        bx: &mut Bx,
        operand: &mir::Operand<'tcx>,
        llargs: &mut Vec<Bx::Value>,
        args: &[ArgAbi<'tcx, Ty<'tcx>>],
    ) {
        let tuple = self.codegen_operand(bx, operand);

        // Handle both by-ref and immediate tuples.
        if let Ref(llval, None, align) = tuple.val {
            let tuple_ptr = PlaceRef::new_sized_aligned(llval, tuple.layout, align);
            for i in 0..tuple.layout.fields.count() {
                let field_ptr = tuple_ptr.project_field(bx, i);
                let field = bx.load_operand(field_ptr);
                self.codegen_argument(bx, field, llargs, &args[i]);
            }
        } else if let Ref(_, Some(_), _) = tuple.val {
            bug!("closure arguments must be sized")
        } else {
            // If the tuple is immediate, the elements are as well.
            for i in 0..tuple.layout.fields.count() {
                let op = tuple.extract_field(bx, i);
                self.codegen_argument(bx, op, llargs, &args[i]);
            }
        }
    }

    fn get_caller_location(
        &mut self,
        bx: &mut Bx,
        mut source_info: mir::SourceInfo,
    ) -> OperandRef<'tcx, Bx::Value> {
        let tcx = bx.tcx();

        let mut span_to_caller_location = |span: Span| {
            let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
            let caller = tcx.sess.source_map().lookup_char_pos(topmost.lo());
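            // `const_caller_location` interns a `core::panic::Location` for
            // this (file, line, column) triple; columns are reported 1-based,
            // hence the `+ 1` below.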
            let const_loc = tcx.const_caller_location((
                Symbol::intern(&caller.file.name.to_string()),
                caller.line as u32,
                caller.col_display as u32 + 1,
            ));
            OperandRef::from_const(bx, const_loc, bx.tcx().caller_location_ty())
        };

        // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
        // If so, the starting `source_info.span` is in the innermost inlined
        // function, and will be replaced with outer callsite spans as long
        // as the inlined functions were `#[track_caller]`.
        loop {
            let scope_data = &self.mir.source_scopes[source_info.scope];

            if let Some((callee, callsite_span)) = scope_data.inlined {
                // Stop inside the most nested non-`#[track_caller]` function,
                // before ever reaching its caller (which is irrelevant).
                if !callee.def.requires_caller_location(tcx) {
                    return span_to_caller_location(source_info.span);
                }
                source_info.span = callsite_span;
            }

            // Skip past all of the parents with `inlined: None`.
            match scope_data.inlined_parent_scope {
                Some(parent) => source_info.scope = parent,
                None => break,
            }
        }

        // No inlined `SourceScope`s, or all of them were `#[track_caller]`.
        self.caller_location.unwrap_or_else(|| span_to_caller_location(source_info.span))
    }

    fn get_personality_slot(&mut self, bx: &mut Bx) -> PlaceRef<'tcx, Bx::Value> {
        let cx = bx.cx();
        if let Some(slot) = self.personality_slot {
            slot
        } else {
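            // `(*mut u8, i32)` mirrors the Itanium landing-pad result pair:
            // the exception object pointer and the type-selector value.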
            let layout = cx.layout_of(
                cx.tcx().intern_tup(&[cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32]),
            );
            let slot = PlaceRef::alloca(bx, layout);
            self.personality_slot = Some(slot);
            slot
        }
    }

    /// Returns the landing-pad wrapper around the given basic block.
    ///
    /// No-op in MSVC SEH scheme.
    fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> Bx::BasicBlock {
        if let Some(block) = self.landing_pads[target_bb] {
            return block;
        }

        let block = self.blocks[target_bb];
        let landing_pad = self.landing_pad_uncached(block);
        self.landing_pads[target_bb] = Some(landing_pad);
        landing_pad
    }

    fn landing_pad_uncached(&mut self, target_bb: Bx::BasicBlock) -> Bx::BasicBlock {
        if base::wants_msvc_seh(self.cx.sess()) {
            span_bug!(self.mir.span, "landing pad was not inserted?")
        }

        let mut bx = self.new_block("cleanup");

        let llpersonality = self.cx.eh_personality();
        let llretty = self.landing_pad_type();
        let lp = bx.landing_pad(llretty, llpersonality, 1);
        bx.set_cleanup(lp);

        let slot = self.get_personality_slot(&mut bx);
        slot.storage_live(&mut bx);
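        // Spill the two halves of the landing-pad value into the personality
        // slot; `codegen_resume_terminator` reloads them when unwinding
        // eventually resumes.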
        Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot);

        bx.br(target_bb);
        bx.llbb()
    }

    fn landing_pad_type(&self) -> Bx::Type {
        let cx = self.cx;
        cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false)
    }

    fn unreachable_block(&mut self) -> Bx::BasicBlock {
        self.unreachable_block.unwrap_or_else(|| {
            let mut bx = self.new_block("unreachable");
            bx.unreachable();
            self.unreachable_block = Some(bx.llbb());
            bx.llbb()
        })
    }

    pub fn new_block(&self, name: &str) -> Bx {
        Bx::new_block(self.cx, self.llfn, name)
    }

    pub fn build_block(&self, bb: mir::BasicBlock) -> Bx {
        let mut bx = Bx::with_cx(self.cx);
        bx.position_at_end(self.blocks[bb]);
        bx
    }

    fn make_return_dest(
        &mut self,
        bx: &mut Bx,
        dest: mir::Place<'tcx>,
        fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
        llargs: &mut Vec<Bx::Value>,
        is_intrinsic: bool,
    ) -> ReturnDest<'tcx, Bx::Value> {
        // If the return is ignored, we can just return a do-nothing `ReturnDest`.
        if fn_ret.is_ignore() {
            return ReturnDest::Nothing;
        }
        let dest = if let Some(index) = dest.as_local() {
            match self.locals[index] {
                LocalRef::Place(dest) => dest,
                LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
                LocalRef::Operand(None) => {
                    // Handle temporary places, specifically `Operand` ones, as
                    // they don't have `alloca`s.
                    return if fn_ret.is_indirect() {
                        // Odd, but possible, case, we have an operand temporary,
                        // but the calling convention has an indirect return.
                        let tmp = PlaceRef::alloca(bx, fn_ret.layout);
                        tmp.storage_live(bx);
                        llargs.push(tmp.llval);
                        ReturnDest::IndirectOperand(tmp, index)
                    } else if is_intrinsic {
                        // Currently, intrinsics always need a location to store
                        // the result, so we create a temporary `alloca` for the
                        // result.
                        let tmp = PlaceRef::alloca(bx, fn_ret.layout);
                        tmp.storage_live(bx);
                        ReturnDest::IndirectOperand(tmp, index)
                    } else {
                        ReturnDest::DirectOperand(index)
                    };
                }
                LocalRef::Operand(Some(_)) => {
                    bug!("place local already assigned to");
                }
            }
        } else {
            self.codegen_place(
                bx,
                mir::PlaceRef { local: dest.local, projection: &dest.projection },
            )
        };
        if fn_ret.is_indirect() {
            if dest.align < dest.layout.align.abi {
                // Currently, MIR code generation does not create calls
                // that store directly to fields of packed structs (in
                // fact, the calls it creates write only to temps).
                //
                // If someone changes that, please update this code path
                // to create a temporary.
                span_bug!(self.mir.span, "can't directly store to unaligned value");
            }
            llargs.push(dest.llval);
            ReturnDest::Nothing
        } else {
            ReturnDest::Store(dest)
        }
    }

    fn codegen_transmute(&mut self, bx: &mut Bx, src: &mir::Operand<'tcx>, dst: mir::Place<'tcx>) {
        if let Some(index) = dst.as_local() {
            match self.locals[index] {
                LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
                LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
                LocalRef::Operand(None) => {
                    let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst.as_ref()));
                    assert!(!dst_layout.ty.has_erasable_regions());
                    let place = PlaceRef::alloca(bx, dst_layout);
                    place.storage_live(bx);
                    self.codegen_transmute_into(bx, src, place);
                    let op = bx.load_operand(place);
                    place.storage_dead(bx);
                    self.locals[index] = LocalRef::Operand(Some(op));
                    self.debug_introduce_local(bx, index);
                }
                LocalRef::Operand(Some(op)) => {
                    assert!(op.layout.is_zst(), "assigning to initialized SSA temp");
                }
            }
        } else {
            let dst = self.codegen_place(bx, dst.as_ref());
            self.codegen_transmute_into(bx, src, dst);
        }
    }

    fn codegen_transmute_into(
        &mut self,
        bx: &mut Bx,
        src: &mir::Operand<'tcx>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        let src = self.codegen_operand(bx, src);
        let llty = bx.backend_type(src.layout);
        let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
        let align = src.layout.align.abi.min(dst.align);
        src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align));
    }

    // Stores the return value of a function call into its final location.
    fn store_return(
        &mut self,
        bx: &mut Bx,
        dest: ReturnDest<'tcx, Bx::Value>,
        ret_abi: &ArgAbi<'tcx, Ty<'tcx>>,
        llval: Bx::Value,
    ) {
        use self::ReturnDest::*;

        match dest {
            Nothing => (),
            Store(dst) => bx.store_arg(&ret_abi, llval, dst),
            IndirectOperand(tmp, index) => {
                let op = bx.load_operand(tmp);
                tmp.storage_dead(bx);
                self.locals[index] = LocalRef::Operand(Some(op));
                self.debug_introduce_local(bx, index);
            }
            DirectOperand(index) => {
                // If there is a cast, we have to store and reload.
                let op = if let PassMode::Cast(_) = ret_abi.mode {
                    let tmp = PlaceRef::alloca(bx, ret_abi.layout);
                    tmp.storage_live(bx);
                    bx.store_arg(&ret_abi, llval, tmp);
                    let op = bx.load_operand(tmp);
                    tmp.storage_dead(bx);
                    op
                } else {
                    OperandRef::from_immediate_or_packed_pair(bx, llval, ret_abi.layout)
                };
                self.locals[index] = LocalRef::Operand(Some(op));
                self.debug_introduce_local(bx, index);
            }
        }
    }
}

enum ReturnDest<'tcx, V> {
    // Do nothing; the return value is indirect or ignored.
    Nothing,
    // Store the return value to the pointer.
    Store(PlaceRef<'tcx, V>),
    // Store an indirect return value to an operand local place.
    IndirectOperand(PlaceRef<'tcx, V>, mir::Local),
    // Store a direct return value to an operand local place.
    DirectOperand(mir::Local),
}