// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, ValueRef};
use rustc_const_eval::ErrKind;
use rustc::middle::lang_items;
use rustc::ty;
use rustc::mir::repr as mir;
use abi::{Abi, FnType, ArgType};
use adt;
use base;
use build;
use callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
use common::{self, Block, BlockAndBuilder, LandingPad};
use common::{C_bool, C_str_slice, C_struct, C_u32, C_undef};
use consts;
use debuginfo::DebugLoc;
use Disr;
use expr;
use machine::{llalign_of_min, llbitsize_of_real};
use meth;
use type_of;
use glue;
use type_::Type;
use rustc_data_structures::fnv::FnvHashMap;
use syntax::parse::token;

use super::{MirContext, LocalRef};
use super::analyze::CleanupKind;
use super::constant::Const;
use super::lvalue::{LvalueRef, load_fat_ptr};
use super::operand::OperandRef;
use super::operand::OperandValue::*;

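// This file lowers MIR basic blocks to LLVM IR: each `mir::BasicBlock` gets
// an LLVM basic block, its statements are translated in order, and its
// terminator becomes the matching LLVM control-flow instruction (`br`,
// `switch`, `call`/`invoke`, `unreachable`, ...).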
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
    pub fn trans_block(&mut self, bb: mir::BasicBlock) {
        let mut bcx = self.bcx(bb);
        let mir = self.mir.clone();
        let data = &mir[bb];

        debug!("trans_block({:?}={:?})", bb, data);

        // Create the cleanup bundle, if needed.
        let cleanup_pad = bcx.lpad().and_then(|lp| lp.cleanuppad());
        let cleanup_bundle = bcx.lpad().and_then(|l| l.bundle());

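        // `funclet_br` branches from this block to `bb`. Under MSVC-style
        // (funclet) exception handling, control may only leave a cleanup
        // funclet through a `cleanupret` naming the active cleanup pad --
        // roughly `cleanupret from %pad unwind label %target` in LLVM IR --
        // while GNU-style landing pads permit a plain `br`.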
        let funclet_br = |this: &Self, bcx: BlockAndBuilder, bb: mir::BasicBlock| {
            let lltarget = this.blocks[bb].llbb;
            if let Some(cp) = cleanup_pad {
                match this.cleanup_kinds[bb] {
                    CleanupKind::Funclet => {
                        // micro-optimization: generate a `ret` rather than a jump
                        // to a trampoline.
                        bcx.cleanup_ret(cp, Some(lltarget));
                    }
                    CleanupKind::Internal { .. } => bcx.br(lltarget),
                    CleanupKind::NotCleanup => bug!("jump from cleanup bb to bb {:?}", bb)
                }
            } else {
                bcx.br(lltarget);
            }
        };

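        // `llblock` returns an LLVM block usable as a branch *target* from
        // this block. A direct MSVC cross-funclet edge would be invalid IR,
        // so a single-instruction trampoline block holding the `cleanupret`
        // is synthesized in that case.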
        let llblock = |this: &mut Self, target: mir::BasicBlock| {
            let lltarget = this.blocks[target].llbb;

            if let Some(cp) = cleanup_pad {
                match this.cleanup_kinds[target] {
                    CleanupKind::Funclet => {
                        // MSVC cross-funclet jump - need a trampoline

                        debug!("llblock: creating cleanup trampoline for {:?}", target);
                        let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
                        let trampoline = this.fcx.new_block(name, None).build();
                        trampoline.set_personality_fn(this.fcx.eh_personality());
                        trampoline.cleanup_ret(cp, Some(lltarget));
                        trampoline.llbb()
                    }
                    CleanupKind::Internal { .. } => lltarget,
                    CleanupKind::NotCleanup =>
                        bug!("jump from cleanup bb {:?} to bb {:?}", bb, target)
                }
            } else {
                if let (CleanupKind::NotCleanup, CleanupKind::Funclet) =
                    (this.cleanup_kinds[bb], this.cleanup_kinds[target])
                {
                    // jump *into* cleanup - need a landing pad if GNU
                    this.landing_pad_to(target).llbb
                } else {
                    lltarget
                }
            }
        };

        for statement in &data.statements {
            bcx = self.trans_statement(bcx, statement);
        }

        let terminator = data.terminator();
        debug!("trans_block: terminator: {:?}", terminator);

        let span = terminator.source_info.span;
        let debug_loc = self.debug_loc(terminator.source_info);
        debug_loc.apply_to_bcx(&bcx);
        debug_loc.apply(bcx.fcx());
        match terminator.kind {
            mir::TerminatorKind::Resume => {
                if let Some(cleanup_pad) = cleanup_pad {
                    bcx.cleanup_ret(cleanup_pad, None);
                } else {
                    let ps = self.get_personality_slot(&bcx);
                    let lp = bcx.load(ps);
                    bcx.with_block(|bcx| {
                        base::call_lifetime_end(bcx, ps);
                        base::trans_unwind_resume(bcx, lp);
                    });
                }
            }

            mir::TerminatorKind::Goto { target } => {
                funclet_br(self, bcx, target);
            }

            mir::TerminatorKind::If { ref cond, targets: (true_bb, false_bb) } => {
                let cond = self.trans_operand(&bcx, cond);

                let lltrue = llblock(self, true_bb);
                let llfalse = llblock(self, false_bb);
                bcx.cond_br(cond.immediate(), lltrue, llfalse);
            }

            mir::TerminatorKind::Switch { ref discr, ref adt_def, ref targets } => {
                let discr_lvalue = self.trans_lvalue(&bcx, discr);
                let ty = discr_lvalue.ty.to_ty(bcx.tcx());
                let repr = adt::represent_type(bcx.ccx(), ty);
                let discr = bcx.with_block(|bcx|
                    adt::trans_get_discr(bcx, &repr, discr_lvalue.llval, None, true)
                );

                let mut bb_hist = FnvHashMap();
                for target in targets {
                    *bb_hist.entry(target).or_insert(0) += 1;
                }
                let (default_bb, default_blk) = match bb_hist.iter().max_by_key(|&(_, c)| c) {
                    // If a single target basic block is predominant, promote that to be the
                    // default case for the switch instruction to reduce the size of the generated
                    // code. This is especially helpful in cases like an if-let on a huge enum.
                    // Note: This optimization is only valid for exhaustive matches.
                    Some((&&bb, &c)) if c > targets.len() / 2 => {
                        (Some(bb), llblock(self, bb))
                    }
                    // We're generating an exhaustive switch, so the else branch
                    // can't be hit. Branching to an unreachable instruction
                    // lets LLVM know this.
                    _ => (None, self.unreachable_block().llbb)
                };
                let switch = bcx.switch(discr, default_blk, targets.len());
                assert_eq!(adt_def.variants.len(), targets.len());
                for (adt_variant, &target) in adt_def.variants.iter().zip(targets) {
                    if default_bb != Some(target) {
                        let llbb = llblock(self, target);
                        let llval = bcx.with_block(|bcx| adt::trans_case(
                            bcx, &repr, Disr::from(adt_variant.disr_val)));
                        build::AddCase(switch, llval, llbb)
                    }
                }
            }

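            // For SwitchInt, MIR keeps the fallback ("otherwise") block as
            // the last element of `targets`, hence the `split_last` below.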
            mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
                let (otherwise, targets) = targets.split_last().unwrap();
                let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
                let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty));
                let switch = bcx.switch(discr, llblock(self, *otherwise), values.len());
                for (value, target) in values.iter().zip(targets) {
                    let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty);
                    let llbb = llblock(self, *target);
                    build::AddCase(switch, val.llval, llbb)
                }
            }

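            // A plain `ret void` suffices when the ABI returns indirectly
            // (through an out-pointer) or ignores the value; otherwise the
            // value may need a spill-and-reload through a cast pointer so
            // the returned LLVM value matches the declared function type.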
            mir::TerminatorKind::Return => {
                let ret = bcx.fcx().fn_ty.ret;
                if ret.is_ignore() || ret.is_indirect() {
                    bcx.ret_void();
                    return;
                }

                let llval = if let Some(cast_ty) = ret.cast {
                    let index = mir.local_index(&mir::Lvalue::ReturnPointer).unwrap();
                    let op = match self.locals[index] {
                        LocalRef::Operand(Some(op)) => op,
                        LocalRef::Operand(None) => bug!("use of return before def"),
                        LocalRef::Lvalue(tr_lvalue) => {
                            OperandRef {
                                val: Ref(tr_lvalue.llval),
                                ty: tr_lvalue.ty.to_ty(bcx.tcx())
                            }
                        }
                    };
                    let llslot = match op.val {
                        Immediate(_) | Pair(..) => {
                            let llscratch = build::AllocaFcx(bcx.fcx(), ret.original_ty, "ret");
                            self.store_operand(&bcx, llscratch, op);
                            llscratch
                        }
                        Ref(llval) => llval
                    };
                    let load = bcx.load(bcx.pointercast(llslot, cast_ty.ptr_to()));
                    let llalign = llalign_of_min(bcx.ccx(), ret.ty);
                    unsafe {
                        llvm::LLVMSetAlignment(load, llalign);
                    }
                    load
                } else {
                    let op = self.trans_consume(&bcx, &mir::Lvalue::ReturnPointer);
                    op.pack_if_pair(&bcx).immediate()
                };
                bcx.ret(llval);
            }

            mir::TerminatorKind::Unreachable => {
                bcx.unreachable();
            }

            mir::TerminatorKind::Drop { ref location, target, unwind } => {
                let ty = location.ty(&mir, bcx.tcx()).to_ty(bcx.tcx());
                let ty = bcx.monomorphize(&ty);

                // Double check for necessity to drop
                if !glue::type_needs_drop(bcx.tcx(), ty) {
                    funclet_br(self, bcx, target);
                    return;
                }

                let lvalue = self.trans_lvalue(&bcx, location);
                let drop_fn = glue::get_drop_glue(bcx.ccx(), ty);
                let drop_ty = glue::get_drop_glue_type(bcx.tcx(), ty);
                let is_sized = common::type_is_sized(bcx.tcx(), ty);
                let llvalue = if is_sized {
                    if drop_ty != ty {
                        bcx.pointercast(lvalue.llval, type_of::type_of(bcx.ccx(), drop_ty).ptr_to())
                    } else {
                        lvalue.llval
                    }
                } else {
                    // FIXME(#36457) Currently drop glue takes sized
                    // values as a `*(data, meta)`, but elsewhere in
                    // MIR we pass `(data, meta)` as two separate
                    // arguments. It would be better to fix drop glue,
                    // but I am shooting for a quick fix to #35546
                    // here that can be cleanly backported to beta, so
                    // I want to avoid touching all of trans.
                    bcx.with_block(|bcx| {
                        let scratch = base::alloc_ty(bcx, ty, "drop");
                        base::call_lifetime_start(bcx, scratch);
                        build::Store(bcx, lvalue.llval, expr::get_dataptr(bcx, scratch));
                        build::Store(bcx, lvalue.llextra, expr::get_meta(bcx, scratch));
                        scratch
                    })
                };
                if let Some(unwind) = unwind {
                    bcx.invoke(drop_fn,
                               &[llvalue],
                               self.blocks[target].llbb,
                               llblock(self, unwind),
                               cleanup_bundle);
                } else {
                    bcx.call(drop_fn, &[llvalue], cleanup_bundle);
                    funclet_br(self, bcx, target);
                }
            }

            mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
                let cond = self.trans_operand(&bcx, cond).immediate();
                let mut const_cond = common::const_to_opt_uint(cond).map(|c| c == 1);

                // This case can currently arise only from functions marked
                // with #[rustc_inherit_overflow_checks] and inlined from
                // another crate (mostly core::num generic/#[inline] fns),
                // while the current crate doesn't use overflow checks.
                // NOTE: Unlike binops, negation doesn't have its own
                // checked operation, just a comparison with the minimum
                // value, so we have to check for the assert message.
                if !bcx.ccx().check_overflow() {
                    use rustc_const_math::ConstMathErr::Overflow;
                    use rustc_const_math::Op::Neg;

                    if let mir::AssertMessage::Math(Overflow(Neg)) = *msg {
                        const_cond = Some(expected);
                    }
                }

                // Don't translate the panic block if success is known.
                if const_cond == Some(expected) {
                    funclet_br(self, bcx, target);
                    return;
                }

                // Pass the condition through llvm.expect for branch hinting.
                let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
                let cond = bcx.call(expect, &[cond, C_bool(bcx.ccx(), expected)], None);

                // Create the failure block and the conditional branch to it.
                let lltarget = llblock(self, target);
                let panic_block = self.fcx.new_block("panic", None);
                if expected {
                    bcx.cond_br(cond, lltarget, panic_block.llbb);
                } else {
                    bcx.cond_br(cond, panic_block.llbb, lltarget);
                }

                // After this point, bcx is the block for the call to panic.
                bcx = panic_block.build();
                debug_loc.apply_to_bcx(&bcx);

                // Get the location information.
                let loc = bcx.sess().codemap().lookup_char_pos(span.lo);
                let filename = token::intern_and_get_ident(&loc.file.name);
                let filename = C_str_slice(bcx.ccx(), filename);
                let line = C_u32(bcx.ccx(), loc.line as u32);

                // Put together the arguments to the panic entry point.
                let (lang_item, args, const_err) = match *msg {
                    mir::AssertMessage::BoundsCheck { ref len, ref index } => {
                        let len = self.trans_operand(&mut bcx, len).immediate();
                        let index = self.trans_operand(&mut bcx, index).immediate();

                        let const_err = common::const_to_opt_uint(len).and_then(|len| {
                            common::const_to_opt_uint(index).map(|index| {
                                ErrKind::IndexOutOfBounds {
                                    len: len,
                                    index: index
                                }
                            })
                        });

                        let file_line = C_struct(bcx.ccx(), &[filename, line], false);
                        let align = llalign_of_min(bcx.ccx(), common::val_ty(file_line));
                        let file_line = consts::addr_of(bcx.ccx(),
                                                        file_line,
                                                        align,
                                                        "panic_bounds_check_loc");
                        (lang_items::PanicBoundsCheckFnLangItem,
                         vec![file_line, index, len],
                         const_err)
                    }
                    mir::AssertMessage::Math(ref err) => {
                        let msg_str = token::intern_and_get_ident(err.description());
                        let msg_str = C_str_slice(bcx.ccx(), msg_str);
                        let msg_file_line = C_struct(bcx.ccx(),
                                                     &[msg_str, filename, line],
                                                     false);
                        let align = llalign_of_min(bcx.ccx(), common::val_ty(msg_file_line));
                        let msg_file_line = consts::addr_of(bcx.ccx(),
                                                            msg_file_line,
                                                            align,
                                                            "panic_loc");
                        (lang_items::PanicFnLangItem,
                         vec![msg_file_line],
                         Some(ErrKind::Math(err.clone())))
                    }
                };

                // If we know we always panic, and the error message
                // is also constant, then we can produce a warning.
                if const_cond == Some(!expected) {
                    if let Some(err) = const_err {
                        let _ = consts::const_err(bcx.ccx(), span,
                                                  Err::<(), _>(err),
                                                  consts::TrueConst::No);
                    }
                }

                // Obtain the panic entry point.
                let def_id = common::langcall(bcx.tcx(), Some(span), "", lang_item);
                let callee = Callee::def(bcx.ccx(), def_id,
                    bcx.ccx().empty_substs_for_def_id(def_id));
                let llfn = callee.reify(bcx.ccx()).val;

                // Translate the actual panic invoke/call.
                if let Some(unwind) = cleanup {
                    bcx.invoke(llfn,
                               &args,
                               self.unreachable_block().llbb,
                               llblock(self, unwind),
                               cleanup_bundle);
                } else {
                    bcx.call(llfn, &args, cleanup_bundle);
                    bcx.unreachable();
                }
            }

            mir::TerminatorKind::DropAndReplace { .. } => {
                bug!("undesugared DropAndReplace in trans: {:?}", data);
            }

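            // Call covers every callable: fn items, fn pointers, virtual
            // (trait object) methods, intrinsics and tuple constructors.
            // The shape is: compute the callee, marshal arguments into
            // `llargs` per the `FnType` ABI, then emit `call` or `invoke`
            // depending on whether an unwind (cleanup) edge exists.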
            mir::TerminatorKind::Call { ref func, ref args, ref destination, ref cleanup } => {
                // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
                let callee = self.trans_operand(&bcx, func);

                let (mut callee, abi, sig) = match callee.ty.sty {
                    ty::TyFnDef(def_id, substs, f) => {
                        (Callee::def(bcx.ccx(), def_id, substs), f.abi, &f.sig)
                    }
                    ty::TyFnPtr(f) => {
                        (Callee {
                            data: Fn(callee.immediate()),
                            ty: callee.ty
                        }, f.abi, &f.sig)
                    }
                    _ => bug!("{} is not callable", callee.ty)
                };

                let sig = bcx.tcx().erase_late_bound_regions(sig);

                // Handle intrinsics old trans wants Expr's for, ourselves.
                let intrinsic = match (&callee.ty.sty, &callee.data) {
                    (&ty::TyFnDef(def_id, _, _), &Intrinsic) => {
                        Some(bcx.tcx().item_name(def_id).as_str())
                    }
                    _ => None
                };
                let intrinsic = intrinsic.as_ref().map(|s| &s[..]);

                if intrinsic == Some("move_val_init") {
                    let &(_, target) = destination.as_ref().unwrap();
                    // The first argument is a thin destination pointer.
                    let llptr = self.trans_operand(&bcx, &args[0]).immediate();
                    let val = self.trans_operand(&bcx, &args[1]);
                    self.store_operand(&bcx, llptr, val);
                    funclet_br(self, bcx, target);
                    return;
                }

                if intrinsic == Some("transmute") {
                    let &(ref dest, target) = destination.as_ref().unwrap();
                    self.with_lvalue_ref(&bcx, dest, |this, dest| {
                        this.trans_transmute(&bcx, &args[0], dest);
                    });

                    funclet_br(self, bcx, target);
                    return;
                }

                let extra_args = &args[sig.inputs.len()..];
                let extra_args = extra_args.iter().map(|op_arg| {
                    let op_ty = op_arg.ty(&self.mir, bcx.tcx());
                    bcx.monomorphize(&op_ty)
                }).collect::<Vec<_>>();
                let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args);

                // The arguments we'll be passing. Plus one to account for outptr, if used.
                let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize;
                let mut llargs = Vec::with_capacity(arg_count);

                // Prepare the return value destination
                let ret_dest = if let Some((ref dest, _)) = *destination {
                    let is_intrinsic = if let Intrinsic = callee.data {
                        true
                    } else {
                        false
                    };

                    self.make_return_dest(&bcx, dest, &fn_ty.ret, &mut llargs, is_intrinsic)
                } else {
                    ReturnDest::Nothing
                };

                // Split the rust-call tupled arguments off.
                let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
                    let (tup, args) = args.split_last().unwrap();
                    (args, Some(tup))
                } else {
                    (&args[..], None)
                };

                let is_shuffle = intrinsic.map_or(false, |name| {
                    name.starts_with("simd_shuffle")
                });
                let mut idx = 0;
                for arg in first_args {
                    // The indices passed to simd_shuffle* in the
                    // third argument must be constant. This is
                    // checked by const-qualification, which also
                    // promotes any complex rvalues to constants.
                    if is_shuffle && idx == 2 {
                        match *arg {
                            mir::Operand::Consume(_) => {
                                span_bug!(span, "shuffle indices must be constant");
                            }
                            mir::Operand::Constant(ref constant) => {
                                let val = self.trans_constant(&bcx, constant);
                                llargs.push(val.llval);
                                idx += 1;
                                continue;
                            }
                        }
                    }

                    let op = self.trans_operand(&bcx, arg);
                    self.trans_argument(&bcx, op, &mut llargs, &fn_ty,
                                        &mut idx, &mut callee.data);
                }
                if let Some(tup) = untuple {
                    self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty,
                                                  &mut idx, &mut callee.data)
                }

                let fn_ptr = match callee.data {
                    NamedTupleConstructor(_) => {
                        // FIXME translate this like mir::Rvalue::Aggregate.
                        callee.reify(bcx.ccx()).val
                    }
                    Intrinsic => {
                        use callee::ArgVals;
                        use expr::{Ignore, SaveIn};
                        use intrinsic::trans_intrinsic_call;

                        let (dest, llargs) = match ret_dest {
                            _ if fn_ty.ret.is_indirect() => {
                                (SaveIn(llargs[0]), &llargs[1..])
                            }
                            ReturnDest::Nothing => (Ignore, &llargs[..]),
                            ReturnDest::IndirectOperand(dst, _) |
                            ReturnDest::Store(dst) => (SaveIn(dst), &llargs[..]),
                            ReturnDest::DirectOperand(_) =>
                                bug!("Cannot use direct operand with an intrinsic call")
                        };

                        bcx.with_block(|bcx| {
                            trans_intrinsic_call(bcx, callee.ty, &fn_ty,
                                                 ArgVals(llargs), dest,
                                                 debug_loc);
                        });

                        if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
                            // Make a fake operand for store_return
                            let op = OperandRef {
                                val: Ref(dst),
                                ty: sig.output.unwrap()
                            };
                            self.store_return(&bcx, ret_dest, fn_ty.ret, op);
                        }

                        if let Some((_, target)) = *destination {
                            funclet_br(self, bcx, target);
                        } else {
                            // trans_intrinsic_call already used Unreachable.
                            // bcx.unreachable();
                        }

                        return;
                    }
                    Fn(f) => f,
                    Virtual(_) => bug!("Virtual fn ptr not extracted")
                };

                // Many different ways to call a function handled here
                if let &Some(cleanup) = cleanup {
                    let ret_bcx = if let Some((_, target)) = *destination {
                        self.blocks[target]
                    } else {
                        self.unreachable_block()
                    };
                    let invokeret = bcx.invoke(fn_ptr,
                                               &llargs,
                                               ret_bcx.llbb,
                                               llblock(self, cleanup),
                                               cleanup_bundle);
                    fn_ty.apply_attrs_callsite(invokeret);

                    if destination.is_some() {
                        let ret_bcx = ret_bcx.build();
                        ret_bcx.at_start(|ret_bcx| {
                            debug_loc.apply_to_bcx(ret_bcx);
                            let op = OperandRef {
                                val: Immediate(invokeret),
                                ty: sig.output.unwrap()
                            };
                            self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op);
                        });
                    }
                } else {
                    let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle);
                    fn_ty.apply_attrs_callsite(llret);
                    if let Some((_, target)) = *destination {
                        let op = OperandRef {
                            val: Immediate(llret),
                            ty: sig.output.unwrap()
                        };
                        self.store_return(&bcx, ret_dest, fn_ty.ret, op);
                        funclet_br(self, bcx, target);
                    } else {
                        bcx.unreachable();
                    }
                }
            }
        }
    }

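    // Marshals one argument into `llargs` following the ABI in `fn_ty`. Fat
    // pointers arrive as a `Pair` and are passed as two separate arguments;
    // the meta (vtable) half is also where a `Virtual` callee is resolved to
    // a concrete fn pointer.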
    fn trans_argument(&mut self,
                      bcx: &BlockAndBuilder<'bcx, 'tcx>,
                      op: OperandRef<'tcx>,
                      llargs: &mut Vec<ValueRef>,
                      fn_ty: &FnType,
                      next_idx: &mut usize,
                      callee: &mut CalleeData) {
        if let Pair(a, b) = op.val {
            // Treat the values in a fat pointer separately.
            if common::type_is_fat_ptr(bcx.tcx(), op.ty) {
                let (ptr, meta) = (a, b);
                if *next_idx == 0 {
                    if let Virtual(idx) = *callee {
                        let llfn = bcx.with_block(|bcx| {
                            meth::get_virtual_method(bcx, meta, idx)
                        });
                        let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
                        *callee = Fn(bcx.pointercast(llfn, llty));
                    }
                }

                let imm_op = |x| OperandRef {
                    val: Immediate(x),
                    // We won't be checking the type again.
                    ty: bcx.tcx().types.err
                };
                self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, callee);
                self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, callee);
                return;
            }
        }

        let arg = &fn_ty.args[*next_idx];
        *next_idx += 1;

        // Fill padding with undef value, where applicable.
        if let Some(ty) = arg.pad {
            llargs.push(C_undef(ty));
        }

        if arg.is_ignore() {
            return;
        }

        // Force by-ref if we have to load through a cast pointer.
        let (mut llval, by_ref) = match op.val {
            Immediate(_) | Pair(..) => {
                if arg.is_indirect() || arg.cast.is_some() {
                    let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg");
                    self.store_operand(bcx, llscratch, op);
                    (llscratch, true)
                } else {
                    (op.pack_if_pair(bcx).immediate(), false)
                }
            }
            Ref(llval) => (llval, true)
        };

        if by_ref && !arg.is_indirect() {
            // Have to load the argument, maybe while casting it.
            if arg.original_ty == Type::i1(bcx.ccx()) {
                // We store bools as i8 so we need to truncate to i1.
                llval = bcx.load_range_assert(llval, 0, 2, llvm::False);
                llval = bcx.trunc(llval, arg.original_ty);
            } else if let Some(ty) = arg.cast {
                llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()));
                let llalign = llalign_of_min(bcx.ccx(), arg.ty);
                unsafe {
                    llvm::LLVMSetAlignment(llval, llalign);
                }
            } else {
                llval = bcx.load(llval);
            }
        }

        llargs.push(llval);
    }

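    // For the "rust-call" ABI the trailing tuple argument is flattened: each
    // field is extracted (via field pointer if the tuple is by-ref, via
    // extractvalue if immediate) and forwarded through `trans_argument` as
    // if it had been passed separately.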
    fn trans_arguments_untupled(&mut self,
                                bcx: &BlockAndBuilder<'bcx, 'tcx>,
                                operand: &mir::Operand<'tcx>,
                                llargs: &mut Vec<ValueRef>,
                                fn_ty: &FnType,
                                next_idx: &mut usize,
                                callee: &mut CalleeData) {
        let tuple = self.trans_operand(bcx, operand);

        let arg_types = match tuple.ty.sty {
            ty::TyTuple(ref tys) => tys,
            _ => span_bug!(self.mir.span,
                           "bad final argument to \"rust-call\" fn {:?}", tuple.ty)
        };

        // Handle both by-ref and immediate tuples.
        match tuple.val {
            Ref(llval) => {
                let base_repr = adt::represent_type(bcx.ccx(), tuple.ty);
                let base = adt::MaybeSizedValue::sized(llval);
                for (n, &ty) in arg_types.iter().enumerate() {
                    let ptr = adt::trans_field_ptr_builder(bcx, &base_repr, base, Disr(0), n);
                    let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
                        let (lldata, llextra) = load_fat_ptr(bcx, ptr);
                        Pair(lldata, llextra)
                    } else {
                        // trans_argument will load this if it needs to
                        Ref(ptr)
                    };
                    let op = OperandRef {
                        val: val,
                        ty: ty
                    };
                    self.trans_argument(bcx, op, llargs, fn_ty, next_idx, callee);
                }
            }
            Immediate(llval) => {
                for (n, &ty) in arg_types.iter().enumerate() {
                    let mut elem = bcx.extract_value(llval, n);
                    // Truncate bools to i1, if needed
                    if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx()) {
                        elem = bcx.trunc(elem, Type::i1(bcx.ccx()));
                    }
                    // If the tuple is immediate, the elements are as well
                    let op = OperandRef {
                        val: Immediate(elem),
                        ty: ty
                    };
                    self.trans_argument(bcx, op, llargs, fn_ty, next_idx, callee);
                }
            }
            Pair(a, b) => {
                let elems = [a, b];
                for (n, &ty) in arg_types.iter().enumerate() {
                    let mut elem = elems[n];
                    // Truncate bools to i1, if needed
                    if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx()) {
                        elem = bcx.trunc(elem, Type::i1(bcx.ccx()));
                    }
                    // Pair is always made up of immediates
                    let op = OperandRef {
                        val: Immediate(elem),
                        ty: ty
                    };
                    self.trans_argument(bcx, op, llargs, fn_ty, next_idx, callee);
                }
            }
        }
    }

    fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef {
        let ccx = bcx.ccx();
        if let Some(slot) = self.llpersonalityslot {
            slot
        } else {
            let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
            bcx.with_block(|bcx| {
                let slot = base::alloca(bcx, llretty, "personalityslot");
                self.llpersonalityslot = Some(slot);
                base::call_lifetime_start(bcx, slot);
                slot
            })
        }
    }

    /// Return the landingpad wrapper around the given basic block.
    ///
    /// No-op in MSVC SEH scheme.
    fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> Block<'bcx, 'tcx> {
        if let Some(block) = self.landing_pads[target_bb] {
            return block;
        }

        if base::wants_msvc_seh(self.fcx.ccx.sess()) {
            return self.blocks[target_bb];
        }

        let target = self.bcx(target_bb);

        let block = self.fcx.new_block("cleanup", None);
        self.landing_pads[target_bb] = Some(block);

        let bcx = block.build();

        let ccx = bcx.ccx();
        let llpersonality = self.fcx.eh_personality();
        let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
        let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.fcx.llfn);
        bcx.set_cleanup(llretval);
        let slot = self.get_personality_slot(&bcx);
        bcx.store(llretval, slot);
        bcx.br(target.llbb());
        block
    }

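    // Prepares the exception-handling state of a block before its body is
    // translated: GNU landing pads for non-MSVC targets, a fresh
    // `cleanuppad` for MSVC funclets, and a reference to the enclosing
    // funclet's pad for internal cleanup blocks.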
    pub fn init_cpad(&mut self, bb: mir::BasicBlock) {
        let bcx = self.bcx(bb);
        let data = &self.mir[bb];
        debug!("init_cpad({:?})", data);

        match self.cleanup_kinds[bb] {
            CleanupKind::NotCleanup => {
                bcx.set_lpad(None)
            }
            _ if !base::wants_msvc_seh(bcx.sess()) => {
                bcx.set_lpad(Some(LandingPad::gnu()))
            }
            CleanupKind::Internal { funclet } => {
                // FIXME: is this needed?
                bcx.set_personality_fn(self.fcx.eh_personality());
                bcx.set_lpad_ref(self.bcx(funclet).lpad());
            }
            CleanupKind::Funclet => {
                bcx.set_personality_fn(self.fcx.eh_personality());
                DebugLoc::None.apply_to_bcx(&bcx);
                let cleanup_pad = bcx.cleanup_pad(None, &[]);
                bcx.set_lpad(Some(LandingPad::msvc(cleanup_pad)));
            }
        };
    }

    fn unreachable_block(&mut self) -> Block<'bcx, 'tcx> {
        self.unreachable_block.unwrap_or_else(|| {
            let bl = self.fcx.new_block("unreachable", None);
            bl.build().unreachable();
            self.unreachable_block = Some(bl);
            bl
        })
    }

    fn bcx(&self, bb: mir::BasicBlock) -> BlockAndBuilder<'bcx, 'tcx> {
        self.blocks[bb].build()
    }

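    // Decides where a call writes its return value: nowhere (ignored),
    // straight into an existing lvalue, or into a temporary alloca when the
    // destination local is operand-shaped but the ABI (or an intrinsic)
    // needs memory to store through.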
    fn make_return_dest(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
                        dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType,
                        llargs: &mut Vec<ValueRef>, is_intrinsic: bool) -> ReturnDest {
        // If the return is ignored, we can just return a do-nothing ReturnDest
        if fn_ret_ty.is_ignore() {
            return ReturnDest::Nothing;
        }
        let dest = if let Some(index) = self.mir.local_index(dest) {
            let ret_ty = self.monomorphized_lvalue_ty(dest);
            match self.locals[index] {
                LocalRef::Lvalue(dest) => dest,
                LocalRef::Operand(None) => {
                    // Handle temporary lvalues, specifically Operand ones, as
                    // they don't have allocas
                    return if fn_ret_ty.is_indirect() {
                        // Odd, but possible, case, we have an operand temporary,
                        // but the calling convention has an indirect return.
                        let tmp = bcx.with_block(|bcx| {
                            base::alloc_ty(bcx, ret_ty, "tmp_ret")
                        });
                        llargs.push(tmp);
                        ReturnDest::IndirectOperand(tmp, index)
                    } else if is_intrinsic {
                        // Currently, intrinsics always need a location to store
                        // the result, so we create a temporary alloca for the
                        // result
                        let tmp = bcx.with_block(|bcx| {
                            base::alloc_ty(bcx, ret_ty, "tmp_ret")
                        });
                        ReturnDest::IndirectOperand(tmp, index)
                    } else {
                        ReturnDest::DirectOperand(index)
                    };
                }
                LocalRef::Operand(Some(_)) => {
                    bug!("lvalue local already assigned to");
                }
            }
        } else {
            self.trans_lvalue(bcx, dest)
        };
        if fn_ret_ty.is_indirect() {
            llargs.push(dest.llval);
            ReturnDest::Nothing
        } else {
            ReturnDest::Store(dest.llval)
        }
    }

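    // Lowers the `transmute` intrinsic to a store of the source operand
    // through the destination pointer cast to the source's LLVM type;
    // zero-sized fn item types are first reified to real fn pointers
    // (see the FIXME for #19925 below).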
    fn trans_transmute(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>,
                       src: &mir::Operand<'tcx>, dst: LvalueRef<'tcx>) {
        let mut val = self.trans_operand(bcx, src);
        if let ty::TyFnDef(def_id, substs, _) = val.ty.sty {
            let llouttype = type_of::type_of(bcx.ccx(), dst.ty.to_ty(bcx.tcx()));
            let out_type_size = llbitsize_of_real(bcx.ccx(), llouttype);
            if out_type_size != 0 {
                // FIXME #19925 Remove this hack after a release cycle.
                let f = Callee::def(bcx.ccx(), def_id, substs);
                let datum = f.reify(bcx.ccx());
                val = OperandRef {
                    val: Immediate(datum.val),
                    ty: datum.ty
                };
            }
        }

        let llty = type_of::type_of(bcx.ccx(), val.ty);
        let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
        self.store_operand(bcx, cast_ptr, val);
    }

    // Stores the return value of a function call into its final location.
    fn store_return(&mut self,
                    bcx: &BlockAndBuilder<'bcx, 'tcx>,
                    dest: ReturnDest,
                    ret_ty: ArgType,
                    op: OperandRef<'tcx>) {
        use self::ReturnDest::*;

        match dest {
            Nothing => (),
            Store(dst) => ret_ty.store(bcx, op.immediate(), dst),
            IndirectOperand(tmp, index) => {
                let op = self.trans_load(bcx, tmp, op.ty);
                self.locals[index] = LocalRef::Operand(Some(op));
            }
            DirectOperand(index) => {
                // If there is a cast, we have to store and reload.
                let op = if ret_ty.cast.is_some() {
                    let tmp = bcx.with_block(|bcx| {
                        base::alloc_ty(bcx, op.ty, "tmp_ret")
                    });
                    ret_ty.store(bcx, op.immediate(), tmp);
                    self.trans_load(bcx, tmp, op.ty)
                } else {
                    op.unpack_if_pair(bcx)
                };
                self.locals[index] = LocalRef::Operand(Some(op));
            }
        }
    }
}

enum ReturnDest {
    // Do nothing, the return value is indirect or ignored
    Nothing,
    // Store the return value to the pointer
    Store(ValueRef),
    // Stores an indirect return value to an operand local lvalue
    IndirectOperand(ValueRef, mir::Local),
    // Stores a direct return value to an operand local lvalue
    DirectOperand(mir::Local)
}