1 // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
10 //! Translate the completed AST to the LLVM IR.
12 //! Some functions here, such as trans_block and trans_expr, return a value --
13 //! the result of the translation to LLVM -- while others, such as trans_fn,
14 //! trans_impl, and trans_item, are called only for the side effect of adding a
15 //! particular definition to the LLVM IR output we're producing.
17 //! Hopefully useful general knowledge about trans:
19 //! * There's no way to find out the Ty type of a ValueRef. Doing so
20 //! would be "trying to get the eggs out of an omelette" (credit:
21 //! pcwalton). You can, instead, find out its TypeRef by calling val_ty,
22 //! but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int,
23 //! int) and rec(x=int, y=int, z=int) will have the same TypeRef.
25 #![allow(non_camel_case_types)]
27 pub use self::ValueOrigin
::*;
29 use super::CrateTranslation
;
30 use super::ModuleTranslation
;
32 use back
::link
::mangle_exported_name
;
33 use back
::{link, abi}
;
35 use llvm
::{BasicBlockRef, Linkage, ValueRef, Vector, get_param}
;
37 use metadata
::{csearch, encoder, loader}
;
38 use middle
::astencode
;
40 use middle
::lang_items
::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem}
;
41 use middle
::weak_lang_items
;
42 use middle
::subst
::Substs
;
43 use middle
::ty
::{self, Ty, ClosureTyper, type_is_simd, simd_size}
;
44 use session
::config
::{self, NoDebugInfo}
;
48 use trans
::attributes
;
50 use trans
::builder
::{Builder, noname}
;
52 use trans
::cleanup
::CleanupMethods
;
55 use trans
::common
::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_integral}
;
56 use trans
::common
::{C_null, C_struct_in_context, C_u64, C_u8, C_undef}
;
57 use trans
::common
::{CrateContext, FunctionContext}
;
58 use trans
::common
::{Result, NodeIdAndSpan}
;
59 use trans
::common
::{node_id_type, return_type_is_void}
;
60 use trans
::common
::{type_is_immediate, type_is_zero_size, val_ty}
;
63 use trans
::context
::SharedCrateContext
;
64 use trans
::controlflow
;
66 use trans
::debuginfo
::{self, DebugLoc, ToDebugLoc}
;
73 use trans
::machine
::{llsize_of, llsize_of_real}
;
75 use trans
::monomorphize
;
77 use trans
::type_
::Type
;
79 use trans
::type_of
::*;
80 use trans
::value
::Value
;
81 use util
::common
::indenter
;
82 use util
::ppaux
::{Repr, ty_to_string}
;
83 use util
::sha2
::Sha256
;
84 use util
::nodemap
::NodeMap
;
86 use arena
::TypedArena
;
88 use std
::ffi
::{CStr, CString}
;
89 use std
::cell
::{Cell, RefCell}
;
90 use std
::collections
::HashSet
;
93 use std
::{i8, i16, i32, i64}
;
94 use syntax
::abi
::{Rust, RustCall, RustIntrinsic, Abi}
;
95 use syntax
::ast_util
::local_def
;
96 use syntax
::attr
::AttrMetaMethods
;
98 use syntax
::codemap
::Span
;
99 use syntax
::parse
::token
::InternedString
;
100 use syntax
::visit
::Visitor
;
102 use syntax
::{ast, ast_util, ast_map}
;
105 static TASK_LOCAL_INSN_KEY
: RefCell
<Option
<Vec
<&'
static str>>> = {
110 pub fn with_insn_ctxt
<F
>(blk
: F
) where
111 F
: FnOnce(&[&'
static str]),
113 TASK_LOCAL_INSN_KEY
.with(move |slot
| {
114 slot
.borrow().as_ref().map(move |s
| blk(s
));
118 pub fn init_insn_ctxt() {
119 TASK_LOCAL_INSN_KEY
.with(|slot
| {
120 *slot
.borrow_mut() = Some(Vec
::new());
124 pub struct _InsnCtxt
{
125 _cannot_construct_outside_of_this_module
: ()
129 impl Drop
for _InsnCtxt
{
131 TASK_LOCAL_INSN_KEY
.with(|slot
| {
132 match slot
.borrow_mut().as_mut() {
133 Some(ctx
) => { ctx.pop(); }
140 pub fn push_ctxt(s
: &'
static str) -> _InsnCtxt
{
141 debug
!("new InsnCtxt: {}", s
);
142 TASK_LOCAL_INSN_KEY
.with(|slot
| {
143 match slot
.borrow_mut().as_mut() {
144 Some(ctx
) => ctx
.push(s
),
148 _InsnCtxt { _cannot_construct_outside_of_this_module: () }
151 pub struct StatRecorder
<'a
, 'tcx
: 'a
> {
152 ccx
: &'a CrateContext
<'a
, 'tcx
>,
153 name
: Option
<String
>,
157 impl<'a
, 'tcx
> StatRecorder
<'a
, 'tcx
> {
158 pub fn new(ccx
: &'a CrateContext
<'a
, 'tcx
>, name
: String
)
159 -> StatRecorder
<'a
, 'tcx
> {
160 let istart
= ccx
.stats().n_llvm_insns
.get();
170 impl<'a
, 'tcx
> Drop
for StatRecorder
<'a
, 'tcx
> {
172 if self.ccx
.sess().trans_stats() {
173 let iend
= self.ccx
.stats().n_llvm_insns
.get();
174 self.ccx
.stats().fn_stats
.borrow_mut().push((self.name
.take().unwrap(),
175 iend
- self.istart
));
176 self.ccx
.stats().n_fns
.set(self.ccx
.stats().n_fns
.get() + 1);
177 // Reset LLVM insn count to avoid compound costs.
178 self.ccx
.stats().n_llvm_insns
.set(self.istart
);
183 fn get_extern_rust_fn
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>, fn_ty
: Ty
<'tcx
>,
184 name
: &str, did
: ast
::DefId
) -> ValueRef
{
185 match ccx
.externs().borrow().get(name
) {
186 Some(n
) => return *n
,
190 let f
= declare
::declare_rust_fn(ccx
, name
, fn_ty
);
192 let attrs
= csearch
::get_item_attrs(&ccx
.sess().cstore
, did
);
193 attributes
::from_fn_attrs(ccx
, &attrs
[..], f
);
195 ccx
.externs().borrow_mut().insert(name
.to_string(), f
);
199 pub fn self_type_for_closure
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>,
200 closure_id
: ast
::DefId
,
204 let closure_kind
= ccx
.tcx().closure_kind(closure_id
);
206 ty
::FnClosureKind
=> {
207 ty
::mk_imm_rptr(ccx
.tcx(), ccx
.tcx().mk_region(ty
::ReStatic
), fn_ty
)
209 ty
::FnMutClosureKind
=> {
210 ty
::mk_mut_rptr(ccx
.tcx(), ccx
.tcx().mk_region(ty
::ReStatic
), fn_ty
)
212 ty
::FnOnceClosureKind
=> fn_ty
216 pub fn kind_for_closure(ccx
: &CrateContext
, closure_id
: ast
::DefId
) -> ty
::ClosureKind
{
217 *ccx
.tcx().closure_kinds
.borrow().get(&closure_id
).unwrap()
220 pub fn get_extern_const
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>, did
: ast
::DefId
,
221 t
: Ty
<'tcx
>) -> ValueRef
{
222 let name
= csearch
::get_symbol(&ccx
.sess().cstore
, did
);
223 let ty
= type_of(ccx
, t
);
224 match ccx
.externs().borrow_mut().get(&name
) {
225 Some(n
) => return *n
,
228 // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
229 // FIXME(nagisa): investigate whether it can be changed into define_global
230 let c
= declare
::declare_global(ccx
, &name
[..], ty
);
231 // Thread-local statics in some other crate need to *always* be linked
232 // against in a thread-local fashion, so we need to be sure to apply the
233 // thread-local attribute locally if it was present remotely. If we
234 // don't do this then linker errors can be generated where the linker
235 // complains that one object file has a thread local version of the
236 // symbol and another one doesn't.
237 for attr
in &*ty
::get_attrs(ccx
.tcx(), did
) {
238 if attr
.check_name("thread_local") {
239 llvm
::set_thread_local(c
, true);
242 ccx
.externs().borrow_mut().insert(name
.to_string(), c
);
246 fn require_alloc_fn
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
247 info_ty
: Ty
<'tcx
>, it
: LangItem
) -> ast
::DefId
{
248 match bcx
.tcx().lang_items
.require(it
) {
251 bcx
.sess().fatal(&format
!("allocation of `{}` {}",
252 bcx
.ty_to_string(info_ty
),
258 // The following malloc_raw_dyn* functions allocate a box to contain
259 // a given type, but with a potentially dynamic size.
261 pub fn malloc_raw_dyn
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
267 -> Result
<'blk
, 'tcx
> {
268 let _icx
= push_ctxt("malloc_raw_exchange");
271 let r
= callee
::trans_lang_call(bcx
,
272 require_alloc_fn(bcx
, info_ty
, ExchangeMallocFnLangItem
),
277 Result
::new(r
.bcx
, PointerCast(r
.bcx
, r
.val
, llty_ptr
))
281 pub fn bin_op_to_icmp_predicate(ccx
: &CrateContext
, op
: ast
::BinOp_
, signed
: bool
)
282 -> llvm
::IntPredicate
{
284 ast
::BiEq
=> llvm
::IntEQ
,
285 ast
::BiNe
=> llvm
::IntNE
,
286 ast
::BiLt
=> if signed { llvm::IntSLT }
else { llvm::IntULT }
,
287 ast
::BiLe
=> if signed { llvm::IntSLE }
else { llvm::IntULE }
,
288 ast
::BiGt
=> if signed { llvm::IntSGT }
else { llvm::IntUGT }
,
289 ast
::BiGe
=> if signed { llvm::IntSGE }
else { llvm::IntUGE }
,
291 ccx
.sess().bug(&format
!("comparison_op_to_icmp_predicate: expected \
292 comparison operator, found {:?}", op
));
297 pub fn bin_op_to_fcmp_predicate(ccx
: &CrateContext
, op
: ast
::BinOp_
)
298 -> llvm
::RealPredicate
{
300 ast
::BiEq
=> llvm
::RealOEQ
,
301 ast
::BiNe
=> llvm
::RealUNE
,
302 ast
::BiLt
=> llvm
::RealOLT
,
303 ast
::BiLe
=> llvm
::RealOLE
,
304 ast
::BiGt
=> llvm
::RealOGT
,
305 ast
::BiGe
=> llvm
::RealOGE
,
307 ccx
.sess().bug(&format
!("comparison_op_to_fcmp_predicate: expected \
308 comparison operator, found {:?}", op
));
313 pub fn compare_scalar_types
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
321 ty
::ty_tup(ref tys
) if tys
.is_empty() => {
322 // We don't need to do actual comparisons for nil.
323 // () == () holds but () < () does not.
325 ast
::BiEq
| ast
::BiLe
| ast
::BiGe
=> return C_bool(bcx
.ccx(), true),
326 ast
::BiNe
| ast
::BiLt
| ast
::BiGt
=> return C_bool(bcx
.ccx(), false),
327 // refinements would be nice
328 _
=> bcx
.sess().bug("compare_scalar_types: must be a comparison operator")
331 ty
::ty_bare_fn(..) | ty
::ty_bool
| ty
::ty_uint(_
) | ty
::ty_char
=> {
332 ICmp(bcx
, bin_op_to_icmp_predicate(bcx
.ccx(), op
, false), lhs
, rhs
, debug_loc
)
334 ty
::ty_ptr(mt
) if common
::type_is_sized(bcx
.tcx(), mt
.ty
) => {
335 ICmp(bcx
, bin_op_to_icmp_predicate(bcx
.ccx(), op
, false), lhs
, rhs
, debug_loc
)
338 ICmp(bcx
, bin_op_to_icmp_predicate(bcx
.ccx(), op
, true), lhs
, rhs
, debug_loc
)
341 FCmp(bcx
, bin_op_to_fcmp_predicate(bcx
.ccx(), op
), lhs
, rhs
, debug_loc
)
343 // Should never get here, because t is scalar.
344 _
=> bcx
.sess().bug("non-scalar type passed to compare_scalar_types")
348 pub fn compare_simd_types
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
355 let signed
= match t
.sty
{
357 // The comparison operators for floating point vectors are challenging.
358 // LLVM outputs a `< size x i1 >`, but if we perform a sign extension
359 // then bitcast to a floating point vector, the result will be `-NaN`
360 // for each truth value. Because of this they are unsupported.
361 bcx
.sess().bug("compare_simd_types: comparison operators \
362 not supported for floating point SIMD types")
364 ty
::ty_uint(_
) => false,
365 ty
::ty_int(_
) => true,
366 _
=> bcx
.sess().bug("compare_simd_types: invalid SIMD type"),
369 let cmp
= bin_op_to_icmp_predicate(bcx
.ccx(), op
, signed
);
370 // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
371 // to get the correctly sized type. This will compile to a single instruction
372 // once the IR is converted to assembly if the SIMD instruction is supported
373 // by the target architecture.
374 SExt(bcx
, ICmp(bcx
, cmp
, lhs
, rhs
, debug_loc
), val_ty(lhs
))
377 // Iterates through the elements of a structural type.
378 pub fn iter_structural_ty
<'blk
, 'tcx
, F
>(cx
: Block
<'blk
, 'tcx
>,
382 -> Block
<'blk
, 'tcx
> where
383 F
: FnMut(Block
<'blk
, 'tcx
>, ValueRef
, Ty
<'tcx
>) -> Block
<'blk
, 'tcx
>,
385 let _icx
= push_ctxt("iter_structural_ty");
387 fn iter_variant
<'blk
, 'tcx
, F
>(cx
: Block
<'blk
, 'tcx
>,
388 repr
: &adt
::Repr
<'tcx
>,
390 variant
: &ty
::VariantInfo
<'tcx
>,
391 substs
: &Substs
<'tcx
>,
393 -> Block
<'blk
, 'tcx
> where
394 F
: FnMut(Block
<'blk
, 'tcx
>, ValueRef
, Ty
<'tcx
>) -> Block
<'blk
, 'tcx
>,
396 let _icx
= push_ctxt("iter_variant");
400 for (i
, &arg
) in variant
.args
.iter().enumerate() {
401 let arg
= monomorphize
::apply_param_substs(tcx
, substs
, &arg
);
402 cx
= f(cx
, adt
::trans_field_ptr(cx
, repr
, av
, variant
.disr_val
, i
), arg
);
407 let (data_ptr
, info
) = if common
::type_is_sized(cx
.tcx(), t
) {
410 let data
= GEPi(cx
, av
, &[0, abi
::FAT_PTR_ADDR
]);
411 let info
= GEPi(cx
, av
, &[0, abi
::FAT_PTR_EXTRA
]);
412 (Load(cx
, data
), Some(Load(cx
, info
)))
417 ty
::ty_struct(..) => {
418 let repr
= adt
::represent_type(cx
.ccx(), t
);
419 expr
::with_field_tys(cx
.tcx(), t
, None
, |discr
, field_tys
| {
420 for (i
, field_ty
) in field_tys
.iter().enumerate() {
421 let field_ty
= field_ty
.mt
.ty
;
422 let llfld_a
= adt
::trans_field_ptr(cx
, &*repr
, data_ptr
, discr
, i
);
424 let val
= if common
::type_is_sized(cx
.tcx(), field_ty
) {
427 let scratch
= datum
::rvalue_scratch_datum(cx
, field_ty
, "__fat_ptr_iter");
428 Store(cx
, llfld_a
, GEPi(cx
, scratch
.val
, &[0, abi
::FAT_PTR_ADDR
]));
429 Store(cx
, info
.unwrap(), GEPi(cx
, scratch
.val
, &[0, abi
::FAT_PTR_EXTRA
]));
432 cx
= f(cx
, val
, field_ty
);
436 ty
::ty_closure(def_id
, substs
) => {
437 let repr
= adt
::represent_type(cx
.ccx(), t
);
438 let typer
= common
::NormalizingClosureTyper
::new(cx
.tcx());
439 let upvars
= typer
.closure_upvars(def_id
, substs
).unwrap();
440 for (i
, upvar
) in upvars
.iter().enumerate() {
441 let llupvar
= adt
::trans_field_ptr(cx
, &*repr
, data_ptr
, 0, i
);
442 cx
= f(cx
, llupvar
, upvar
.ty
);
445 ty
::ty_vec(_
, Some(n
)) => {
446 let (base
, len
) = tvec
::get_fixed_base_and_len(cx
, data_ptr
, n
);
447 let unit_ty
= ty
::sequence_element_type(cx
.tcx(), t
);
448 cx
= tvec
::iter_vec_raw(cx
, base
, unit_ty
, len
, f
);
450 ty
::ty_vec(_
, None
) | ty
::ty_str
=> {
451 let unit_ty
= ty
::sequence_element_type(cx
.tcx(), t
);
452 cx
= tvec
::iter_vec_raw(cx
, data_ptr
, unit_ty
, info
.unwrap(), f
);
454 ty
::ty_tup(ref args
) => {
455 let repr
= adt
::represent_type(cx
.ccx(), t
);
456 for (i
, arg
) in args
.iter().enumerate() {
457 let llfld_a
= adt
::trans_field_ptr(cx
, &*repr
, data_ptr
, 0, i
);
458 cx
= f(cx
, llfld_a
, *arg
);
461 ty
::ty_enum(tid
, substs
) => {
465 let repr
= adt
::represent_type(ccx
, t
);
466 let variants
= ty
::enum_variants(ccx
.tcx(), tid
);
467 let n_variants
= (*variants
).len();
469 // NB: we must hit the discriminant first so that structural
470 // comparisons know not to proceed when the discriminants differ.
472 match adt
::trans_switch(cx
, &*repr
, av
) {
473 (_match
::Single
, None
) => {
474 cx
= iter_variant(cx
, &*repr
, av
, &*(*variants
)[0],
477 (_match
::Switch
, Some(lldiscrim_a
)) => {
478 cx
= f(cx
, lldiscrim_a
, cx
.tcx().types
.isize);
479 let unr_cx
= fcx
.new_temp_block("enum-iter-unr");
481 let llswitch
= Switch(cx
, lldiscrim_a
, unr_cx
.llbb
,
483 let next_cx
= fcx
.new_temp_block("enum-iter-next");
485 for variant
in &(*variants
) {
488 &format
!("enum-iter-variant-{}",
489 &variant
.disr_val
.to_string())
491 match adt
::trans_case(cx
, &*repr
, variant
.disr_val
) {
492 _match
::SingleResult(r
) => {
493 AddCase(llswitch
, r
.val
, variant_cx
.llbb
)
495 _
=> ccx
.sess().unimpl("value from adt::trans_case \
496 in iter_structural_ty")
499 iter_variant(variant_cx
,
505 Br(variant_cx
, next_cx
.llbb
, DebugLoc
::None
);
509 _
=> ccx
.sess().unimpl("value from adt::trans_switch \
510 in iter_structural_ty")
514 cx
.sess().unimpl(&format
!("type in iter_structural_ty: {}",
515 ty_to_string(cx
.tcx(), t
)))
521 pub fn cast_shift_expr_rhs(cx
: Block
,
526 cast_shift_rhs(op
, lhs
, rhs
,
527 |a
,b
| Trunc(cx
, a
, b
),
528 |a
,b
| ZExt(cx
, a
, b
))
531 pub fn cast_shift_const_rhs(op
: ast
::BinOp_
,
532 lhs
: ValueRef
, rhs
: ValueRef
) -> ValueRef
{
533 cast_shift_rhs(op
, lhs
, rhs
,
534 |a
, b
| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) }
,
535 |a
, b
| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) }
)
538 fn cast_shift_rhs
<F
, G
>(op
: ast
::BinOp_
,
544 F
: FnOnce(ValueRef
, Type
) -> ValueRef
,
545 G
: FnOnce(ValueRef
, Type
) -> ValueRef
,
547 // Shifts may have any size int on the rhs
548 if ast_util
::is_shift_binop(op
) {
549 let mut rhs_llty
= val_ty(rhs
);
550 let mut lhs_llty
= val_ty(lhs
);
551 if rhs_llty
.kind() == Vector { rhs_llty = rhs_llty.element_type() }
552 if lhs_llty
.kind() == Vector { lhs_llty = lhs_llty.element_type() }
553 let rhs_sz
= rhs_llty
.int_width();
554 let lhs_sz
= lhs_llty
.int_width();
557 } else if lhs_sz
> rhs_sz
{
558 // FIXME (#1877): If shifting by negative
559 // values becomes not undefined then this is wrong.
569 pub fn llty_and_min_for_signed_ty
<'blk
, 'tcx
>(cx
: Block
<'blk
, 'tcx
>,
570 val_t
: Ty
<'tcx
>) -> (Type
, u64) {
573 let llty
= Type
::int_from_ty(cx
.ccx(), t
);
575 ast
::TyIs
if llty
== Type
::i32(cx
.ccx()) => i32::MIN
as u64,
576 ast
::TyIs
=> i64::MIN
as u64,
577 ast
::TyI8
=> i8::MIN
as u64,
578 ast
::TyI16
=> i16::MIN
as u64,
579 ast
::TyI32
=> i32::MIN
as u64,
580 ast
::TyI64
=> i64::MIN
as u64,
588 pub fn fail_if_zero_or_overflows
<'blk
, 'tcx
>(
589 cx
: Block
<'blk
, 'tcx
>,
590 call_info
: NodeIdAndSpan
,
595 -> Block
<'blk
, 'tcx
> {
596 let (zero_text
, overflow_text
) = if divrem
.node
== ast
::BiDiv
{
597 ("attempted to divide by zero",
598 "attempted to divide with overflow")
600 ("attempted remainder with a divisor of zero",
601 "attempted remainder with overflow")
603 let debug_loc
= call_info
.debug_loc();
605 let (is_zero
, is_signed
) = match rhs_t
.sty
{
607 let zero
= C_integral(Type
::int_from_ty(cx
.ccx(), t
), 0, false);
608 (ICmp(cx
, llvm
::IntEQ
, rhs
, zero
, debug_loc
), true)
611 let zero
= C_integral(Type
::uint_from_ty(cx
.ccx(), t
), 0, false);
612 (ICmp(cx
, llvm
::IntEQ
, rhs
, zero
, debug_loc
), false)
614 ty
::ty_struct(_
, _
) if type_is_simd(cx
.tcx(), rhs_t
) => {
615 let mut res
= C_bool(cx
.ccx(), false);
616 for i
in 0 .. simd_size(cx
.tcx(), rhs_t
) {
619 ExtractElement(cx
, rhs
, C_int(cx
.ccx(), i
as i64))), debug_loc
);
624 cx
.sess().bug(&format
!("fail-if-zero on unexpected type: {}",
625 ty_to_string(cx
.tcx(), rhs_t
)));
628 let bcx
= with_cond(cx
, is_zero
, |bcx
| {
629 controlflow
::trans_fail(bcx
, call_info
, InternedString
::new(zero_text
))
632 // To quote LLVM's documentation for the sdiv instruction:
634 // Division by zero leads to undefined behavior. Overflow also leads
635 // to undefined behavior; this is a rare case, but can occur, for
636 // example, by doing a 32-bit division of -2147483648 by -1.
638 // In order to avoid undefined behavior, we perform runtime checks for
639 // signed division/remainder which would trigger overflow. For unsigned
640 // integers, no action beyond checking for zero need be taken.
642 let (llty
, min
) = llty_and_min_for_signed_ty(cx
, rhs_t
);
643 let minus_one
= ICmp(bcx
, llvm
::IntEQ
, rhs
,
644 C_integral(llty
, !0, false), debug_loc
);
645 with_cond(bcx
, minus_one
, |bcx
| {
646 let is_min
= ICmp(bcx
, llvm
::IntEQ
, lhs
,
647 C_integral(llty
, min
, true), debug_loc
);
648 with_cond(bcx
, is_min
, |bcx
| {
649 controlflow
::trans_fail(bcx
,
651 InternedString
::new(overflow_text
))
659 pub fn trans_external_path
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>,
660 did
: ast
::DefId
, t
: Ty
<'tcx
>) -> ValueRef
{
661 let name
= csearch
::get_symbol(&ccx
.sess().cstore
, did
);
663 ty
::ty_bare_fn(_
, ref fn_ty
) => {
664 match ccx
.sess().target
.target
.adjust_abi(fn_ty
.abi
) {
666 get_extern_rust_fn(ccx
, t
, &name
[..], did
)
669 ccx
.sess().bug("unexpected intrinsic in trans_external_path")
672 let llfn
= foreign
::register_foreign_item_fn(ccx
, fn_ty
.abi
, t
, &name
[..]);
673 let attrs
= csearch
::get_item_attrs(&ccx
.sess().cstore
, did
);
674 attributes
::from_fn_attrs(ccx
, &attrs
, llfn
);
680 get_extern_const(ccx
, did
, t
)
685 pub fn invoke
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
690 -> (ValueRef
, Block
<'blk
, 'tcx
>) {
691 let _icx
= push_ctxt("invoke_");
692 if bcx
.unreachable
.get() {
693 return (C_null(Type
::i8(bcx
.ccx())), bcx
);
696 let attributes
= attributes
::from_fn_type(bcx
.ccx(), fn_ty
);
698 match bcx
.opt_node_id
{
700 debug
!("invoke at ???");
703 debug
!("invoke at {}", bcx
.tcx().map
.node_to_string(id
));
707 if need_invoke(bcx
) {
708 debug
!("invoking {} at {:?}", bcx
.val_to_string(llfn
), bcx
.llbb
);
709 for &llarg
in llargs
{
710 debug
!("arg: {}", bcx
.val_to_string(llarg
));
712 let normal_bcx
= bcx
.fcx
.new_temp_block("normal-return");
713 let landing_pad
= bcx
.fcx
.get_landing_pad();
715 let llresult
= Invoke(bcx
,
722 return (llresult
, normal_bcx
);
724 debug
!("calling {} at {:?}", bcx
.val_to_string(llfn
), bcx
.llbb
);
725 for &llarg
in llargs
{
726 debug
!("arg: {}", bcx
.val_to_string(llarg
));
729 let llresult
= Call(bcx
,
734 return (llresult
, bcx
);
738 pub fn need_invoke(bcx
: Block
) -> bool
{
739 if bcx
.sess().no_landing_pads() {
743 // Avoid using invoke if we are already inside a landing pad.
748 bcx
.fcx
.needs_invoke()
751 pub fn load_if_immediate
<'blk
, 'tcx
>(cx
: Block
<'blk
, 'tcx
>,
752 v
: ValueRef
, t
: Ty
<'tcx
>) -> ValueRef
{
753 let _icx
= push_ctxt("load_if_immediate");
754 if type_is_immediate(cx
.ccx(), t
) { return load_ty(cx, v, t); }
758 /// Helper for loading values from memory. Does the necessary conversion if the in-memory type
759 /// differs from the type used for SSA values. Also handles various special cases where the type
760 /// gives us better information about what we are loading.
761 pub fn load_ty
<'blk
, 'tcx
>(cx
: Block
<'blk
, 'tcx
>,
762 ptr
: ValueRef
, t
: Ty
<'tcx
>) -> ValueRef
{
763 if cx
.unreachable
.get() || type_is_zero_size(cx
.ccx(), t
) {
764 return C_undef(type_of
::type_of(cx
.ccx(), t
));
767 let ptr
= to_arg_ty_ptr(cx
, ptr
, t
);
769 if type_is_immediate(cx
.ccx(), t
) && type_of
::type_of(cx
.ccx(), t
).is_aggregate() {
770 return Load(cx
, ptr
);
774 let global
= llvm
::LLVMIsAGlobalVariable(ptr
);
775 if !global
.is_null() && llvm
::LLVMIsGlobalConstant(global
) == llvm
::True
{
776 let val
= llvm
::LLVMGetInitializer(global
);
778 return from_arg_ty(cx
, val
, t
);
783 let val
= if ty
::type_is_bool(t
) {
784 LoadRangeAssert(cx
, ptr
, 0, 2, llvm
::False
)
785 } else if ty
::type_is_char(t
) {
786 // a char is a Unicode codepoint, and so takes values from 0
787 // to 0x10FFFF inclusive only.
788 LoadRangeAssert(cx
, ptr
, 0, 0x10FFFF + 1, llvm
::False
)
789 } else if (ty
::type_is_region_ptr(t
) || ty
::type_is_unique(t
))
790 && !common
::type_is_fat_ptr(cx
.tcx(), t
) {
796 from_arg_ty(cx
, val
, t
)
799 /// Helper for storing values in memory. Does the necessary conversion if the in-memory type
800 /// differs from the type used for SSA values.
801 pub fn store_ty
<'blk
, 'tcx
>(cx
: Block
<'blk
, 'tcx
>, v
: ValueRef
, dst
: ValueRef
, t
: Ty
<'tcx
>) {
802 Store(cx
, to_arg_ty(cx
, v
, t
), to_arg_ty_ptr(cx
, dst
, t
));
805 pub fn to_arg_ty(bcx
: Block
, val
: ValueRef
, ty
: Ty
) -> ValueRef
{
806 if ty
::type_is_bool(ty
) {
807 ZExt(bcx
, val
, Type
::i8(bcx
.ccx()))
813 pub fn from_arg_ty(bcx
: Block
, val
: ValueRef
, ty
: Ty
) -> ValueRef
{
814 if ty
::type_is_bool(ty
) {
815 Trunc(bcx
, val
, Type
::i1(bcx
.ccx()))
821 pub fn to_arg_ty_ptr
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>, ptr
: ValueRef
, ty
: Ty
<'tcx
>) -> ValueRef
{
822 if type_is_immediate(bcx
.ccx(), ty
) && type_of
::type_of(bcx
.ccx(), ty
).is_aggregate() {
823 // We want to pass small aggregates as immediate values, but using an aggregate LLVM type
824 // for this leads to bad optimizations, so its arg type is an appropriately sized integer
825 // and we have to convert it
826 BitCast(bcx
, ptr
, type_of
::arg_type_of(bcx
.ccx(), ty
).ptr_to())
832 pub fn init_local
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>, local
: &ast
::Local
)
833 -> Block
<'blk
, 'tcx
> {
834 debug
!("init_local(bcx={}, local.id={})", bcx
.to_str(), local
.id
);
835 let _indenter
= indenter();
836 let _icx
= push_ctxt("init_local");
837 _match
::store_local(bcx
, local
)
840 pub fn raw_block
<'blk
, 'tcx
>(fcx
: &'blk FunctionContext
<'blk
, 'tcx
>,
843 -> Block
<'blk
, 'tcx
> {
844 common
::BlockS
::new(llbb
, is_lpad
, None
, fcx
)
847 pub fn with_cond
<'blk
, 'tcx
, F
>(bcx
: Block
<'blk
, 'tcx
>,
850 -> Block
<'blk
, 'tcx
> where
851 F
: FnOnce(Block
<'blk
, 'tcx
>) -> Block
<'blk
, 'tcx
>,
853 let _icx
= push_ctxt("with_cond");
855 if bcx
.unreachable
.get() ||
856 (common
::is_const(val
) && common
::const_to_uint(val
) == 0) {
861 let next_cx
= fcx
.new_temp_block("next");
862 let cond_cx
= fcx
.new_temp_block("cond");
863 CondBr(bcx
, val
, cond_cx
.llbb
, next_cx
.llbb
, DebugLoc
::None
);
864 let after_cx
= f(cond_cx
);
865 if !after_cx
.terminated
.get() {
866 Br(after_cx
, next_cx
.llbb
, DebugLoc
::None
);
871 pub fn call_lifetime_start(cx
: Block
, ptr
: ValueRef
) {
872 if cx
.sess().opts
.optimize
== config
::No
{
876 let _icx
= push_ctxt("lifetime_start");
879 let llsize
= C_u64(ccx
, machine
::llsize_of_alloc(ccx
, val_ty(ptr
).element_type()));
880 let ptr
= PointerCast(cx
, ptr
, Type
::i8p(ccx
));
881 let lifetime_start
= ccx
.get_intrinsic(&"llvm.lifetime.start");
882 Call(cx
, lifetime_start
, &[llsize
, ptr
], None
, DebugLoc
::None
);
885 pub fn call_lifetime_end(cx
: Block
, ptr
: ValueRef
) {
886 if cx
.sess().opts
.optimize
== config
::No
{
890 let _icx
= push_ctxt("lifetime_end");
893 let llsize
= C_u64(ccx
, machine
::llsize_of_alloc(ccx
, val_ty(ptr
).element_type()));
894 let ptr
= PointerCast(cx
, ptr
, Type
::i8p(ccx
));
895 let lifetime_end
= ccx
.get_intrinsic(&"llvm.lifetime.end");
896 Call(cx
, lifetime_end
, &[llsize
, ptr
], None
, DebugLoc
::None
);
899 pub fn call_memcpy(cx
: Block
, dst
: ValueRef
, src
: ValueRef
, n_bytes
: ValueRef
, align
: u32) {
900 let _icx
= push_ctxt("call_memcpy");
902 let key
= match &ccx
.sess().target
.target
.target_pointer_width
[..] {
903 "32" => "llvm.memcpy.p0i8.p0i8.i32",
904 "64" => "llvm.memcpy.p0i8.p0i8.i64",
905 tws
=> panic
!("Unsupported target word size for memcpy: {}", tws
),
907 let memcpy
= ccx
.get_intrinsic(&key
);
908 let src_ptr
= PointerCast(cx
, src
, Type
::i8p(ccx
));
909 let dst_ptr
= PointerCast(cx
, dst
, Type
::i8p(ccx
));
910 let size
= IntCast(cx
, n_bytes
, ccx
.int_type());
911 let align
= C_i32(ccx
, align
as i32);
912 let volatile
= C_bool(ccx
, false);
913 Call(cx
, memcpy
, &[dst_ptr
, src_ptr
, size
, align
, volatile
], None
, DebugLoc
::None
);
916 pub fn memcpy_ty
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
917 dst
: ValueRef
, src
: ValueRef
,
919 let _icx
= push_ctxt("memcpy_ty");
921 if ty
::type_is_structural(t
) {
922 let llty
= type_of
::type_of(ccx
, t
);
923 let llsz
= llsize_of(ccx
, llty
);
924 let llalign
= type_of
::align_of(ccx
, t
);
925 call_memcpy(bcx
, dst
, src
, llsz
, llalign
as u32);
927 store_ty(bcx
, load_ty(bcx
, src
, t
), dst
, t
);
931 pub fn drop_done_fill_mem
<'blk
, 'tcx
>(cx
: Block
<'blk
, 'tcx
>, llptr
: ValueRef
, t
: Ty
<'tcx
>) {
932 if cx
.unreachable
.get() { return; }
933 let _icx
= push_ctxt("drop_done_fill_mem");
935 memfill(&B(bcx
), llptr
, t
, adt
::DTOR_DONE
);
938 pub fn init_zero_mem
<'blk
, 'tcx
>(cx
: Block
<'blk
, 'tcx
>, llptr
: ValueRef
, t
: Ty
<'tcx
>) {
939 if cx
.unreachable
.get() { return; }
940 let _icx
= push_ctxt("init_zero_mem");
942 memfill(&B(bcx
), llptr
, t
, 0);
945 // Always use this function instead of storing a constant byte to the memory
946 // in question. e.g. if you store a zero constant, LLVM will drown in vreg
947 // allocation for large data structures, and the generated code will be
948 // awful. (A telltale sign of this is large quantities of
949 // `mov [byte ptr foo],0` in the generated code.)
950 fn memfill
<'a
, 'tcx
>(b
: &Builder
<'a
, 'tcx
>, llptr
: ValueRef
, ty
: Ty
<'tcx
>, byte
: u8) {
951 let _icx
= push_ctxt("memfill");
954 let llty
= type_of
::type_of(ccx
, ty
);
956 let intrinsic_key
= match &ccx
.sess().target
.target
.target_pointer_width
[..] {
957 "32" => "llvm.memset.p0i8.i32",
958 "64" => "llvm.memset.p0i8.i64",
959 tws
=> panic
!("Unsupported target word size for memset: {}", tws
),
962 let llintrinsicfn
= ccx
.get_intrinsic(&intrinsic_key
);
963 let llptr
= b
.pointercast(llptr
, Type
::i8(ccx
).ptr_to());
964 let llzeroval
= C_u8(ccx
, byte
as usize);
965 let size
= machine
::llsize_of(ccx
, llty
);
966 let align
= C_i32(ccx
, type_of
::align_of(ccx
, ty
) as i32);
967 let volatile
= C_bool(ccx
, false);
968 b
.call(llintrinsicfn
, &[llptr
, llzeroval
, size
, align
, volatile
], None
);
971 pub fn alloc_ty
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>, t
: Ty
<'tcx
>, name
: &str) -> ValueRef
{
972 let _icx
= push_ctxt("alloc_ty");
974 let ty
= type_of
::type_of(ccx
, t
);
975 assert
!(!ty
::type_has_params(t
));
976 let val
= alloca(bcx
, ty
, name
);
980 pub fn alloca(cx
: Block
, ty
: Type
, name
: &str) -> ValueRef
{
981 let p
= alloca_no_lifetime(cx
, ty
, name
);
982 call_lifetime_start(cx
, p
);
986 pub fn alloca_no_lifetime(cx
: Block
, ty
: Type
, name
: &str) -> ValueRef
{
987 let _icx
= push_ctxt("alloca");
988 if cx
.unreachable
.get() {
990 return llvm
::LLVMGetUndef(ty
.ptr_to().to_ref());
993 debuginfo
::clear_source_location(cx
.fcx
);
997 // Creates the alloca slot which holds the pointer to the slot for the final return value
998 pub fn make_return_slot_pointer
<'a
, 'tcx
>(fcx
: &FunctionContext
<'a
, 'tcx
>,
999 output_type
: Ty
<'tcx
>) -> ValueRef
{
1000 let lloutputtype
= type_of
::type_of(fcx
.ccx
, output_type
);
1002 // We create an alloca to hold a pointer of type `output_type`
1003 // which will hold the pointer to the right alloca which has the
1005 if fcx
.needs_ret_allocas
{
1006 // Let's create the stack slot
1007 let slot
= AllocaFcx(fcx
, lloutputtype
.ptr_to(), "llretslotptr");
1009 // and if we're using an out pointer, then store that in our newly made slot
1010 if type_of
::return_uses_outptr(fcx
.ccx
, output_type
) {
1011 let outptr
= get_param(fcx
.llfn
, 0);
1013 let b
= fcx
.ccx
.builder();
1014 b
.position_before(fcx
.alloca_insert_pt
.get().unwrap());
1015 b
.store(outptr
, slot
);
1020 // But if there are no nested returns, we skip the indirection and have a single
1023 if type_of
::return_uses_outptr(fcx
.ccx
, output_type
) {
1024 get_param(fcx
.llfn
, 0)
1026 AllocaFcx(fcx
, lloutputtype
, "sret_slot")
1031 struct FindNestedReturn
{
1035 impl FindNestedReturn
{
1036 fn new() -> FindNestedReturn
{
1037 FindNestedReturn { found: false }
1041 impl<'v
> Visitor
<'v
> for FindNestedReturn
{
1042 fn visit_expr(&mut self, e
: &ast
::Expr
) {
1044 ast
::ExprRet(..) => {
1047 _
=> visit
::walk_expr(self, e
)
1052 fn build_cfg(tcx
: &ty
::ctxt
, id
: ast
::NodeId
) -> (ast
::NodeId
, Option
<cfg
::CFG
>) {
1053 let blk
= match tcx
.map
.find(id
) {
1054 Some(ast_map
::NodeItem(i
)) => {
1056 ast
::ItemFn(_
, _
, _
, _
, ref blk
) => {
1059 _
=> tcx
.sess
.bug("unexpected item variant in has_nested_returns")
1062 Some(ast_map
::NodeTraitItem(trait_item
)) => {
1063 match trait_item
.node
{
1064 ast
::MethodTraitItem(_
, Some(ref body
)) => body
,
1065 ast
::MethodTraitItem(_
, None
) => {
1066 tcx
.sess
.bug("unexpected variant: required trait method \
1067 in has_nested_returns")
1069 ast
::TypeTraitItem(..) => {
1070 tcx
.sess
.bug("unexpected variant: associated type trait item in \
1071 has_nested_returns")
1075 Some(ast_map
::NodeImplItem(impl_item
)) => {
1076 match impl_item
.node
{
1077 ast
::MethodImplItem(_
, ref body
) => body
,
1078 ast
::TypeImplItem(_
) => {
1079 tcx
.sess
.bug("unexpected variant: associated type impl item in \
1080 has_nested_returns")
1082 ast
::MacImplItem(_
) => {
1083 tcx
.sess
.bug("unexpected variant: unexpanded macro impl item in \
1084 has_nested_returns")
1088 Some(ast_map
::NodeExpr(e
)) => {
1090 ast
::ExprClosure(_
, _
, ref blk
) => blk
,
1091 _
=> tcx
.sess
.bug("unexpected expr variant in has_nested_returns")
1094 Some(ast_map
::NodeVariant(..)) |
1095 Some(ast_map
::NodeStructCtor(..)) => return (ast
::DUMMY_NODE_ID
, None
),
1098 None
if id
== ast
::DUMMY_NODE_ID
=> return (ast
::DUMMY_NODE_ID
, None
),
1100 _
=> tcx
.sess
.bug(&format
!("unexpected variant in has_nested_returns: {}",
1101 tcx
.map
.path_to_string(id
)))
1104 (blk
.id
, Some(cfg
::CFG
::new(tcx
, blk
)))
1107 // Checks for the presence of "nested returns" in a function.
1108 // Nested returns are when the inner expression of a return expression
1109 // (the 'expr' in 'return expr') contains a return expression. Only cases
1110 // where the outer return is actually reachable are considered. Implicit
1111 // returns from the end of blocks are considered as well.
1113 // This check is needed to handle the case where the inner expression is
1114 // part of a larger expression that may have already partially-filled the
1115 // return slot alloca. This can cause errors related to clean-up due to
1116 // the clobbering of the existing value in the return slot.
1117 fn has_nested_returns(tcx
: &ty
::ctxt
, cfg
: &cfg
::CFG
, blk_id
: ast
::NodeId
) -> bool
{
1118 for n
in cfg
.graph
.depth_traverse(cfg
.entry
) {
1119 match tcx
.map
.find(n
.id()) {
1120 Some(ast_map
::NodeExpr(ex
)) => {
1121 if let ast
::ExprRet(Some(ref ret_expr
)) = ex
.node
{
1122 let mut visitor
= FindNestedReturn
::new();
1123 visit
::walk_expr(&mut visitor
, &**ret_expr
);
1129 Some(ast_map
::NodeBlock(blk
)) if blk
.id
== blk_id
=> {
1130 let mut visitor
= FindNestedReturn
::new();
1131 visit
::walk_expr_opt(&mut visitor
, &blk
.expr
);
1143 // NB: must keep 4 fns in sync:
1146 // - create_datums_for_fn_args.
1150 // Be warned! You must call `init_function` before doing anything with the
1151 // returned function context.
1152 pub fn new_fn_ctxt
<'a
, 'tcx
>(ccx
: &'a CrateContext
<'a
, 'tcx
>,
1156 output_type
: ty
::FnOutput
<'tcx
>,
1157 param_substs
: &'tcx Substs
<'tcx
>,
1159 block_arena
: &'a TypedArena
<common
::BlockS
<'a
, 'tcx
>>)
1160 -> FunctionContext
<'a
, 'tcx
> {
1161 common
::validate_substs(param_substs
);
1163 debug
!("new_fn_ctxt(path={}, id={}, param_substs={})",
1167 ccx
.tcx().map
.path_to_string(id
).to_string()
1169 id
, param_substs
.repr(ccx
.tcx()));
1171 let uses_outptr
= match output_type
{
1172 ty
::FnConverging(output_type
) => {
1173 let substd_output_type
=
1174 monomorphize
::apply_param_substs(ccx
.tcx(), param_substs
, &output_type
);
1175 type_of
::return_uses_outptr(ccx
, substd_output_type
)
1177 ty
::FnDiverging
=> false
1179 let debug_context
= debuginfo
::create_function_debug_context(ccx
, id
, param_substs
, llfndecl
);
1180 let (blk_id
, cfg
) = build_cfg(ccx
.tcx(), id
);
1181 let nested_returns
= if let Some(ref cfg
) = cfg
{
1182 has_nested_returns(ccx
.tcx(), cfg
, blk_id
)
1187 let mut fcx
= FunctionContext
{
1190 llretslotptr
: Cell
::new(None
),
1191 param_env
: ty
::empty_parameter_environment(ccx
.tcx()),
1192 alloca_insert_pt
: Cell
::new(None
),
1193 llreturn
: Cell
::new(None
),
1194 needs_ret_allocas
: nested_returns
,
1195 personality
: Cell
::new(None
),
1196 caller_expects_out_pointer
: uses_outptr
,
1197 lllocals
: RefCell
::new(NodeMap()),
1198 llupvars
: RefCell
::new(NodeMap()),
1200 param_substs
: param_substs
,
1202 block_arena
: block_arena
,
1204 debug_context
: debug_context
,
1205 scopes
: RefCell
::new(Vec
::new()),
1210 fcx
.llenv
= Some(get_param(fcx
.llfn
, fcx
.env_arg_pos() as c_uint
))
1216 /// Performs setup on a newly created function, creating the entry scope block
1217 /// and allocating space for the return pointer.
1218 pub fn init_function
<'a
, 'tcx
>(fcx
: &'a FunctionContext
<'a
, 'tcx
>,
1220 output
: ty
::FnOutput
<'tcx
>)
1221 -> Block
<'a
, 'tcx
> {
1222 let entry_bcx
= fcx
.new_temp_block("entry-block");
1224 // Use a dummy instruction as the insertion point for all allocas.
1225 // This is later removed in FunctionContext::cleanup.
1226 fcx
.alloca_insert_pt
.set(Some(unsafe {
1227 Load(entry_bcx
, C_null(Type
::i8p(fcx
.ccx
)));
1228 llvm
::LLVMGetFirstInstruction(entry_bcx
.llbb
)
1231 if let ty
::FnConverging(output_type
) = output
{
1232 // This shouldn't need to recompute the return type,
1233 // as new_fn_ctxt did it already.
1234 let substd_output_type
= fcx
.monomorphize(&output_type
);
1235 if !return_type_is_void(fcx
.ccx
, substd_output_type
) {
1236 // If the function returns nil/bot, there is no real return
1237 // value, so do not set `llretslotptr`.
1238 if !skip_retptr
|| fcx
.caller_expects_out_pointer
{
1239 // Otherwise, we normally allocate the llretslotptr, unless we
1240 // have been instructed to skip it for immediate return
1242 fcx
.llretslotptr
.set(Some(make_return_slot_pointer(fcx
, substd_output_type
)));
1250 // NB: must keep 4 fns in sync:
1253 // - create_datums_for_fn_args.
1257 pub fn arg_kind
<'a
, 'tcx
>(cx
: &FunctionContext
<'a
, 'tcx
>, t
: Ty
<'tcx
>)
1259 use trans
::datum
::{ByRef, ByValue}
;
1262 mode
: if arg_is_indirect(cx
.ccx
, t
) { ByRef }
else { ByValue }
1266 // work around bizarre resolve errors
1267 pub type RvalueDatum
<'tcx
> = datum
::Datum
<'tcx
, datum
::Rvalue
>;
1269 // create_datums_for_fn_args: creates rvalue datums for each of the
1270 // incoming function arguments. These will later be stored into
1271 // appropriate lvalue datums.
1272 pub fn create_datums_for_fn_args
<'a
, 'tcx
>(fcx
: &FunctionContext
<'a
, 'tcx
>,
1273 arg_tys
: &[Ty
<'tcx
>])
1274 -> Vec
<RvalueDatum
<'tcx
>> {
1275 let _icx
= push_ctxt("create_datums_for_fn_args");
1277 // Return an array wrapping the ValueRefs that we get from `get_param` for
1278 // each argument into datums.
1279 arg_tys
.iter().enumerate().map(|(i
, &arg_ty
)| {
1280 let llarg
= get_param(fcx
.llfn
, fcx
.arg_pos(i
) as c_uint
);
1281 datum
::Datum
::new(llarg
, arg_ty
, arg_kind(fcx
, arg_ty
))
1285 /// Creates rvalue datums for each of the incoming function arguments and
1286 /// tuples the arguments. These will later be stored into appropriate lvalue
1289 /// FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
1290 fn create_datums_for_fn_args_under_call_abi
<'blk
, 'tcx
>(
1291 mut bcx
: Block
<'blk
, 'tcx
>,
1292 arg_scope
: cleanup
::CustomScopeIndex
,
1293 arg_tys
: &[Ty
<'tcx
>])
1294 -> Vec
<RvalueDatum
<'tcx
>> {
1295 let mut result
= Vec
::new();
1296 for (i
, &arg_ty
) in arg_tys
.iter().enumerate() {
1297 if i
< arg_tys
.len() - 1 {
1298 // Regular argument.
1299 let llarg
= get_param(bcx
.fcx
.llfn
, bcx
.fcx
.arg_pos(i
) as c_uint
);
1300 result
.push(datum
::Datum
::new(llarg
, arg_ty
, arg_kind(bcx
.fcx
,
1305 // This is the last argument. Tuple it.
1307 ty
::ty_tup(ref tupled_arg_tys
) => {
1308 let tuple_args_scope_id
= cleanup
::CustomScope(arg_scope
);
1311 datum
::lvalue_scratch_datum(bcx
,
1314 tuple_args_scope_id
,
1319 for (j
, &tupled_arg_ty
) in
1320 tupled_arg_tys
.iter().enumerate() {
1322 get_param(bcx
.fcx
.llfn
,
1323 bcx
.fcx
.arg_pos(i
+ j
) as c_uint
);
1324 let lldest
= GEPi(bcx
, llval
, &[0, j
]);
1325 let datum
= datum
::Datum
::new(
1328 arg_kind(bcx
.fcx
, tupled_arg_ty
));
1329 bcx
= datum
.store_to(bcx
, lldest
);
1333 let tuple
= unpack_datum
!(bcx
,
1334 tuple
.to_expr_datum()
1335 .to_rvalue_datum(bcx
,
1340 bcx
.tcx().sess
.bug("last argument of a function with \
1341 `rust-call` ABI isn't a tuple?!")
1350 fn copy_args_to_allocas
<'blk
, 'tcx
>(bcx
: Block
<'blk
, 'tcx
>,
1351 arg_scope
: cleanup
::CustomScopeIndex
,
1353 arg_datums
: Vec
<RvalueDatum
<'tcx
>>)
1354 -> Block
<'blk
, 'tcx
> {
1355 debug
!("copy_args_to_allocas");
1357 let _icx
= push_ctxt("copy_args_to_allocas");
1360 let arg_scope_id
= cleanup
::CustomScope(arg_scope
);
1362 for (i
, arg_datum
) in arg_datums
.into_iter().enumerate() {
1363 // For certain mode/type combinations, the raw llarg values are passed
1364 // by value. However, within the fn body itself, we want to always
1365 // have all locals and arguments be by-ref so that we can cancel the
1366 // cleanup and for better interaction with LLVM's debug info. So, if
1367 // the argument would be passed by value, we store it into an alloca.
1368 // This alloca should be optimized away by LLVM's mem-to-reg pass in
1369 // the event it's not truly needed.
1371 bcx
= _match
::store_arg(bcx
, &*args
[i
].pat
, arg_datum
, arg_scope_id
);
1372 debuginfo
::create_argument_metadata(bcx
, &args
[i
]);
1378 // Ties up the llstaticallocas -> llloadenv -> lltop edges,
1379 // and builds the return block.
1380 pub fn finish_fn
<'blk
, 'tcx
>(fcx
: &'blk FunctionContext
<'blk
, 'tcx
>,
1381 last_bcx
: Block
<'blk
, 'tcx
>,
1382 retty
: ty
::FnOutput
<'tcx
>,
1383 ret_debug_loc
: DebugLoc
) {
1384 let _icx
= push_ctxt("finish_fn");
1386 let ret_cx
= match fcx
.llreturn
.get() {
1388 if !last_bcx
.terminated
.get() {
1389 Br(last_bcx
, llreturn
, DebugLoc
::None
);
1391 raw_block(fcx
, false, llreturn
)
1396 // This shouldn't need to recompute the return type,
1397 // as new_fn_ctxt did it already.
1398 let substd_retty
= fcx
.monomorphize(&retty
);
1399 build_return_block(fcx
, ret_cx
, substd_retty
, ret_debug_loc
);
1401 debuginfo
::clear_source_location(fcx
);
1405 // Builds the return block for a function.
1406 pub fn build_return_block
<'blk
, 'tcx
>(fcx
: &FunctionContext
<'blk
, 'tcx
>,
1407 ret_cx
: Block
<'blk
, 'tcx
>,
1408 retty
: ty
::FnOutput
<'tcx
>,
1409 ret_debug_location
: DebugLoc
) {
1410 if fcx
.llretslotptr
.get().is_none() ||
1411 (!fcx
.needs_ret_allocas
&& fcx
.caller_expects_out_pointer
) {
1412 return RetVoid(ret_cx
, ret_debug_location
);
1415 let retslot
= if fcx
.needs_ret_allocas
{
1416 Load(ret_cx
, fcx
.llretslotptr
.get().unwrap())
1418 fcx
.llretslotptr
.get().unwrap()
1420 let retptr
= Value(retslot
);
1421 match retptr
.get_dominating_store(ret_cx
) {
1422 // If there's only a single store to the ret slot, we can directly return
1423 // the value that was stored and omit the store and the alloca
1425 let retval
= s
.get_operand(0).unwrap().get();
1426 s
.erase_from_parent();
1428 if retptr
.has_no_uses() {
1429 retptr
.erase_from_parent();
1432 let retval
= if retty
== ty
::FnConverging(fcx
.ccx
.tcx().types
.bool
) {
1433 Trunc(ret_cx
, retval
, Type
::i1(fcx
.ccx
))
1438 if fcx
.caller_expects_out_pointer
{
1439 if let ty
::FnConverging(retty
) = retty
{
1440 store_ty(ret_cx
, retval
, get_param(fcx
.llfn
, 0), retty
);
1442 RetVoid(ret_cx
, ret_debug_location
)
1444 Ret(ret_cx
, retval
, ret_debug_location
)
1447 // Otherwise, copy the return value to the ret slot
1448 None
=> match retty
{
1449 ty
::FnConverging(retty
) => {
1450 if fcx
.caller_expects_out_pointer
{
1451 memcpy_ty(ret_cx
, get_param(fcx
.llfn
, 0), retslot
, retty
);
1452 RetVoid(ret_cx
, ret_debug_location
)
1454 Ret(ret_cx
, load_ty(ret_cx
, retslot
, retty
), ret_debug_location
)
1457 ty
::FnDiverging
=> {
1458 if fcx
.caller_expects_out_pointer
{
1459 RetVoid(ret_cx
, ret_debug_location
)
1461 Ret(ret_cx
, C_undef(Type
::nil(fcx
.ccx
)), ret_debug_location
)
1468 /// Builds an LLVM function out of a source function.
1470 /// If the function closes over its environment a closure will be returned.
1471 pub fn trans_closure
<'a
, 'b
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>,
1475 param_substs
: &'tcx Substs
<'tcx
>,
1476 fn_ast_id
: ast
::NodeId
,
1477 _attributes
: &[ast
::Attribute
],
1478 output_type
: ty
::FnOutput
<'tcx
>,
1480 closure_env
: closure
::ClosureEnv
<'b
>) {
1481 ccx
.stats().n_closures
.set(ccx
.stats().n_closures
.get() + 1);
1483 let _icx
= push_ctxt("trans_closure");
1484 attributes
::emit_uwtable(llfndecl
, true);
1486 debug
!("trans_closure(..., param_substs={})",
1487 param_substs
.repr(ccx
.tcx()));
1489 let has_env
= match closure_env
{
1490 closure
::ClosureEnv
::Closure(_
) => true,
1491 closure
::ClosureEnv
::NotClosure
=> false,
1494 let (arena
, fcx
): (TypedArena
<_
>, FunctionContext
);
1495 arena
= TypedArena
::new();
1496 fcx
= new_fn_ctxt(ccx
,
1504 let mut bcx
= init_function(&fcx
, false, output_type
);
1506 // cleanup scope for the incoming arguments
1507 let fn_cleanup_debug_loc
=
1508 debuginfo
::get_cleanup_debug_loc_for_ast_node(ccx
, fn_ast_id
, body
.span
, true);
1509 let arg_scope
= fcx
.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc
);
1511 let block_ty
= node_id_type(bcx
, body
.id
);
1513 // Set up arguments to the function.
1514 let monomorphized_arg_types
=
1516 .map(|arg
| node_id_type(bcx
, arg
.id
))
1517 .collect
::<Vec
<_
>>();
1518 let monomorphized_arg_types
= match closure_env
{
1519 closure
::ClosureEnv
::NotClosure
=> {
1520 monomorphized_arg_types
1523 // Tuple up closure argument types for the "rust-call" ABI.
1524 closure
::ClosureEnv
::Closure(_
) => {
1525 vec
![ty
::mk_tup(ccx
.tcx(), monomorphized_arg_types
)]
1528 for monomorphized_arg_type
in &monomorphized_arg_types
{
1529 debug
!("trans_closure: monomorphized_arg_type: {}",
1530 ty_to_string(ccx
.tcx(), *monomorphized_arg_type
));
1532 debug
!("trans_closure: function lltype: {}",
1533 bcx
.fcx
.ccx
.tn().val_to_string(bcx
.fcx
.llfn
));
1535 let arg_datums
= match closure_env
{
1536 closure
::ClosureEnv
::NotClosure
if abi
== RustCall
=> {
1537 create_datums_for_fn_args_under_call_abi(bcx
, arg_scope
, &monomorphized_arg_types
[..])
1540 let arg_tys
= untuple_arguments_if_necessary(ccx
, &monomorphized_arg_types
, abi
);
1541 create_datums_for_fn_args(&fcx
, &arg_tys
)
1545 bcx
= copy_args_to_allocas(bcx
, arg_scope
, &decl
.inputs
, arg_datums
);
1547 bcx
= closure_env
.load(bcx
, cleanup
::CustomScope(arg_scope
));
1549 // Up until here, IR instructions for this function have explicitly not been annotated with
1550 // source code location, so we don't step into call setup code. From here on, source location
1551 // emitting should be enabled.
1552 debuginfo
::start_emitting_source_locations(&fcx
);
1554 let dest
= match fcx
.llretslotptr
.get() {
1555 Some(_
) => expr
::SaveIn(fcx
.get_ret_slot(bcx
, ty
::FnConverging(block_ty
), "iret_slot")),
1557 assert
!(type_is_zero_size(bcx
.ccx(), block_ty
));
1562 // This call to trans_block is the place where we bridge between
1563 // translation calls that don't have a return value (trans_crate,
1564 // trans_mod, trans_item, et cetera) and those that do
1565 // (trans_block, trans_expr, et cetera).
1566 bcx
= controlflow
::trans_block(bcx
, body
, dest
);
1569 expr
::SaveIn(slot
) if fcx
.needs_ret_allocas
=> {
1570 Store(bcx
, slot
, fcx
.llretslotptr
.get().unwrap());
1575 match fcx
.llreturn
.get() {
1577 Br(bcx
, fcx
.return_exit_block(), DebugLoc
::None
);
1578 fcx
.pop_custom_cleanup_scope(arg_scope
);
1581 // Microoptimization writ large: avoid creating a separate
1582 // llreturn basic block
1583 bcx
= fcx
.pop_and_trans_custom_cleanup_scope(bcx
, arg_scope
);
1587 // Put return block after all other blocks.
1588 // This somewhat improves single-stepping experience in debugger.
1590 let llreturn
= fcx
.llreturn
.get();
1591 if let Some(llreturn
) = llreturn
{
1592 llvm
::LLVMMoveBasicBlockAfter(llreturn
, bcx
.llbb
);
1596 let ret_debug_loc
= DebugLoc
::At(fn_cleanup_debug_loc
.id
,
1597 fn_cleanup_debug_loc
.span
);
1599 // Insert the mandatory first few basic blocks before lltop.
1600 finish_fn(&fcx
, bcx
, output_type
, ret_debug_loc
);
1603 /// Creates an LLVM function corresponding to a source language function.
1604 pub fn trans_fn
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>,
1608 param_substs
: &'tcx Substs
<'tcx
>,
1610 attrs
: &[ast
::Attribute
]) {
1611 let _s
= StatRecorder
::new(ccx
, ccx
.tcx().map
.path_to_string(id
).to_string());
1612 debug
!("trans_fn(param_substs={})", param_substs
.repr(ccx
.tcx()));
1613 let _icx
= push_ctxt("trans_fn");
1614 let fn_ty
= ty
::node_id_to_type(ccx
.tcx(), id
);
1615 let output_type
= ty
::erase_late_bound_regions(ccx
.tcx(), &ty
::ty_fn_ret(fn_ty
));
1616 let abi
= ty
::ty_fn_abi(fn_ty
);
1617 trans_closure(ccx
, decl
, body
, llfndecl
, param_substs
, id
, attrs
, output_type
, abi
,
1618 closure
::ClosureEnv
::NotClosure
);
1621 pub fn trans_enum_variant
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>,
1622 _enum_id
: ast
::NodeId
,
1623 variant
: &ast
::Variant
,
1624 _args
: &[ast
::VariantArg
],
1626 param_substs
: &'tcx Substs
<'tcx
>,
1627 llfndecl
: ValueRef
) {
1628 let _icx
= push_ctxt("trans_enum_variant");
1630 trans_enum_variant_or_tuple_like_struct(
1638 pub fn trans_named_tuple_constructor
<'blk
, 'tcx
>(mut bcx
: Block
<'blk
, 'tcx
>,
1641 args
: callee
::CallArgs
,
1643 debug_loc
: DebugLoc
)
1644 -> Result
<'blk
, 'tcx
> {
1646 let ccx
= bcx
.fcx
.ccx
;
1647 let tcx
= ccx
.tcx();
1649 let result_ty
= match ctor_ty
.sty
{
1650 ty
::ty_bare_fn(_
, ref bft
) => {
1651 ty
::erase_late_bound_regions(bcx
.tcx(), &bft
.sig
.output()).unwrap()
1653 _
=> ccx
.sess().bug(
1654 &format
!("trans_enum_variant_constructor: \
1655 unexpected ctor return type {}",
1659 // Get location to store the result. If the user does not care about
1660 // the result, just make a stack slot
1661 let llresult
= match dest
{
1662 expr
::SaveIn(d
) => d
,
1664 if !type_is_zero_size(ccx
, result_ty
) {
1665 alloc_ty(bcx
, result_ty
, "constructor_result")
1667 C_undef(type_of
::type_of(ccx
, result_ty
))
1672 if !type_is_zero_size(ccx
, result_ty
) {
1674 callee
::ArgExprs(exprs
) => {
1675 let fields
= exprs
.iter().map(|x
| &**x
).enumerate().collect
::<Vec
<_
>>();
1676 bcx
= expr
::trans_adt(bcx
,
1681 expr
::SaveIn(llresult
),
1684 _
=> ccx
.sess().bug("expected expr as arguments for variant/struct tuple constructor")
1688 // If the caller doesn't care about the result
1689 // drop the temporary we made
1690 let bcx
= match dest
{
1691 expr
::SaveIn(_
) => bcx
,
1693 let bcx
= glue
::drop_ty(bcx
, llresult
, result_ty
, debug_loc
);
1694 if !type_is_zero_size(ccx
, result_ty
) {
1695 call_lifetime_end(bcx
, llresult
);
1701 Result
::new(bcx
, llresult
)
1704 pub fn trans_tuple_struct
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>,
1705 _fields
: &[ast
::StructField
],
1706 ctor_id
: ast
::NodeId
,
1707 param_substs
: &'tcx Substs
<'tcx
>,
1708 llfndecl
: ValueRef
) {
1709 let _icx
= push_ctxt("trans_tuple_struct");
1711 trans_enum_variant_or_tuple_like_struct(
1719 fn trans_enum_variant_or_tuple_like_struct
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>,
1720 ctor_id
: ast
::NodeId
,
1722 param_substs
: &'tcx Substs
<'tcx
>,
1723 llfndecl
: ValueRef
) {
1724 let ctor_ty
= ty
::node_id_to_type(ccx
.tcx(), ctor_id
);
1725 let ctor_ty
= monomorphize
::apply_param_substs(ccx
.tcx(), param_substs
, &ctor_ty
);
1727 let result_ty
= match ctor_ty
.sty
{
1728 ty
::ty_bare_fn(_
, ref bft
) => {
1729 ty
::erase_late_bound_regions(ccx
.tcx(), &bft
.sig
.output())
1731 _
=> ccx
.sess().bug(
1732 &format
!("trans_enum_variant_or_tuple_like_struct: \
1733 unexpected ctor return type {}",
1734 ty_to_string(ccx
.tcx(), ctor_ty
)))
1737 let (arena
, fcx
): (TypedArena
<_
>, FunctionContext
);
1738 arena
= TypedArena
::new();
1739 fcx
= new_fn_ctxt(ccx
, llfndecl
, ctor_id
, false, result_ty
,
1740 param_substs
, None
, &arena
);
1741 let bcx
= init_function(&fcx
, false, result_ty
);
1743 assert
!(!fcx
.needs_ret_allocas
);
1746 ty
::erase_late_bound_regions(
1747 ccx
.tcx(), &ty
::ty_fn_args(ctor_ty
));
1749 let arg_datums
= create_datums_for_fn_args(&fcx
, &arg_tys
[..]);
1751 if !type_is_zero_size(fcx
.ccx
, result_ty
.unwrap()) {
1752 let dest
= fcx
.get_ret_slot(bcx
, result_ty
, "eret_slot");
1753 let repr
= adt
::represent_type(ccx
, result_ty
.unwrap());
1754 for (i
, arg_datum
) in arg_datums
.into_iter().enumerate() {
1755 let lldestptr
= adt
::trans_field_ptr(bcx
,
1760 arg_datum
.store_to(bcx
, lldestptr
);
1762 adt
::trans_set_discr(bcx
, &*repr
, dest
, disr
);
1765 finish_fn(&fcx
, bcx
, result_ty
, DebugLoc
::None
);
1768 fn enum_variant_size_lint(ccx
: &CrateContext
, enum_def
: &ast
::EnumDef
, sp
: Span
, id
: ast
::NodeId
) {
1769 let mut sizes
= Vec
::new(); // does no allocation if no pushes, thankfully
1771 let print_info
= ccx
.sess().print_enum_sizes();
1773 let levels
= ccx
.tcx().node_lint_levels
.borrow();
1774 let lint_id
= lint
::LintId
::of(lint
::builtin
::VARIANT_SIZE_DIFFERENCES
);
1775 let lvlsrc
= levels
.get(&(id
, lint_id
));
1776 let is_allow
= lvlsrc
.map_or(true, |&(lvl
, _
)| lvl
== lint
::Allow
);
1778 if is_allow
&& !print_info
{
1779 // we're not interested in anything here
1783 let ty
= ty
::node_id_to_type(ccx
.tcx(), id
);
1784 let avar
= adt
::represent_type(ccx
, ty
);
1786 adt
::General(_
, ref variants
, _
) => {
1787 for var
in variants
{
1789 for field
in var
.fields
.iter().skip(1) {
1790 // skip the discriminant
1791 size
+= llsize_of_real(ccx
, sizing_type_of(ccx
, *field
));
1796 _
=> { /* its size is either constant or unimportant */ }
1799 let (largest
, slargest
, largest_index
) = sizes
.iter().enumerate().fold((0, 0, 0),
1800 |(l
, s
, li
), (idx
, &size
)|
1803 } else if size
> s
{
1811 let llty
= type_of
::sizing_type_of(ccx
, ty
);
1813 let sess
= &ccx
.tcx().sess
;
1814 sess
.span_note(sp
, &*format
!("total size: {} bytes", llsize_of_real(ccx
, llty
)));
1816 adt
::General(..) => {
1817 for (i
, var
) in enum_def
.variants
.iter().enumerate() {
1818 ccx
.tcx().sess
.span_note(var
.span
,
1819 &*format
!("variant data: {} bytes", sizes
[i
]));
1826 // we only warn if the largest variant is at least thrice as large as
1827 // the second-largest.
1828 if !is_allow
&& largest
> slargest
* 3 && slargest
> 0 {
1829 // Use lint::raw_emit_lint rather than sess.add_lint because the lint-printing
1830 // pass for the latter already ran.
1831 lint
::raw_emit_lint(&ccx
.tcx().sess
, lint
::builtin
::VARIANT_SIZE_DIFFERENCES
,
1832 *lvlsrc
.unwrap(), Some(sp
),
1833 &format
!("enum variant is more than three times larger \
1834 ({} bytes) than the next largest (ignoring padding)",
1837 ccx
.sess().span_note(enum_def
.variants
[largest_index
].span
,
1838 "this variant is the largest");
1842 pub struct TransItemVisitor
<'a
, 'tcx
: 'a
> {
1843 pub ccx
: &'a CrateContext
<'a
, 'tcx
>,
1846 impl<'a
, 'tcx
, 'v
> Visitor
<'v
> for TransItemVisitor
<'a
, 'tcx
> {
1847 fn visit_item(&mut self, i
: &ast
::Item
) {
1848 trans_item(self.ccx
, i
);
1852 pub fn llvm_linkage_by_name(name
: &str) -> Option
<Linkage
> {
1853 // Use the names from src/llvm/docs/LangRef.rst here. Most types are only
1854 // applicable to variable declarations and may not really make sense for
1855 // Rust code in the first place but whitelist them anyway and trust that
1856 // the user knows what s/he's doing. Who knows, unanticipated use cases
1857 // may pop up in the future.
1859 // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
1860 // and don't have to be, LLVM treats them as no-ops.
1862 "appending" => Some(llvm
::AppendingLinkage
),
1863 "available_externally" => Some(llvm
::AvailableExternallyLinkage
),
1864 "common" => Some(llvm
::CommonLinkage
),
1865 "extern_weak" => Some(llvm
::ExternalWeakLinkage
),
1866 "external" => Some(llvm
::ExternalLinkage
),
1867 "internal" => Some(llvm
::InternalLinkage
),
1868 "linkonce" => Some(llvm
::LinkOnceAnyLinkage
),
1869 "linkonce_odr" => Some(llvm
::LinkOnceODRLinkage
),
1870 "private" => Some(llvm
::PrivateLinkage
),
1871 "weak" => Some(llvm
::WeakAnyLinkage
),
1872 "weak_odr" => Some(llvm
::WeakODRLinkage
),
/// Enum describing the origin of an LLVM `Value`, for linkage purposes.
#[derive(Copy, Clone)]
pub enum ValueOrigin {
    /// The LLVM `Value` is in this context because the corresponding item was
    /// assigned to the current compilation unit.
    OriginalTranslation,
    /// The `Value`'s corresponding item was assigned to some other compilation
    /// unit, but the `Value` was translated in this context anyway because the
    /// item is marked `#[inline]`.
    InlinedCopy,
}
1890 /// Set the appropriate linkage for an LLVM `ValueRef` (function or global).
1891 /// If the `llval` is the direct translation of a specific Rust item, `id`
1892 /// should be set to the `NodeId` of that item. (This mapping should be
1893 /// 1-to-1, so monomorphizations and drop/visit glue should have `id` set to
1894 /// `None`.) `llval_origin` indicates whether `llval` is the translation of an
1895 /// item assigned to `ccx`'s compilation unit or an inlined copy of an item
1896 /// assigned to a different compilation unit.
1897 pub fn update_linkage(ccx
: &CrateContext
,
1899 id
: Option
<ast
::NodeId
>,
1900 llval_origin
: ValueOrigin
) {
1901 match llval_origin
{
1903 // `llval` is a translation of an item defined in a separate
1904 // compilation unit. This only makes sense if there are at least
1905 // two compilation units.
1906 assert
!(ccx
.sess().opts
.cg
.codegen_units
> 1);
1907 // `llval` is a copy of something defined elsewhere, so use
1908 // `AvailableExternallyLinkage` to avoid duplicating code in the
1910 llvm
::SetLinkage(llval
, llvm
::AvailableExternallyLinkage
);
1913 OriginalTranslation
=> {}
,
1916 if let Some(id
) = id
{
1917 let item
= ccx
.tcx().map
.get(id
);
1918 if let ast_map
::NodeItem(i
) = item
{
1919 if let Some(name
) = attr
::first_attr_value_str_by_name(&i
.attrs
, "linkage") {
1920 if let Some(linkage
) = llvm_linkage_by_name(&name
) {
1921 llvm
::SetLinkage(llval
, linkage
);
1923 ccx
.sess().span_fatal(i
.span
, "invalid linkage specified");
1931 Some(id
) if ccx
.reachable().contains(&id
) => {
1932 llvm
::SetLinkage(llval
, llvm
::ExternalLinkage
);
1935 // `id` does not refer to an item in `ccx.reachable`.
1936 if ccx
.sess().opts
.cg
.codegen_units
> 1 {
1937 llvm
::SetLinkage(llval
, llvm
::ExternalLinkage
);
1939 llvm
::SetLinkage(llval
, llvm
::InternalLinkage
);
1945 pub fn trans_item(ccx
: &CrateContext
, item
: &ast
::Item
) {
1946 let _icx
= push_ctxt("trans_item");
1948 let from_external
= ccx
.external_srcs().borrow().contains_key(&item
.id
);
1951 ast
::ItemFn(ref decl
, _fn_style
, abi
, ref generics
, ref body
) => {
1952 if !generics
.is_type_parameterized() {
1953 let trans_everywhere
= attr
::requests_inline(&item
.attrs
);
1954 // Ignore `trans_everywhere` for cross-crate inlined items
1955 // (`from_external`). `trans_item` will be called once for each
1956 // compilation unit that references the item, so it will still get
1957 // translated everywhere it's needed.
1958 for (ref ccx
, is_origin
) in ccx
.maybe_iter(!from_external
&& trans_everywhere
) {
1959 let llfn
= get_item_val(ccx
, item
.id
);
1960 let empty_substs
= ccx
.tcx().mk_substs(Substs
::trans_empty());
1962 foreign
::trans_rust_fn_with_foreign_abi(ccx
, &**decl
, &**body
, &item
.attrs
,
1963 llfn
, empty_substs
, item
.id
, None
);
1965 trans_fn(ccx
, &**decl
, &**body
, llfn
, empty_substs
, item
.id
, &item
.attrs
);
1967 update_linkage(ccx
, llfn
, Some(item
.id
),
1968 if is_origin { OriginalTranslation }
else { InlinedCopy }
);
1970 if is_entry_fn(ccx
.sess(), item
.id
) {
1971 create_entry_wrapper(ccx
, item
.span
, llfn
);
1972 // check for the #[rustc_error] annotation, which forces an
1973 // error in trans. This is used to write compile-fail tests
1974 // that actually test that compilation succeeds without
1975 // reporting an error.
1976 if ty
::has_attr(ccx
.tcx(), local_def(item
.id
), "rustc_error") {
1977 ccx
.tcx().sess
.span_fatal(item
.span
, "compilation successful");
1983 // Be sure to travel more than just one layer deep to catch nested
1984 // items in blocks and such.
1985 let mut v
= TransItemVisitor{ ccx: ccx }
;
1986 v
.visit_block(&**body
);
1988 ast
::ItemImpl(_
, _
, ref generics
, _
, _
, ref impl_items
) => {
1989 meth
::trans_impl(ccx
,
1995 ast
::ItemMod(ref m
) => {
1996 trans_mod(&ccx
.rotate(), m
);
1998 ast
::ItemEnum(ref enum_definition
, ref gens
) => {
1999 if gens
.ty_params
.is_empty() {
2000 // sizes only make sense for non-generic types
2002 enum_variant_size_lint(ccx
, enum_definition
, item
.span
, item
.id
);
2005 ast
::ItemConst(_
, ref expr
) => {
2006 // Recurse on the expression to catch items in blocks
2007 let mut v
= TransItemVisitor{ ccx: ccx }
;
2008 v
.visit_expr(&**expr
);
2010 ast
::ItemStatic(_
, m
, ref expr
) => {
2011 // Recurse on the expression to catch items in blocks
2012 let mut v
= TransItemVisitor{ ccx: ccx }
;
2013 v
.visit_expr(&**expr
);
2015 let g
= consts
::trans_static(ccx
, m
, item
.id
);
2016 update_linkage(ccx
, g
, Some(item
.id
), OriginalTranslation
);
2018 // Do static_assert checking. It can't really be done much earlier
2019 // because we need to get the value of the bool out of LLVM
2020 if attr
::contains_name(&item
.attrs
, "static_assert") {
2021 if !ty
::type_is_bool(ty
::expr_ty(ccx
.tcx(), expr
)) {
2022 ccx
.sess().span_fatal(expr
.span
,
2023 "can only have static_assert on a static \
2026 if m
== ast
::MutMutable
{
2027 ccx
.sess().span_fatal(expr
.span
,
2028 "cannot have static_assert on a mutable \
2032 let v
= ccx
.static_values().borrow().get(&item
.id
).unwrap().clone();
2034 if !(llvm
::LLVMConstIntGetZExtValue(v
) != 0) {
2035 ccx
.sess().span_fatal(expr
.span
, "static assertion failed");
2040 ast
::ItemForeignMod(ref foreign_mod
) => {
2041 foreign
::trans_foreign_mod(ccx
, foreign_mod
);
2043 ast
::ItemTrait(..) => {
2044 // Inside of this trait definition, we won't be actually translating any
2045 // functions, but the trait still needs to be walked. Otherwise default
2046 // methods with items will not get translated and will cause ICE's when
2047 // metadata time comes around.
2048 let mut v
= TransItemVisitor{ ccx: ccx }
;
2049 visit
::walk_item(&mut v
, item
);
2051 _
=> {/* fall through */ }
2055 // Translate a module. Doing this amounts to translating the items in the
2056 // module; there ends up being no artifact (aside from linkage names) of
2057 // separate modules in the compiled program. That's because modules exist
2058 // only as a convenience for humans working with the code, to organize names
2059 // and control visibility.
2060 pub fn trans_mod(ccx
: &CrateContext
, m
: &ast
::Mod
) {
2061 let _icx
= push_ctxt("trans_mod");
2062 for item
in &m
.items
{
2063 trans_item(ccx
, &**item
);
2068 // only use this for foreign function ABIs and glue, use `register_fn` for Rust functions
2069 pub fn register_fn_llvmty(ccx
: &CrateContext
,
2072 node_id
: ast
::NodeId
,
2074 llfty
: Type
) -> ValueRef
{
2075 debug
!("register_fn_llvmty id={} sym={}", node_id
, sym
);
2077 let llfn
= declare
::define_fn(ccx
, &sym
[..], cc
, llfty
,
2078 ty
::FnConverging(ty
::mk_nil(ccx
.tcx()))).unwrap_or_else(||{
2079 ccx
.sess().span_fatal(sp
, &format
!("symbol `{}` is already defined", sym
));
2081 finish_register_fn(ccx
, sym
, node_id
, llfn
);
2085 fn finish_register_fn(ccx
: &CrateContext
, sym
: String
, node_id
: ast
::NodeId
,
2087 ccx
.item_symbols().borrow_mut().insert(node_id
, sym
);
2089 // The stack exhaustion lang item shouldn't have a split stack because
2090 // otherwise it would continue to be exhausted (bad), and both it and the
2091 // eh_personality functions need to be externally linkable.
2092 let def
= ast_util
::local_def(node_id
);
2093 if ccx
.tcx().lang_items
.stack_exhausted() == Some(def
) {
2094 attributes
::split_stack(llfn
, false);
2095 llvm
::SetLinkage(llfn
, llvm
::ExternalLinkage
);
2097 if ccx
.tcx().lang_items
.eh_personality() == Some(def
) {
2098 llvm
::SetLinkage(llfn
, llvm
::ExternalLinkage
);
2102 fn register_fn
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>,
2105 node_id
: ast
::NodeId
,
2106 node_type
: Ty
<'tcx
>)
2108 if let ty
::ty_bare_fn(_
, ref f
) = node_type
.sty
{
2109 if f
.abi
!= Rust
&& f
.abi
!= RustCall
{
2110 ccx
.sess().span_bug(sp
, &format
!("only the `{}` or `{}` calling conventions are valid \
2111 for this function; `{}` was specified",
2112 Rust
.name(), RustCall
.name(), f
.abi
.name()));
2115 ccx
.sess().span_bug(sp
, "expected bare rust function")
2118 let llfn
= declare
::define_rust_fn(ccx
, &sym
[..], node_type
).unwrap_or_else(||{
2119 ccx
.sess().span_fatal(sp
, &format
!("symbol `{}` is already defined", sym
));
2121 finish_register_fn(ccx
, sym
, node_id
, llfn
);
2125 pub fn is_entry_fn(sess
: &Session
, node_id
: ast
::NodeId
) -> bool
{
2126 match *sess
.entry_fn
.borrow() {
2127 Some((entry_id
, _
)) => node_id
== entry_id
,
2132 /// Create the `main` function which will initialise the rust runtime and call users’ main
2134 pub fn create_entry_wrapper(ccx
: &CrateContext
,
2136 main_llfn
: ValueRef
) {
2137 let et
= ccx
.sess().entry_type
.get().unwrap();
2139 config
::EntryMain
=> {
2140 create_entry_fn(ccx
, sp
, main_llfn
, true);
2142 config
::EntryStart
=> create_entry_fn(ccx
, sp
, main_llfn
, false),
2143 config
::EntryNone
=> {}
// Do nothing.
2146 fn create_entry_fn(ccx
: &CrateContext
,
2148 rust_main
: ValueRef
,
2149 use_start_lang_item
: bool
) {
2150 let llfty
= Type
::func(&[ccx
.int_type(), Type
::i8p(ccx
).ptr_to()],
2153 let llfn
= declare
::define_cfn(ccx
, "main", llfty
,
2154 ty
::mk_nil(ccx
.tcx())).unwrap_or_else(||{
2155 ccx
.sess().span_err(sp
, "entry symbol `main` defined multiple times");
2156 // FIXME: We should be smart and show a better diagnostic here.
2157 ccx
.sess().help("did you use #[no_mangle] on `fn main`? Use #[start] instead");
2158 ccx
.sess().abort_if_errors();
2162 // FIXME: #16581: Marking a symbol in the executable with `dllexport`
2163 // linkage forces MinGW's linker to output a `.reloc` section for ASLR
2164 if ccx
.sess().target
.target
.options
.is_like_windows
{
2165 unsafe { llvm::LLVMRustSetDLLExportStorageClass(llfn) }
2169 llvm
::LLVMAppendBasicBlockInContext(ccx
.llcx(), llfn
,
2170 "top\0".as_ptr() as *const _
)
2172 let bld
= ccx
.raw_builder();
2174 llvm
::LLVMPositionBuilderAtEnd(bld
, llbb
);
2176 debuginfo
::insert_reference_to_gdb_debug_scripts_section_global(ccx
);
2178 let (start_fn
, args
) = if use_start_lang_item
{
2179 let start_def_id
= match ccx
.tcx().lang_items
.require(StartFnLangItem
) {
2181 Err(s
) => { ccx.sess().fatal(&s[..]); }
2183 let start_fn
= if start_def_id
.krate
== ast
::LOCAL_CRATE
{
2184 get_item_val(ccx
, start_def_id
.node
)
2186 let start_fn_type
= csearch
::get_type(ccx
.tcx(),
2188 trans_external_path(ccx
, start_def_id
, start_fn_type
)
2192 let opaque_rust_main
= llvm
::LLVMBuildPointerCast(bld
,
2193 rust_main
, Type
::i8p(ccx
).to_ref(),
2194 "rust_main\0".as_ptr() as *const _
);
2204 debug
!("using user-defined start fn");
2206 get_param(llfn
, 0 as c_uint
),
2207 get_param(llfn
, 1 as c_uint
)
2213 let result
= llvm
::LLVMBuildCall(bld
,
2216 args
.len() as c_uint
,
2219 llvm
::LLVMBuildRet(bld
, result
);
2224 fn exported_name
<'a
, 'tcx
>(ccx
: &CrateContext
<'a
, 'tcx
>, id
: ast
::NodeId
,
2225 ty
: Ty
<'tcx
>, attrs
: &[ast
::Attribute
]) -> String
{
2226 match ccx
.external_srcs().borrow().get(&id
) {
2228 let sym
= csearch
::get_symbol(&ccx
.sess().cstore
, did
);
2229 debug
!("found item {} in other crate...", sym
);
2235 match attr
::find_export_name_attr(ccx
.sess().diagnostic(), attrs
) {
2236 // Use provided name
2237 Some(name
) => name
.to_string(),
2238 _
=> ccx
.tcx().map
.with_path(id
, |path
| {
2239 if attr
::contains_name(attrs
, "no_mangle") {
2241 path
.last().unwrap().to_string()
2243 match weak_lang_items
::link_name(attrs
) {
2244 Some(name
) => name
.to_string(),
2246 // Usual name mangling
2247 mangle_exported_name(ccx
, path
, ty
, id
)
/// Reports whether `s` contains an interior NUL (0x00) byte. Used to reject
/// section names that could not be passed safely through C string APIs.
fn contains_null(s: &str) -> bool {
    for byte in s.as_bytes() {
        if *byte == 0 {
            return true;
        }
    }
    false
}
2259 pub fn get_item_val(ccx
: &CrateContext
, id
: ast
::NodeId
) -> ValueRef
{
2260 debug
!("get_item_val(id=`{}`)", id
);
2262 match ccx
.item_vals().borrow().get(&id
).cloned() {
2263 Some(v
) => return v
,
2267 let item
= ccx
.tcx().map
.get(id
);
2268 debug
!("get_item_val: id={} item={:?}", id
, item
);
2269 let val
= match item
{
2270 ast_map
::NodeItem(i
) => {
2271 let ty
= ty
::node_id_to_type(ccx
.tcx(), i
.id
);
2272 let sym
= || exported_name(ccx
, id
, ty
, &i
.attrs
);
2274 let v
= match i
.node
{
2275 ast
::ItemStatic(_
, _
, ref expr
) => {
2276 // If this static came from an external crate, then
2277 // we need to get the symbol from csearch instead of
2278 // using the current crate's name/version
2279 // information in the hash of the symbol
2281 debug
!("making {}", sym
);
2283 // We need the translated value here, because for enums the
2284 // LLVM type is not fully determined by the Rust type.
2285 let empty_substs
= ccx
.tcx().mk_substs(Substs
::trans_empty());
2286 let (v
, ty
) = consts
::const_expr(ccx
, &**expr
, empty_substs
);
2287 ccx
.static_values().borrow_mut().insert(id
, v
);
2289 // boolean SSA values are i1, but they have to be stored in i8 slots,
2290 // otherwise some LLVM optimization passes don't work as expected
2291 let llty
= if ty
::type_is_bool(ty
) {
2292 llvm
::LLVMInt8TypeInContext(ccx
.llcx())
2297 // FIXME(nagisa): probably should be declare_global, because no definition
2298 // is happening here, but we depend on it being defined here from
2299 // const::trans_static. This all logic should be replaced.
2300 let g
= declare
::define_global(ccx
, &sym
[..],
2301 Type
::from_ref(llty
)).unwrap_or_else(||{
2302 ccx
.sess().span_fatal(i
.span
, &format
!("symbol `{}` is already defined",
2306 if attr
::contains_name(&i
.attrs
,
2308 llvm
::set_thread_local(g
, true);
2310 ccx
.item_symbols().borrow_mut().insert(i
.id
, sym
);
2315 ast
::ItemFn(_
, _
, abi
, _
, _
) => {
2317 let llfn
= if abi
== Rust
{
2318 register_fn(ccx
, i
.span
, sym
, i
.id
, ty
)
2320 foreign
::register_rust_fn_with_foreign_abi(ccx
, i
.span
, sym
, i
.id
)
2322 attributes
::from_fn_attrs(ccx
, &i
.attrs
, llfn
);
2326 _
=> ccx
.sess().bug("get_item_val: weird result in table")
2329 match attr
::first_attr_value_str_by_name(&i
.attrs
,
2332 if contains_null(§
) {
2333 ccx
.sess().fatal(&format
!("Illegal null byte in link_section value: `{}`",
2337 let buf
= CString
::new(sect
.as_bytes()).unwrap();
2338 llvm
::LLVMSetSection(v
, buf
.as_ptr());
2347 ast_map
::NodeTraitItem(trait_item
) => {
2348 debug
!("get_item_val(): processing a NodeTraitItem");
2349 match trait_item
.node
{
2350 ast
::MethodTraitItem(_
, None
) | ast
::TypeTraitItem(..) => {
2351 ccx
.sess().span_bug(trait_item
.span
,
2352 "unexpected variant: required trait method in get_item_val()");
2354 ast
::MethodTraitItem(_
, Some(_
)) => {
2355 register_method(ccx
, id
, &trait_item
.attrs
, trait_item
.span
)
2360 ast_map
::NodeImplItem(impl_item
) => {
2361 match impl_item
.node
{
2362 ast
::MethodImplItem(..) => {
2363 register_method(ccx
, id
, &impl_item
.attrs
, impl_item
.span
)
2365 ast
::TypeImplItem(_
) => {
2366 ccx
.sess().span_bug(impl_item
.span
,
2367 "unexpected variant: associated type in get_item_val()")
2369 ast
::MacImplItem(_
) => {
2370 ccx
.sess().span_bug(impl_item
.span
,
2371 "unexpected variant: unexpanded macro in get_item_val()")
2376 ast_map
::NodeForeignItem(ni
) => {
2378 ast
::ForeignItemFn(..) => {
2379 let abi
= ccx
.tcx().map
.get_foreign_abi(id
);
2380 let ty
= ty
::node_id_to_type(ccx
.tcx(), ni
.id
);
2381 let name
= foreign
::link_name(&*ni
);
2382 let llfn
= foreign
::register_foreign_item_fn(ccx
, abi
, ty
, &name
);
2383 attributes
::from_fn_attrs(ccx
, &ni
.attrs
, llfn
);
2386 ast
::ForeignItemStatic(..) => {
2387 foreign
::register_static(ccx
, &*ni
)
2392 ast_map
::NodeVariant(ref v
) => {
2394 let args
= match v
.node
.kind
{
2395 ast
::TupleVariantKind(ref args
) => args
,
2396 ast
::StructVariantKind(_
) => {
2397 ccx
.sess().bug("struct variant kind unexpected in get_item_val")
2400 assert
!(!args
.is_empty());
2401 let ty
= ty
::node_id_to_type(ccx
.tcx(), id
);
2402 let parent
= ccx
.tcx().map
.get_parent(id
);
2403 let enm
= ccx
.tcx().map
.expect_item(parent
);
2404 let sym
= exported_name(ccx
,
2409 llfn
= match enm
.node
{
2410 ast
::ItemEnum(_
, _
) => {
2411 register_fn(ccx
, (*v
).span
, sym
, id
, ty
)
2413 _
=> ccx
.sess().bug("NodeVariant, shouldn't happen")
2415 attributes
::inline(llfn
, attributes
::InlineAttr
::Hint
);
2419 ast_map
::NodeStructCtor(struct_def
) => {
2420 // Only register the constructor if this is a tuple-like struct.
2421 let ctor_id
= match struct_def
.ctor_id
{
2423 ccx
.sess().bug("attempt to register a constructor of \
2424 a non-tuple-like struct")
2426 Some(ctor_id
) => ctor_id
,
2428 let parent
= ccx
.tcx().map
.get_parent(id
);
2429 let struct_item
= ccx
.tcx().map
.expect_item(parent
);
2430 let ty
= ty
::node_id_to_type(ccx
.tcx(), ctor_id
);
2431 let sym
= exported_name(ccx
,
2434 &struct_item
.attrs
);
2435 let llfn
= register_fn(ccx
, struct_item
.span
,
2437 attributes
::inline(llfn
, attributes
::InlineAttr
::Hint
);
2442 ccx
.sess().bug(&format
!("get_item_val(): unexpected variant: {:?}",
2447 // All LLVM globals and functions are initially created as external-linkage
2448 // declarations. If `trans_item`/`trans_fn` later turns the declaration
2449 // into a definition, it adjusts the linkage then (using `update_linkage`).
2451 // The exception is foreign items, which have their linkage set inside the
2452 // call to `foreign::register_*` above. We don't touch the linkage after
2453 // that (`foreign::trans_foreign_mod` doesn't adjust the linkage like the
2454 // other item translation functions do).
2456 ccx
.item_vals().borrow_mut().insert(id
, val
);
2460 fn register_method(ccx
: &CrateContext
, id
: ast
::NodeId
,
2461 attrs
: &[ast
::Attribute
], span
: Span
) -> ValueRef
{
2462 let mty
= ty
::node_id_to_type(ccx
.tcx(), id
);
2464 let sym
= exported_name(ccx
, id
, mty
, &attrs
);
2466 if let ty
::ty_bare_fn(_
, ref f
) = mty
.sty
{
2467 let llfn
= if f
.abi
== Rust
|| f
.abi
== RustCall
{
2468 register_fn(ccx
, span
, sym
, id
, mty
)
2470 foreign
::register_rust_fn_with_foreign_abi(ccx
, span
, sym
, id
)
2472 attributes
::from_fn_attrs(ccx
, &attrs
, llfn
);
2475 ccx
.sess().span_bug(span
, "expected bare rust function");
2479 pub fn crate_ctxt_to_encode_parms
<'a
, 'tcx
>(cx
: &'a SharedCrateContext
<'tcx
>,
2480 ie
: encoder
::EncodeInlinedItem
<'a
>)
2481 -> encoder
::EncodeParams
<'a
, 'tcx
> {
2482 encoder
::EncodeParams
{
2483 diag
: cx
.sess().diagnostic(),
2485 reexports
: cx
.export_map(),
2486 item_symbols
: cx
.item_symbols(),
2487 link_meta
: cx
.link_meta(),
2488 cstore
: &cx
.sess().cstore
,
2489 encode_inlined_item
: ie
,
2490 reachable
: cx
.reachable(),
2494 pub fn write_metadata(cx
: &SharedCrateContext
, krate
: &ast
::Crate
) -> Vec
<u8> {
2497 let any_library
= cx
.sess().crate_types
.borrow().iter().any(|ty
| {
2498 *ty
!= config
::CrateTypeExecutable
2504 let encode_inlined_item
: encoder
::EncodeInlinedItem
=
2505 Box
::new(|ecx
, rbml_w
, ii
| astencode
::encode_inlined_item(ecx
, rbml_w
, ii
));
2507 let encode_parms
= crate_ctxt_to_encode_parms(cx
, encode_inlined_item
);
2508 let metadata
= encoder
::encode_metadata(encode_parms
, krate
);
2509 let mut compressed
= encoder
::metadata_encoding_version
.to_vec();
2510 compressed
.push_all(&flate
::deflate_bytes(&metadata
));
2511 let llmeta
= C_bytes_in_context(cx
.metadata_llcx(), &compressed
[..]);
2512 let llconst
= C_struct_in_context(cx
.metadata_llcx(), &[llmeta
], false);
2513 let name
= format
!("rust_metadata_{}_{}",
2514 cx
.link_meta().crate_name
,
2515 cx
.link_meta().crate_hash
);
2516 let buf
= CString
::new(name
).unwrap();
2517 let llglobal
= unsafe {
2518 llvm
::LLVMAddGlobal(cx
.metadata_llmod(), val_ty(llconst
).to_ref(),
2522 llvm
::LLVMSetInitializer(llglobal
, llconst
);
2523 let name
= loader
::meta_section_name(cx
.sess().target
.target
.options
.is_like_osx
);
2524 let name
= CString
::new(name
).unwrap();
2525 llvm
::LLVMSetSection(llglobal
, name
.as_ptr())
2530 /// Find any symbols that are defined in one compilation unit, but not declared
2531 /// in any other compilation unit. Give these symbols internal linkage.
2532 fn internalize_symbols(cx
: &SharedCrateContext
, reachable
: &HashSet
<String
>) {
2534 let mut declared
= HashSet
::new();
2536 let iter_globals
= |llmod
| {
2538 cur
: llvm
::LLVMGetFirstGlobal(llmod
),
2539 step
: llvm
::LLVMGetNextGlobal
,
2543 let iter_functions
= |llmod
| {
2545 cur
: llvm
::LLVMGetFirstFunction(llmod
),
2546 step
: llvm
::LLVMGetNextFunction
,
2550 // Collect all external declarations in all compilation units.
2551 for ccx
in cx
.iter() {
2552 for val
in iter_globals(ccx
.llmod()).chain(iter_functions(ccx
.llmod())) {
2553 let linkage
= llvm
::LLVMGetLinkage(val
);
2554 // We only care about external declarations (not definitions)
2555 // and available_externally definitions.
2556 if !(linkage
== llvm
::ExternalLinkage
as c_uint
&&
2557 llvm
::LLVMIsDeclaration(val
) != 0) &&
2558 !(linkage
== llvm
::AvailableExternallyLinkage
as c_uint
) {
2562 let name
= CStr
::from_ptr(llvm
::LLVMGetValueName(val
))
2563 .to_bytes().to_vec();
2564 declared
.insert(name
);
2568 // Examine each external definition. If the definition is not used in
2569 // any other compilation unit, and is not reachable from other crates,
2570 // then give it internal linkage.
2571 for ccx
in cx
.iter() {
2572 for val
in iter_globals(ccx
.llmod()).chain(iter_functions(ccx
.llmod())) {
2573 // We only care about external definitions.
2574 if !(llvm
::LLVMGetLinkage(val
) == llvm
::ExternalLinkage
as c_uint
&&
2575 llvm
::LLVMIsDeclaration(val
) == 0) {
2579 let name
= CStr
::from_ptr(llvm
::LLVMGetValueName(val
))
2580 .to_bytes().to_vec();
2581 if !declared
.contains(&name
) &&
2582 !reachable
.contains(str::from_utf8(&name
).unwrap()) {
2583 llvm
::SetLinkage(val
, llvm
::InternalLinkage
);
2592 step
: unsafe extern "C" fn(ValueRef
) -> ValueRef
,
2595 impl Iterator
for ValueIter
{
2596 type Item
= ValueRef
;
2598 fn next(&mut self) -> Option
<ValueRef
> {
2602 let step
: unsafe extern "C" fn(ValueRef
) -> ValueRef
=
2603 mem
::transmute_copy(&self.step
);
2614 pub fn trans_crate
<'tcx
>(analysis
: ty
::CrateAnalysis
<'tcx
>)
2615 -> (ty
::ctxt
<'tcx
>, CrateTranslation
) {
2616 let ty
::CrateAnalysis { ty_cx: tcx, export_map, reachable, name, .. }
= analysis
;
2617 let krate
= tcx
.map
.krate();
2619 let check_overflow
= if let Some(v
) = tcx
.sess
.opts
.debugging_opts
.force_overflow_checks
{
2622 tcx
.sess
.opts
.debug_assertions
2625 let check_dropflag
= if let Some(v
) = tcx
.sess
.opts
.debugging_opts
.force_dropflag_checks
{
2628 tcx
.sess
.opts
.debug_assertions
2631 // Before we touch LLVM, make sure that multithreading is enabled.
2633 use std
::sync
::{Once, ONCE_INIT}
;
2634 static INIT
: Once
= ONCE_INIT
;
2635 static mut POISONED
: bool
= false;
2637 if llvm
::LLVMStartMultithreaded() != 1 {
2638 // use an extra bool to make sure that all future usage of LLVM
2639 // cannot proceed despite the Once not running more than once.
2645 tcx
.sess
.bug("couldn't enable multi-threaded LLVM");
2649 let link_meta
= link
::build_link_meta(&tcx
.sess
, krate
, name
);
2651 let codegen_units
= tcx
.sess
.opts
.cg
.codegen_units
;
2652 let shared_ccx
= SharedCrateContext
::new(&link_meta
.crate_name
,
2663 let ccx
= shared_ccx
.get_ccx(0);
2665 // First, verify intrinsics.
2666 intrinsic
::check_intrinsics(&ccx
);
2668 // Next, translate the module.
2670 let _icx
= push_ctxt("text");
2671 trans_mod(&ccx
, &krate
.module
);
2675 for ccx
in shared_ccx
.iter() {
2676 if ccx
.sess().opts
.debuginfo
!= NoDebugInfo
{
2677 debuginfo
::finalize(&ccx
);
2681 // Translate the metadata.
2682 let metadata
= write_metadata(&shared_ccx
, krate
);
2684 if shared_ccx
.sess().trans_stats() {
2685 let stats
= shared_ccx
.stats();
2686 println
!("--- trans stats ---");
2687 println
!("n_glues_created: {}", stats
.n_glues_created
.get());
2688 println
!("n_null_glues: {}", stats
.n_null_glues
.get());
2689 println
!("n_real_glues: {}", stats
.n_real_glues
.get());
2691 println
!("n_fns: {}", stats
.n_fns
.get());
2692 println
!("n_monos: {}", stats
.n_monos
.get());
2693 println
!("n_inlines: {}", stats
.n_inlines
.get());
2694 println
!("n_closures: {}", stats
.n_closures
.get());
2695 println
!("fn stats:");
2696 stats
.fn_stats
.borrow_mut().sort_by(|&(_
, insns_a
), &(_
, insns_b
)| {
2697 insns_b
.cmp(&insns_a
)
2699 for tuple
in &*stats
.fn_stats
.borrow() {
2701 (ref name
, insns
) => {
2702 println
!("{} insns, {}", insns
, *name
);
2707 if shared_ccx
.sess().count_llvm_insns() {
2708 for (k
, v
) in &*shared_ccx
.stats().llvm_insns
.borrow() {
2709 println
!("{:7} {}", *v
, *k
);
2713 let modules
= shared_ccx
.iter()
2714 .map(|ccx
| ModuleTranslation { llcx: ccx.llcx(), llmod: ccx.llmod() }
)
2717 let mut reachable
: Vec
<String
> = shared_ccx
.reachable().iter().filter_map(|id
| {
2718 shared_ccx
.item_symbols().borrow().get(id
).map(|s
| s
.to_string())
2721 // For the purposes of LTO, we add to the reachable set all of the upstream
2722 // reachable extern fns. These functions are all part of the public ABI of
2723 // the final product, so LTO needs to preserve them.
2724 shared_ccx
.sess().cstore
.iter_crate_data(|cnum
, _
| {
2725 let syms
= csearch
::get_reachable_extern_fns(&shared_ccx
.sess().cstore
, cnum
);
2726 reachable
.extend(syms
.into_iter().map(|did
| {
2727 csearch
::get_symbol(&shared_ccx
.sess().cstore
, did
)
2731 // Make sure that some other crucial symbols are not eliminated from the
2732 // module. This includes the main function, the crate map (used for debug
2733 // log settings and I/O), and finally the curious rust_stack_exhausted
2734 // symbol. This symbol is required for use by the libmorestack library that
2735 // we link in, so we must ensure that this symbol is not internalized (if
2736 // defined in the crate).
2737 reachable
.push("main".to_string());
2738 reachable
.push("rust_stack_exhausted".to_string());
2740 // referenced from .eh_frame section on some platforms
2741 reachable
.push("rust_eh_personality".to_string());
2742 // referenced from rt/rust_try.ll
2743 reachable
.push("rust_eh_personality_catch".to_string());
2745 if codegen_units
> 1 {
2746 internalize_symbols(&shared_ccx
, &reachable
.iter().cloned().collect());
2749 let metadata_module
= ModuleTranslation
{
2750 llcx
: shared_ccx
.metadata_llcx(),
2751 llmod
: shared_ccx
.metadata_llmod(),
2753 let formats
= shared_ccx
.tcx().dependency_formats
.borrow().clone();
2754 let no_builtins
= attr
::contains_name(&krate
.attrs
, "no_builtins");
2756 let translation
= CrateTranslation
{
2758 metadata_module
: metadata_module
,
2761 reachable
: reachable
,
2762 crate_formats
: formats
,
2763 no_builtins
: no_builtins
,
2766 (shared_ccx
.take_tcx(), translation
)