// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Translate the completed AST to the LLVM IR.
//!
//! Some functions here, such as trans_block and trans_expr, return a value --
//! the result of the translation to LLVM -- while others, such as trans_fn,
//! trans_impl, and trans_item, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//!
//! Hopefully useful general knowledge about trans:
//!
//! * There's no way to find out the Ty type of a ValueRef. Doing so
//!   would be "trying to get the eggs out of an omelette" (credit:
//!   pcwalton). You can, instead, find out its TypeRef by calling val_ty,
//!   but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int,
//!   int) and rec(x=int, y=int, z=int) will have the same TypeRef.

#![allow(non_camel_case_types)]

pub use self::ValueOrigin::*;

use super::CrateTranslation;
use super::ModuleTranslation;

use back::link::mangle_exported_name;
use back::{link, abi};
use lint;
use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param};
use llvm;
use metadata::{csearch, encoder, loader};
use middle::astencode;
use middle::cfg;
use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
use middle::weak_lang_items;
use middle::subst::Substs;
use middle::ty::{self, Ty, ClosureTyper, type_is_simd, simd_size};
use rustc::ast_map;
use session::config::{self, NoDebugInfo};
use session::Session;
use trans::_match;
use trans::adt;
use trans::attributes;
use trans::build::*;
use trans::builder::{Builder, noname};
use trans::callee;
use trans::cleanup::CleanupMethods;
use trans::cleanup;
use trans::closure;
use trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_integral};
use trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
use trans::common::{CrateContext, FunctionContext};
use trans::common::{Result, NodeIdAndSpan};
use trans::common::{node_id_type, return_type_is_void};
use trans::common::{type_is_immediate, type_is_zero_size, val_ty};
use trans::common;
use trans::consts;
use trans::context::SharedCrateContext;
use trans::controlflow;
use trans::datum;
use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
use trans::declare;
use trans::expr;
use trans::foreign;
use trans::glue;
use trans::intrinsic;
use trans::machine;
use trans::machine::{llsize_of, llsize_of_real};
use trans::meth;
use trans::monomorphize;
use trans::tvec;
use trans::type_::Type;
use trans::type_of;
use trans::type_of::*;
use trans::value::Value;
use util::common::indenter;
use util::sha2::Sha256;
use util::nodemap::NodeMap;

use arena::TypedArena;
use libc::c_uint;
use std::ffi::{CStr, CString};
use std::cell::{Cell, RefCell};
use std::collections::HashSet;
use std::mem;
use std::str;
use std::{i8, i16, i32, i64};
use syntax::abi::{Rust, RustCall, RustIntrinsic, Abi};
use syntax::ast_util::local_def;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
use syntax::codemap::Span;
use syntax::parse::token::InternedString;
use syntax::visit::Visitor;
use syntax::visit;
use syntax::{ast, ast_util};

thread_local! {
    static TASK_LOCAL_INSN_KEY: RefCell<Option<Vec<&'static str>>> = {
        RefCell::new(None)
    }
}

pub fn with_insn_ctxt<F>(blk: F) where
    F: FnOnce(&[&'static str]),
{
    TASK_LOCAL_INSN_KEY.with(move |slot| {
        slot.borrow().as_ref().map(move |s| blk(s));
    })
}

pub fn init_insn_ctxt() {
    TASK_LOCAL_INSN_KEY.with(|slot| {
        *slot.borrow_mut() = Some(Vec::new());
    });
}

pub struct _InsnCtxt {
    _cannot_construct_outside_of_this_module: ()
}

impl Drop for _InsnCtxt {
    fn drop(&mut self) {
        TASK_LOCAL_INSN_KEY.with(|slot| {
            match slot.borrow_mut().as_mut() {
                Some(ctx) => { ctx.pop(); }
                None => {}
            }
        })
    }
}

pub fn push_ctxt(s: &'static str) -> _InsnCtxt {
    debug!("new InsnCtxt: {}", s);
    TASK_LOCAL_INSN_KEY.with(|slot| {
        match slot.borrow_mut().as_mut() {
            Some(ctx) => ctx.push(s),
            None => {}
        }
    });
    _InsnCtxt { _cannot_construct_outside_of_this_module: () }
}
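
// Illustrative sketch (added for exposition; not part of the upstream
// source): functions in this module typically bracket themselves with
//
//     let _icx = push_ctxt("some_fn");
//
// so the name stays on the thread-local stack until `_icx` is dropped,
// and `with_insn_ctxt(|ctx| ...)` can then observe the whole stack,
// e.g. to report where a given LLVM instruction was emitted.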

pub struct StatRecorder<'a, 'tcx: 'a> {
    ccx: &'a CrateContext<'a, 'tcx>,
    name: Option<String>,
    istart: usize,
}

impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
    pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String)
               -> StatRecorder<'a, 'tcx> {
        let istart = ccx.stats().n_llvm_insns.get();
        StatRecorder {
            ccx: ccx,
            name: Some(name),
            istart: istart,
        }
    }
}

impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
    fn drop(&mut self) {
        if self.ccx.sess().trans_stats() {
            let iend = self.ccx.stats().n_llvm_insns.get();
            self.ccx.stats().fn_stats.borrow_mut().push((self.name.take().unwrap(),
                                                         iend - self.istart));
            self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1);
            // Reset LLVM insn count to avoid compound costs.
            self.ccx.stats().n_llvm_insns.set(self.istart);
        }
    }
}

fn get_extern_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_ty: Ty<'tcx>,
                                name: &str, did: ast::DefId) -> ValueRef {
    match ccx.externs().borrow().get(name) {
        Some(n) => return *n,
        None => ()
    }

    let f = declare::declare_rust_fn(ccx, name, fn_ty);

    let attrs = csearch::get_item_attrs(&ccx.sess().cstore, did);
    attributes::from_fn_attrs(ccx, &attrs[..], f);

    ccx.externs().borrow_mut().insert(name.to_string(), f);
    f
}

pub fn self_type_for_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                       closure_id: ast::DefId,
                                       fn_ty: Ty<'tcx>)
                                       -> Ty<'tcx>
{
    let closure_kind = ccx.tcx().closure_kind(closure_id);
    match closure_kind {
        ty::FnClosureKind => {
            ty::mk_imm_rptr(ccx.tcx(), ccx.tcx().mk_region(ty::ReStatic), fn_ty)
        }
        ty::FnMutClosureKind => {
            ty::mk_mut_rptr(ccx.tcx(), ccx.tcx().mk_region(ty::ReStatic), fn_ty)
        }
        ty::FnOnceClosureKind => fn_ty
    }
}

pub fn kind_for_closure(ccx: &CrateContext, closure_id: ast::DefId) -> ty::ClosureKind {
    *ccx.tcx().closure_kinds.borrow().get(&closure_id).unwrap()
}

pub fn get_extern_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, did: ast::DefId,
                                  t: Ty<'tcx>) -> ValueRef {
    let name = csearch::get_symbol(&ccx.sess().cstore, did);
    let ty = type_of(ccx, t);
    match ccx.externs().borrow_mut().get(&name) {
        Some(n) => return *n,
        None => ()
    }
    // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
    // FIXME(nagisa): investigate whether it can be changed into define_global
    let c = declare::declare_global(ccx, &name[..], ty);
    // Thread-local statics in some other crate need to *always* be linked
    // against in a thread-local fashion, so we need to be sure to apply the
    // thread-local attribute locally if it was present remotely. If we
    // don't do this then linker errors can be generated where the linker
    // complains that one object file has a thread-local version of the
    // symbol and another one doesn't.
    for attr in ty::get_attrs(ccx.tcx(), did).iter() {
        if attr.check_name("thread_local") {
            llvm::set_thread_local(c, true);
        }
    }
    if ccx.use_dll_storage_attrs() {
        llvm::SetDLLStorageClass(c, llvm::DLLImportStorageClass);
    }
    ccx.externs().borrow_mut().insert(name.to_string(), c);
    return c;
}
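
// Illustrative sketch (exposition only): the thread-local fixup above
// matters when, say, another crate defines
//
//     #[thread_local] pub static X: u32 = 0;
//
// and this crate merely declares the symbol. Without re-applying the
// attribute to the local declaration, one object file would carry a
// thread-local `X` and another a regular one, which the linker rejects.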

fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                info_ty: Ty<'tcx>, it: LangItem) -> ast::DefId {
    match bcx.tcx().lang_items.require(it) {
        Ok(id) => id,
        Err(s) => {
            bcx.sess().fatal(&format!("allocation of `{}` {}", info_ty, s));
        }
    }
}

// The following malloc_raw_dyn* functions allocate a box to contain
// a given type, but with a potentially dynamic size.

pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  llty_ptr: Type,
                                  info_ty: Ty<'tcx>,
                                  size: ValueRef,
                                  align: ValueRef,
                                  debug_loc: DebugLoc)
                                  -> Result<'blk, 'tcx> {
    let _icx = push_ctxt("malloc_raw_exchange");

    // Allocate space:
    let r = callee::trans_lang_call(bcx,
                                    require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem),
                                    &[size, align],
                                    None,
                                    debug_loc);

    Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}
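
// Illustrative sketch (exposition only): for a statically sized type the
// `size` and `align` arguments are plain constants, so boxing an `i64`
// amounts to calling the exchange-malloc lang item with size 8 and
// align 8, then casting the returned `i8*` to `i64*`.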


pub fn bin_op_to_icmp_predicate(ccx: &CrateContext, op: ast::BinOp_, signed: bool)
                                -> llvm::IntPredicate {
    match op {
        ast::BiEq => llvm::IntEQ,
        ast::BiNe => llvm::IntNE,
        ast::BiLt => if signed { llvm::IntSLT } else { llvm::IntULT },
        ast::BiLe => if signed { llvm::IntSLE } else { llvm::IntULE },
        ast::BiGt => if signed { llvm::IntSGT } else { llvm::IntUGT },
        ast::BiGe => if signed { llvm::IntSGE } else { llvm::IntUGE },
        op => {
            ccx.sess().bug(&format!("comparison_op_to_icmp_predicate: expected \
                                     comparison operator, found {:?}", op));
        }
    }
}

pub fn bin_op_to_fcmp_predicate(ccx: &CrateContext, op: ast::BinOp_)
                                -> llvm::RealPredicate {
    match op {
        ast::BiEq => llvm::RealOEQ,
        // `!=` must be true when either operand is NaN, so it maps to the
        // *unordered* predicate; the other comparisons are ordered and thus
        // false for NaN.
        ast::BiNe => llvm::RealUNE,
        ast::BiLt => llvm::RealOLT,
        ast::BiLe => llvm::RealOLE,
        ast::BiGt => llvm::RealOGT,
        ast::BiGe => llvm::RealOGE,
        op => {
            ccx.sess().bug(&format!("comparison_op_to_fcmp_predicate: expected \
                                     comparison operator, found {:?}", op));
        }
    }
}

pub fn compare_scalar_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                        lhs: ValueRef,
                                        rhs: ValueRef,
                                        t: Ty<'tcx>,
                                        op: ast::BinOp_,
                                        debug_loc: DebugLoc)
                                        -> ValueRef {
    match t.sty {
        ty::TyTuple(ref tys) if tys.is_empty() => {
            // We don't need to do actual comparisons for nil.
            // () == () holds but () < () does not.
            match op {
                ast::BiEq | ast::BiLe | ast::BiGe => return C_bool(bcx.ccx(), true),
                ast::BiNe | ast::BiLt | ast::BiGt => return C_bool(bcx.ccx(), false),
                // refinements would be nice
                _ => bcx.sess().bug("compare_scalar_types: must be a comparison operator")
            }
        }
        ty::TyBareFn(..) | ty::TyBool | ty::TyUint(_) | ty::TyChar => {
            ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, false), lhs, rhs, debug_loc)
        }
        ty::TyRawPtr(mt) if common::type_is_sized(bcx.tcx(), mt.ty) => {
            ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, false), lhs, rhs, debug_loc)
        }
        ty::TyInt(_) => {
            ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, true), lhs, rhs, debug_loc)
        }
        ty::TyFloat(_) => {
            FCmp(bcx, bin_op_to_fcmp_predicate(bcx.ccx(), op), lhs, rhs, debug_loc)
        }
        // Should never get here, because t is scalar.
        _ => bcx.sess().bug("non-scalar type passed to compare_scalar_types")
    }
}

pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      t: Ty<'tcx>,
                                      op: ast::BinOp_,
                                      debug_loc: DebugLoc)
                                      -> ValueRef {
    let signed = match t.sty {
        ty::TyFloat(_) => {
            // The comparison operators for floating point vectors are challenging.
            // LLVM outputs a `<size x i1>`, but if we perform a sign extension
            // then bitcast to a floating point vector, the result will be `-NaN`
            // for each truth value. Because of this they are unsupported.
            bcx.sess().bug("compare_simd_types: comparison operators \
                            not supported for floating point SIMD types")
        },
        ty::TyUint(_) => false,
        ty::TyInt(_) => true,
        _ => bcx.sess().bug("compare_simd_types: invalid SIMD type"),
    };

    let cmp = bin_op_to_icmp_predicate(bcx.ccx(), op, signed);
    // LLVM outputs a `<size x i1>`, so we need to perform a sign extension
    // to get the correctly sized type. This will compile to a single instruction
    // once the IR is converted to assembly if the SIMD instruction is supported
    // by the target architecture.
    SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), val_ty(lhs))
}
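
// Worked example (exposition only): comparing two `<4 x i32>` vectors
// produces an LLVM `<4 x i1>`; the `SExt` above widens it back to
// `<4 x i32>`, so each lane ends up as all-ones (-1) for "true" and
// all-zeros for "false".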

// Iterates through the elements of a structural type.
pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
                                         av: ValueRef,
                                         t: Ty<'tcx>,
                                         mut f: F)
                                         -> Block<'blk, 'tcx> where
    F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
{
    let _icx = push_ctxt("iter_structural_ty");

    fn iter_variant<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
                                   repr: &adt::Repr<'tcx>,
                                   av: ValueRef,
                                   variant: &ty::VariantInfo<'tcx>,
                                   substs: &Substs<'tcx>,
                                   f: &mut F)
                                   -> Block<'blk, 'tcx> where
        F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
    {
        let _icx = push_ctxt("iter_variant");
        let tcx = cx.tcx();
        let mut cx = cx;

        for (i, &arg) in variant.args.iter().enumerate() {
            let arg = monomorphize::apply_param_substs(tcx, substs, &arg);
            cx = f(cx, adt::trans_field_ptr(cx, repr, av, variant.disr_val, i), arg);
        }
        return cx;
    }

    let (data_ptr, info) = if common::type_is_sized(cx.tcx(), t) {
        (av, None)
    } else {
        let data = GEPi(cx, av, &[0, abi::FAT_PTR_ADDR]);
        let info = GEPi(cx, av, &[0, abi::FAT_PTR_EXTRA]);
        (Load(cx, data), Some(Load(cx, info)))
    };

    let mut cx = cx;
    match t.sty {
        ty::TyStruct(..) => {
            let repr = adt::represent_type(cx.ccx(), t);
            expr::with_field_tys(cx.tcx(), t, None, |discr, field_tys| {
                for (i, field_ty) in field_tys.iter().enumerate() {
                    let field_ty = field_ty.mt.ty;
                    let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, discr, i);

                    let val = if common::type_is_sized(cx.tcx(), field_ty) {
                        llfld_a
                    } else {
                        let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter");
                        Store(cx, llfld_a, GEPi(cx, scratch.val, &[0, abi::FAT_PTR_ADDR]));
                        Store(cx, info.unwrap(), GEPi(cx, scratch.val, &[0, abi::FAT_PTR_EXTRA]));
                        scratch.val
                    };
                    cx = f(cx, val, field_ty);
                }
            })
        }
        ty::TyClosure(def_id, substs) => {
            let repr = adt::represent_type(cx.ccx(), t);
            let typer = common::NormalizingClosureTyper::new(cx.tcx());
            let upvars = typer.closure_upvars(def_id, substs).unwrap();
            for (i, upvar) in upvars.iter().enumerate() {
                let llupvar = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
                cx = f(cx, llupvar, upvar.ty);
            }
        }
        ty::TyArray(_, n) => {
            let (base, len) = tvec::get_fixed_base_and_len(cx, data_ptr, n);
            let unit_ty = ty::sequence_element_type(cx.tcx(), t);
            cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
        }
        ty::TySlice(_) | ty::TyStr => {
            let unit_ty = ty::sequence_element_type(cx.tcx(), t);
            cx = tvec::iter_vec_raw(cx, data_ptr, unit_ty, info.unwrap(), f);
        }
        ty::TyTuple(ref args) => {
            let repr = adt::represent_type(cx.ccx(), t);
            for (i, arg) in args.iter().enumerate() {
                let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
                cx = f(cx, llfld_a, *arg);
            }
        }
        ty::TyEnum(tid, substs) => {
            let fcx = cx.fcx;
            let ccx = fcx.ccx;

            let repr = adt::represent_type(ccx, t);
            let variants = ty::enum_variants(ccx.tcx(), tid);
            let n_variants = (*variants).len();

            // NB: we must hit the discriminant first so that structural
            // comparisons know not to proceed when the discriminants differ.

            match adt::trans_switch(cx, &*repr, av) {
                (_match::Single, None) => {
                    if n_variants != 0 {
                        assert!(n_variants == 1);
                        cx = iter_variant(cx, &*repr, av, &*(*variants)[0],
                                          substs, &mut f);
                    }
                }
                (_match::Switch, Some(lldiscrim_a)) => {
                    cx = f(cx, lldiscrim_a, cx.tcx().types.isize);

                    // Create a fall-through basic block for the "else" case of
                    // the switch instruction we're about to generate. Note that
                    // we do **not** use an Unreachable instruction here, even
                    // though most of the time this basic block will never be hit.
                    //
                    // When an enum is dropped, its contents are currently
                    // overwritten to DTOR_DONE, which means the discriminant
                    // could have changed value to something not within the actual
                    // range of the discriminant. Currently this function is only
                    // used for drop glue so in this case we just return quickly
                    // from the outer function, and any other use case will only
                    // call this for an already-valid enum in which case the `ret
                    // void` will never be hit.
                    let ret_void_cx = fcx.new_temp_block("enum-iter-ret-void");
                    RetVoid(ret_void_cx, DebugLoc::None);
                    let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb,
                                          n_variants);
                    let next_cx = fcx.new_temp_block("enum-iter-next");

                    for variant in &(*variants) {
                        let variant_cx =
                            fcx.new_temp_block(
                                &format!("enum-iter-variant-{}",
                                         &variant.disr_val.to_string())
                                );
                        match adt::trans_case(cx, &*repr, variant.disr_val) {
                            _match::SingleResult(r) => {
                                AddCase(llswitch, r.val, variant_cx.llbb)
                            }
                            _ => ccx.sess().unimpl("value from adt::trans_case \
                                                    in iter_structural_ty")
                        }
                        let variant_cx =
                            iter_variant(variant_cx,
                                         &*repr,
                                         data_ptr,
                                         &**variant,
                                         substs,
                                         &mut f);
                        Br(variant_cx, next_cx.llbb, DebugLoc::None);
                    }
                    cx = next_cx;
                }
                _ => ccx.sess().unimpl("value from adt::trans_switch \
                                        in iter_structural_ty")
            }
        }
        _ => {
            cx.sess().unimpl(&format!("type in iter_structural_ty: {}", t))
        }
    }
    return cx;
}

pub fn cast_shift_expr_rhs(cx: Block,
                           op: ast::BinOp_,
                           lhs: ValueRef,
                           rhs: ValueRef)
                           -> ValueRef {
    cast_shift_rhs(op, lhs, rhs,
                   |a, b| Trunc(cx, a, b),
                   |a, b| ZExt(cx, a, b))
}

pub fn cast_shift_const_rhs(op: ast::BinOp_,
                            lhs: ValueRef, rhs: ValueRef) -> ValueRef {
    cast_shift_rhs(op, lhs, rhs,
                   |a, b| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) },
                   |a, b| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) })
}

fn cast_shift_rhs<F, G>(op: ast::BinOp_,
                        lhs: ValueRef,
                        rhs: ValueRef,
                        trunc: F,
                        zext: G)
                        -> ValueRef where
    F: FnOnce(ValueRef, Type) -> ValueRef,
    G: FnOnce(ValueRef, Type) -> ValueRef,
{
    // Shifts may have any size int on the rhs
    if ast_util::is_shift_binop(op) {
        let mut rhs_llty = val_ty(rhs);
        let mut lhs_llty = val_ty(lhs);
        if rhs_llty.kind() == Vector { rhs_llty = rhs_llty.element_type() }
        if lhs_llty.kind() == Vector { lhs_llty = lhs_llty.element_type() }
        let rhs_sz = rhs_llty.int_width();
        let lhs_sz = lhs_llty.int_width();
        if lhs_sz < rhs_sz {
            trunc(rhs, lhs_llty)
        } else if lhs_sz > rhs_sz {
            // FIXME (#1877): if shifting by negative values ever becomes
            // defined behavior, then this (zero-extension) is wrong.
            zext(rhs, lhs_llty)
        } else {
            rhs
        }
    } else {
        rhs
    }
}
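
// Illustrative sketch (exposition only): for `x << y` with `x: u64` and
// `y: u8`, the rhs is zero-extended from i8 to i64 so that both operands
// of the LLVM `shl` have the same width; with `x: u8` and `y: u64` the
// rhs is truncated to i8 instead.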

pub fn llty_and_min_for_signed_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                              val_t: Ty<'tcx>) -> (Type, u64) {
    match val_t.sty {
        ty::TyInt(t) => {
            let llty = Type::int_from_ty(cx.ccx(), t);
            let min = match t {
                ast::TyIs if llty == Type::i32(cx.ccx()) => i32::MIN as u64,
                ast::TyIs => i64::MIN as u64,
                ast::TyI8 => i8::MIN as u64,
                ast::TyI16 => i16::MIN as u64,
                ast::TyI32 => i32::MIN as u64,
                ast::TyI64 => i64::MIN as u64,
            };
            (llty, min)
        }
        _ => unreachable!(),
    }
}

pub fn fail_if_zero_or_overflows<'blk, 'tcx>(
                                cx: Block<'blk, 'tcx>,
                                call_info: NodeIdAndSpan,
                                divrem: ast::BinOp,
                                lhs: ValueRef,
                                rhs: ValueRef,
                                rhs_t: Ty<'tcx>)
                                -> Block<'blk, 'tcx> {
    let (zero_text, overflow_text) = if divrem.node == ast::BiDiv {
        ("attempted to divide by zero",
         "attempted to divide with overflow")
    } else {
        ("attempted remainder with a divisor of zero",
         "attempted remainder with overflow")
    };
    let debug_loc = call_info.debug_loc();

    let (is_zero, is_signed) = match rhs_t.sty {
        ty::TyInt(t) => {
            let zero = C_integral(Type::int_from_ty(cx.ccx(), t), 0, false);
            (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), true)
        }
        ty::TyUint(t) => {
            let zero = C_integral(Type::uint_from_ty(cx.ccx(), t), 0, false);
            (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), false)
        }
        ty::TyStruct(_, _) if type_is_simd(cx.tcx(), rhs_t) => {
            let mut res = C_bool(cx.ccx(), false);
            for i in 0 .. simd_size(cx.tcx(), rhs_t) {
                res = Or(cx, res,
                         IsNull(cx,
                                ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))), debug_loc);
            }
            (res, false)
        }
        _ => {
            cx.sess().bug(&format!("fail-if-zero on unexpected type: {}", rhs_t));
        }
    };
    let bcx = with_cond(cx, is_zero, |bcx| {
        controlflow::trans_fail(bcx, call_info, InternedString::new(zero_text))
    });

    // To quote LLVM's documentation for the sdiv instruction:
    //
    //      Division by zero leads to undefined behavior. Overflow also leads
    //      to undefined behavior; this is a rare case, but can occur, for
    //      example, by doing a 32-bit division of -2147483648 by -1.
    //
    // In order to avoid undefined behavior, we perform runtime checks for
    // signed division/remainder which would trigger overflow. For unsigned
    // integers, no action beyond checking for zero need be taken.
    if is_signed {
        let (llty, min) = llty_and_min_for_signed_ty(cx, rhs_t);
        let minus_one = ICmp(bcx, llvm::IntEQ, rhs,
                             C_integral(llty, !0, false), debug_loc);
        with_cond(bcx, minus_one, |bcx| {
            let is_min = ICmp(bcx, llvm::IntEQ, lhs,
                              C_integral(llty, min, true), debug_loc);
            with_cond(bcx, is_min, |bcx| {
                controlflow::trans_fail(bcx,
                                        call_info,
                                        InternedString::new(overflow_text))
            })
        })
    } else {
        bcx
    }
}

pub fn trans_external_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                     did: ast::DefId, t: Ty<'tcx>) -> ValueRef {
    let name = csearch::get_symbol(&ccx.sess().cstore, did);
    match t.sty {
        ty::TyBareFn(_, ref fn_ty) => {
            match ccx.sess().target.target.adjust_abi(fn_ty.abi) {
                Rust | RustCall => {
                    get_extern_rust_fn(ccx, t, &name[..], did)
                }
                RustIntrinsic => {
                    ccx.sess().bug("unexpected intrinsic in trans_external_path")
                }
                _ => {
                    let llfn = foreign::register_foreign_item_fn(ccx, fn_ty.abi,
                                                                 t, &name);
                    let attrs = csearch::get_item_attrs(&ccx.sess().cstore, did);
                    attributes::from_fn_attrs(ccx, &attrs, llfn);
                    llfn
                }
            }
        }
        _ => {
            get_extern_const(ccx, did, t)
        }
    }
}

pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                          llfn: ValueRef,
                          llargs: &[ValueRef],
                          fn_ty: Ty<'tcx>,
                          debug_loc: DebugLoc)
                          -> (ValueRef, Block<'blk, 'tcx>) {
    let _icx = push_ctxt("invoke_");
    if bcx.unreachable.get() {
        return (C_null(Type::i8(bcx.ccx())), bcx);
    }

    let attributes = attributes::from_fn_type(bcx.ccx(), fn_ty);

    match bcx.opt_node_id {
        None => {
            debug!("invoke at ???");
        }
        Some(id) => {
            debug!("invoke at {}", bcx.tcx().map.node_to_string(id));
        }
    }

    if need_invoke(bcx) {
        debug!("invoking {} at {:?}", bcx.val_to_string(llfn), bcx.llbb);
        for &llarg in llargs {
            debug!("arg: {}", bcx.val_to_string(llarg));
        }
        let normal_bcx = bcx.fcx.new_temp_block("normal-return");
        let landing_pad = bcx.fcx.get_landing_pad();

        let llresult = Invoke(bcx,
                              llfn,
                              &llargs[..],
                              normal_bcx.llbb,
                              landing_pad,
                              Some(attributes),
                              debug_loc);
        return (llresult, normal_bcx);
    } else {
        debug!("calling {} at {:?}", bcx.val_to_string(llfn), bcx.llbb);
        for &llarg in llargs {
            debug!("arg: {}", bcx.val_to_string(llarg));
        }

        let llresult = Call(bcx,
                            llfn,
                            &llargs[..],
                            Some(attributes),
                            debug_loc);
        return (llresult, bcx);
    }
}

pub fn need_invoke(bcx: Block) -> bool {
    // FIXME(#25869) currently unwinding is not implemented for MSVC and our
    //               normal unwinding infrastructure ends up just causing linker
    //               errors with the current LLVM implementation, so landing
    //               pads are disabled entirely for MSVC targets
    if bcx.sess().no_landing_pads() ||
       bcx.sess().target.target.options.is_like_msvc {
        return false;
    }

    // Avoid using invoke if we are already inside a landing pad.
    if bcx.is_lpad {
        return false;
    }

    bcx.fcx.needs_invoke()
}

pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                     v: ValueRef, t: Ty<'tcx>) -> ValueRef {
    let _icx = push_ctxt("load_if_immediate");
    if type_is_immediate(cx.ccx(), t) { return load_ty(cx, v, t); }
    return v;
}

/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values. Also handles various special cases where the type
/// gives us better information about what we are loading.
pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                           ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
    if cx.unreachable.get() || type_is_zero_size(cx.ccx(), t) {
        return C_undef(type_of::type_of(cx.ccx(), t));
    }

    let ptr = to_arg_ty_ptr(cx, ptr, t);
    let align = type_of::align_of(cx.ccx(), t);

    if type_is_immediate(cx.ccx(), t) && type_of::type_of(cx.ccx(), t).is_aggregate() {
        let load = Load(cx, ptr);
        unsafe {
            llvm::LLVMSetAlignment(load, align);
        }
        return load;
    }

    unsafe {
        let global = llvm::LLVMIsAGlobalVariable(ptr);
        if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
            let val = llvm::LLVMGetInitializer(global);
            if !val.is_null() {
                return to_arg_ty(cx, val, t);
            }
        }
    }

    let val = if ty::type_is_bool(t) {
        LoadRangeAssert(cx, ptr, 0, 2, llvm::False)
    } else if ty::type_is_char(t) {
        // a char is a Unicode codepoint, and so takes values from 0
        // to 0x10FFFF inclusive only.
        LoadRangeAssert(cx, ptr, 0, 0x10FFFF + 1, llvm::False)
    } else if (ty::type_is_region_ptr(t) || ty::type_is_unique(t))
        && !common::type_is_fat_ptr(cx.tcx(), t) {
        LoadNonNull(cx, ptr)
    } else {
        Load(cx, ptr)
    };

    unsafe {
        llvm::LLVMSetAlignment(val, align);
    }

    to_arg_ty(cx, val, t)
}

/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values.
pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
    if cx.unreachable.get() {
        return;
    }

    let store = Store(cx, from_arg_ty(cx, v, t), to_arg_ty_ptr(cx, dst, t));
    unsafe {
        llvm::LLVMSetAlignment(store, type_of::align_of(cx.ccx(), t));
    }
}

pub fn from_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
    if ty::type_is_bool(ty) {
        ZExt(bcx, val, Type::i8(bcx.ccx()))
    } else {
        val
    }
}

pub fn to_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
    if ty::type_is_bool(ty) {
        Trunc(bcx, val, Type::i1(bcx.ccx()))
    } else {
        val
    }
}
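
// Illustrative sketch (exposition only): `bool` is the canonical case for
// the two helpers above. It is stored in memory as an i8 but used as an i1
// in SSA form, so a round trip looks like:
//
//     store_ty: ZExt  i1 -> i8, then store
//     load_ty:  load with a [0, 2) range assert, then Trunc i8 -> i1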

pub fn to_arg_ty_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef, ty: Ty<'tcx>) -> ValueRef {
    if type_is_immediate(bcx.ccx(), ty) && type_of::type_of(bcx.ccx(), ty).is_aggregate() {
        // We want to pass small aggregates as immediate values, but using an aggregate LLVM type
        // for this leads to bad optimizations, so its arg type is an appropriately sized integer
        // and we have to convert it.
        BitCast(bcx, ptr, type_of::arg_type_of(bcx.ccx(), ty).ptr_to())
    } else {
        ptr
    }
}

pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &ast::Local)
                              -> Block<'blk, 'tcx> {
    debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id);
    let _indenter = indenter();
    let _icx = push_ctxt("init_local");
    _match::store_local(bcx, local)
}

pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
                             is_lpad: bool,
                             llbb: BasicBlockRef)
                             -> Block<'blk, 'tcx> {
    common::BlockS::new(llbb, is_lpad, None, fcx)
}

pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
                                val: ValueRef,
                                f: F)
                                -> Block<'blk, 'tcx> where
    F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>,
{
    let _icx = push_ctxt("with_cond");

    if bcx.unreachable.get() || common::const_to_opt_uint(val) == Some(0) {
        return bcx;
    }

    let fcx = bcx.fcx;
    let next_cx = fcx.new_temp_block("next");
    let cond_cx = fcx.new_temp_block("cond");
    CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None);
    let after_cx = f(cond_cx);
    if !after_cx.terminated.get() {
        Br(after_cx, next_cx.llbb, DebugLoc::None);
    }
    next_cx
}

pub fn call_lifetime_start(cx: Block, ptr: ValueRef) {
    if cx.sess().opts.optimize == config::No {
        return;
    }

    let _icx = push_ctxt("lifetime_start");
    let ccx = cx.ccx();

    let llsize = C_u64(ccx, machine::llsize_of_alloc(ccx, val_ty(ptr).element_type()));
    let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
    let lifetime_start = ccx.get_intrinsic(&"llvm.lifetime.start");
    Call(cx, lifetime_start, &[llsize, ptr], None, DebugLoc::None);
}

pub fn call_lifetime_end(cx: Block, ptr: ValueRef) {
    if cx.sess().opts.optimize == config::No {
        return;
    }

    let _icx = push_ctxt("lifetime_end");
    let ccx = cx.ccx();

    let llsize = C_u64(ccx, machine::llsize_of_alloc(ccx, val_ty(ptr).element_type()));
    let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
    let lifetime_end = ccx.get_intrinsic(&"llvm.lifetime.end");
    Call(cx, lifetime_end, &[llsize, ptr], None, DebugLoc::None);
}

pub fn call_memcpy(cx: Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) {
    let _icx = push_ctxt("call_memcpy");
    let ccx = cx.ccx();
    let key = match &ccx.sess().target.target.target_pointer_width[..] {
        "32" => "llvm.memcpy.p0i8.p0i8.i32",
        "64" => "llvm.memcpy.p0i8.p0i8.i64",
        tws => panic!("Unsupported target word size for memcpy: {}", tws),
    };
    let memcpy = ccx.get_intrinsic(&key);
    let src_ptr = PointerCast(cx, src, Type::i8p(ccx));
    let dst_ptr = PointerCast(cx, dst, Type::i8p(ccx));
    let size = IntCast(cx, n_bytes, ccx.int_type());
    let align = C_i32(ccx, align as i32);
    let volatile = C_bool(ccx, false);
    Call(cx, memcpy, &[dst_ptr, src_ptr, size, align, volatile], None, DebugLoc::None);
}

pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             dst: ValueRef, src: ValueRef,
                             t: Ty<'tcx>) {
    let _icx = push_ctxt("memcpy_ty");
    let ccx = bcx.ccx();
    if ty::type_is_structural(t) {
        let llty = type_of::type_of(ccx, t);
        let llsz = llsize_of(ccx, llty);
        let llalign = type_of::align_of(ccx, t);
        call_memcpy(bcx, dst, src, llsz, llalign as u32);
    } else {
        store_ty(bcx, load_ty(bcx, src, t), dst, t);
    }
}

pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
    if cx.unreachable.get() { return; }
    let _icx = push_ctxt("drop_done_fill_mem");
    let bcx = cx;
    memfill(&B(bcx), llptr, t, adt::DTOR_DONE);
}

pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
    if cx.unreachable.get() { return; }
    let _icx = push_ctxt("init_zero_mem");
    let bcx = cx;
    memfill(&B(bcx), llptr, t, 0);
}

// Always use this function instead of storing a constant byte to the memory
// in question. e.g. if you store a zero constant, LLVM will drown in vreg
// allocation for large data structures, and the generated code will be
// awful. (A telltale sign of this is large quantities of
// `mov [byte ptr foo],0` in the generated code.)
fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) {
    let _icx = push_ctxt("memfill");
    let ccx = b.ccx;

    let llty = type_of::type_of(ccx, ty);

    let intrinsic_key = match &ccx.sess().target.target.target_pointer_width[..] {
        "32" => "llvm.memset.p0i8.i32",
        "64" => "llvm.memset.p0i8.i64",
        tws => panic!("Unsupported target word size for memset: {}", tws),
    };

    let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key);
    let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to());
    let llzeroval = C_u8(ccx, byte as usize);
    let size = machine::llsize_of(ccx, llty);
    let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
    let volatile = C_bool(ccx, false);
    b.call(llintrinsicfn, &[llptr, llzeroval, size, align, volatile], None);
}

pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, name: &str) -> ValueRef {
    let _icx = push_ctxt("alloc_ty");
    let ccx = bcx.ccx();
    let ty = type_of::type_of(ccx, t);
    assert!(!ty::type_has_params(t));
    let val = alloca(bcx, ty, name);
    return val;
}

pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
    let p = alloca_no_lifetime(cx, ty, name);
    call_lifetime_start(cx, p);
    p
}

pub fn alloca_no_lifetime(cx: Block, ty: Type, name: &str) -> ValueRef {
    let _icx = push_ctxt("alloca");
    if cx.unreachable.get() {
        unsafe {
            return llvm::LLVMGetUndef(ty.ptr_to().to_ref());
        }
    }
    debuginfo::clear_source_location(cx.fcx);
    Alloca(cx, ty, name)
}

// Creates the alloca slot which holds the pointer to the slot for the final return value
pub fn make_return_slot_pointer<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                                          output_type: Ty<'tcx>) -> ValueRef {
    let lloutputtype = type_of::type_of(fcx.ccx, output_type);

    // We create an alloca to hold a pointer of type `output_type`
    // which will hold the pointer to the right alloca which has the
    // final ret value
    if fcx.needs_ret_allocas {
        // Let's create the stack slot
        let slot = AllocaFcx(fcx, lloutputtype.ptr_to(), "llretslotptr");

        // and if we're using an out pointer, then store that in our newly made slot
        if type_of::return_uses_outptr(fcx.ccx, output_type) {
            let outptr = get_param(fcx.llfn, 0);

            let b = fcx.ccx.builder();
            b.position_before(fcx.alloca_insert_pt.get().unwrap());
            b.store(outptr, slot);
        }

        slot

    // But if there are no nested returns, we skip the indirection and have a single
    // retslot
    } else {
        if type_of::return_uses_outptr(fcx.ccx, output_type) {
            get_param(fcx.llfn, 0)
        } else {
            AllocaFcx(fcx, lloutputtype, "sret_slot")
        }
    }
}

struct FindNestedReturn {
    found: bool,
}

impl FindNestedReturn {
    fn new() -> FindNestedReturn {
        FindNestedReturn { found: false }
    }
}

impl<'v> Visitor<'v> for FindNestedReturn {
    fn visit_expr(&mut self, e: &ast::Expr) {
        match e.node {
            ast::ExprRet(..) => {
                self.found = true;
            }
            _ => visit::walk_expr(self, e)
        }
    }
}

fn build_cfg(tcx: &ty::ctxt, id: ast::NodeId) -> (ast::NodeId, Option<cfg::CFG>) {
    let blk = match tcx.map.find(id) {
        Some(ast_map::NodeItem(i)) => {
            match i.node {
                ast::ItemFn(_, _, _, _, _, ref blk) => {
                    blk
                }
                _ => tcx.sess.bug("unexpected item variant in has_nested_returns")
            }
        }
        Some(ast_map::NodeTraitItem(trait_item)) => {
            match trait_item.node {
                ast::MethodTraitItem(_, Some(ref body)) => body,
                _ => {
                    tcx.sess.bug("unexpected variant: trait item other than a \
                                  provided method in has_nested_returns")
                }
            }
        }
        Some(ast_map::NodeImplItem(impl_item)) => {
            match impl_item.node {
                ast::MethodImplItem(_, ref body) => body,
                _ => {
                    tcx.sess.bug("unexpected variant: non-method impl item in \
                                  has_nested_returns")
                }
            }
        }
        Some(ast_map::NodeExpr(e)) => {
            match e.node {
                ast::ExprClosure(_, _, ref blk) => blk,
                _ => tcx.sess.bug("unexpected expr variant in has_nested_returns")
            }
        }
        Some(ast_map::NodeVariant(..)) |
        Some(ast_map::NodeStructCtor(..)) => return (ast::DUMMY_NODE_ID, None),

        // glue, shims, etc
        None if id == ast::DUMMY_NODE_ID => return (ast::DUMMY_NODE_ID, None),

        _ => tcx.sess.bug(&format!("unexpected variant in has_nested_returns: {}",
                                   tcx.map.path_to_string(id)))
    };

    (blk.id, Some(cfg::CFG::new(tcx, blk)))
}

// Checks for the presence of "nested returns" in a function.
// Nested returns are when the inner expression of a return expression
// (the 'expr' in 'return expr') contains a return expression. Only cases
// where the outer return is actually reachable are considered. Implicit
// returns from the end of blocks are considered as well.
//
// This check is needed to handle the case where the inner expression is
// part of a larger expression that may have already partially-filled the
// return slot alloca. This can cause errors related to clean-up due to
// the clobbering of the existing value in the return slot.
fn has_nested_returns(tcx: &ty::ctxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool {
    for index in cfg.graph.depth_traverse(cfg.entry) {
        let n = cfg.graph.node_data(index);
        match tcx.map.find(n.id()) {
            Some(ast_map::NodeExpr(ex)) => {
                if let ast::ExprRet(Some(ref ret_expr)) = ex.node {
                    let mut visitor = FindNestedReturn::new();
                    visit::walk_expr(&mut visitor, &**ret_expr);
                    if visitor.found {
                        return true;
                    }
                }
            }
            Some(ast_map::NodeBlock(blk)) if blk.id == blk_id => {
                let mut visitor = FindNestedReturn::new();
                visit::walk_expr_opt(&mut visitor, &blk.expr);
                if visitor.found {
                    return true;
                }
            }
            _ => {}
        }
    }

    return false;
}
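
// Illustrative sketch (exposition only): a function this analysis flags,
// since the operand of the outer `return` itself contains a `return` and
// the return slot may therefore be written more than once:
//
//     fn f(x: bool) -> i32 {
//         return if x { return 1; } else { 2 };
//     }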

// NB: must keep 4 fns in sync:
//
// - type_of_fn
// - create_datums_for_fn_args.
// - new_fn_ctxt
// - trans_args
//
// Be warned! You must call `init_function` before doing anything with the
// returned function context.
pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
                             llfndecl: ValueRef,
                             id: ast::NodeId,
                             has_env: bool,
                             output_type: ty::FnOutput<'tcx>,
                             param_substs: &'tcx Substs<'tcx>,
                             sp: Option<Span>,
                             block_arena: &'a TypedArena<common::BlockS<'a, 'tcx>>)
                             -> FunctionContext<'a, 'tcx> {
    common::validate_substs(param_substs);

    debug!("new_fn_ctxt(path={}, id={}, param_substs={:?})",
           if id == !0 {
               "".to_string()
           } else {
               ccx.tcx().map.path_to_string(id).to_string()
           },
           id, param_substs);

    let uses_outptr = match output_type {
        ty::FnConverging(output_type) => {
            let substd_output_type =
                monomorphize::apply_param_substs(ccx.tcx(), param_substs, &output_type);
            type_of::return_uses_outptr(ccx, substd_output_type)
        }
        ty::FnDiverging => false
    };
    let debug_context = debuginfo::create_function_debug_context(ccx, id, param_substs, llfndecl);
    let (blk_id, cfg) = build_cfg(ccx.tcx(), id);
    let nested_returns = if let Some(ref cfg) = cfg {
        has_nested_returns(ccx.tcx(), cfg, blk_id)
    } else {
        false
    };

    let mut fcx = FunctionContext {
        llfn: llfndecl,
        llenv: None,
        llretslotptr: Cell::new(None),
        param_env: ty::empty_parameter_environment(ccx.tcx()),
        alloca_insert_pt: Cell::new(None),
        llreturn: Cell::new(None),
        needs_ret_allocas: nested_returns,
        personality: Cell::new(None),
        caller_expects_out_pointer: uses_outptr,
        lllocals: RefCell::new(NodeMap()),
        llupvars: RefCell::new(NodeMap()),
        id: id,
        param_substs: param_substs,
        span: sp,
        block_arena: block_arena,
        ccx: ccx,
        debug_context: debug_context,
        scopes: RefCell::new(Vec::new()),
        cfg: cfg
    };

    if has_env {
        fcx.llenv = Some(get_param(fcx.llfn, fcx.env_arg_pos() as c_uint))
    }

    fcx
}

/// Performs setup on a newly created function, creating the entry scope block
/// and allocating space for the return pointer.
pub fn init_function<'a, 'tcx>(fcx: &'a FunctionContext<'a, 'tcx>,
                               skip_retptr: bool,
                               output: ty::FnOutput<'tcx>)
                               -> Block<'a, 'tcx> {
    let entry_bcx = fcx.new_temp_block("entry-block");

    // Use a dummy instruction as the insertion point for all allocas.
    // This is later removed in FunctionContext::cleanup.
    fcx.alloca_insert_pt.set(Some(unsafe {
        Load(entry_bcx, C_null(Type::i8p(fcx.ccx)));
        llvm::LLVMGetFirstInstruction(entry_bcx.llbb)
    }));

    if let ty::FnConverging(output_type) = output {
        // This shouldn't need to recompute the return type,
        // as new_fn_ctxt did it already.
        let substd_output_type = fcx.monomorphize(&output_type);
        if !return_type_is_void(fcx.ccx, substd_output_type) {
            // If the function returns nil/bot, there is no real return
            // value, so do not set `llretslotptr`.
            if !skip_retptr || fcx.caller_expects_out_pointer {
                // Otherwise, we normally allocate the llretslotptr, unless we
                // have been instructed to skip it for immediate return
                // values.
                fcx.llretslotptr.set(Some(make_return_slot_pointer(fcx, substd_output_type)));
            }
        }
    }

    entry_bcx
}

// NB: must keep 4 fns in sync:
//
// - type_of_fn
// - create_datums_for_fn_args.
// - new_fn_ctxt
// - trans_args

pub fn arg_kind<'a, 'tcx>(cx: &FunctionContext<'a, 'tcx>, t: Ty<'tcx>)
                          -> datum::Rvalue {
    use trans::datum::{ByRef, ByValue};

    datum::Rvalue {
        mode: if arg_is_indirect(cx.ccx, t) { ByRef } else { ByValue }
    }
}

// work around bizarre resolve errors
pub type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>;

// create_datums_for_fn_args: creates rvalue datums for each of the
// incoming function arguments. These will later be stored into
// appropriate lvalue datums.
pub fn create_datums_for_fn_args<'a, 'tcx>(bcx: Block<'a, 'tcx>,
                                           arg_tys: &[Ty<'tcx>])
                                           -> Vec<RvalueDatum<'tcx>> {
    let _icx = push_ctxt("create_datums_for_fn_args");
    let fcx = bcx.fcx;

    // Wrap the ValueRefs that we get from `get_param` for each argument
    // into datums.
    let mut i = fcx.arg_offset() as c_uint;
    arg_tys.iter().map(|&arg_ty| {
        if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
            let llty = type_of::type_of(bcx.ccx(), arg_ty);
            let data = get_param(fcx.llfn, i);
            let extra = get_param(fcx.llfn, i + 1);
            let fat_ptr = expr::make_fat_ptr(bcx, llty, data, extra);
            i += 2;
            datum::Datum::new(fat_ptr, arg_ty, datum::Rvalue { mode: datum::ByValue })
        } else {
            let llarg = get_param(fcx.llfn, i);
            i += 1;
            datum::Datum::new(llarg, arg_ty, arg_kind(fcx, arg_ty))
        }
    }).collect()
}
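
// Illustrative sketch (exposition only): a fat-pointer argument such as
// `&[u8]` arrives as *two* LLVM parameters, a data pointer and a length,
// which the loop above recombines into a single fat-pointer value, while
// a thin argument like `i32` consumes just one parameter slot.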

/// Creates rvalue datums for each of the incoming function arguments and
/// tuples the arguments. These will later be stored into appropriate lvalue
/// datums.
///
/// FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
fn create_datums_for_fn_args_under_call_abi<'blk, 'tcx>(
        mut bcx: Block<'blk, 'tcx>,
        arg_scope: cleanup::CustomScopeIndex,
        arg_tys: &[Ty<'tcx>])
        -> Vec<RvalueDatum<'tcx>> {
    let mut result = Vec::new();
    let mut idx = bcx.fcx.arg_offset() as c_uint;
    for (i, &arg_ty) in arg_tys.iter().enumerate() {
        if i < arg_tys.len() - 1 {
            // Regular argument.
            result.push(if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
                let llty = type_of::type_of(bcx.ccx(), arg_ty);
                let data = get_param(bcx.fcx.llfn, idx);
                let extra = get_param(bcx.fcx.llfn, idx + 1);
                idx += 2;
                let fat_ptr = expr::make_fat_ptr(bcx, llty, data, extra);
                datum::Datum::new(fat_ptr, arg_ty, datum::Rvalue { mode: datum::ByValue })
            } else {
                let val = get_param(bcx.fcx.llfn, idx);
                idx += 1;
                datum::Datum::new(val, arg_ty, arg_kind(bcx.fcx, arg_ty))
            });

            continue
        }

        // This is the last argument. Tuple it.
        match arg_ty.sty {
            ty::TyTuple(ref tupled_arg_tys) => {
                let tuple_args_scope_id = cleanup::CustomScope(arg_scope);
                let tuple =
                    unpack_datum!(bcx,
                                  datum::lvalue_scratch_datum(bcx,
                                                              arg_ty,
                                                              "tupled_args",
                                                              tuple_args_scope_id,
                                                              (),
                                                              |(),
                                                               mut bcx,
                                                               llval| {
                        for (j, &tupled_arg_ty) in
                                tupled_arg_tys.iter().enumerate() {
                            let lldest = GEPi(bcx, llval, &[0, j]);
                            if common::type_is_fat_ptr(bcx.tcx(), tupled_arg_ty) {
                                let data = get_param(bcx.fcx.llfn, idx);
                                let extra = get_param(bcx.fcx.llfn, idx + 1);
                                Store(bcx, data, expr::get_dataptr(bcx, lldest));
                                Store(bcx, extra, expr::get_len(bcx, lldest));
                                idx += 2;
                            } else {
                                let datum = datum::Datum::new(
                                    get_param(bcx.fcx.llfn, idx),
                                    tupled_arg_ty,
                                    arg_kind(bcx.fcx, tupled_arg_ty));
                                idx += 1;
                                bcx = datum.store_to(bcx, lldest);
                            };
                        }
                        bcx
                    }));
                let tuple = unpack_datum!(bcx,
                                          tuple.to_expr_datum()
                                               .to_rvalue_datum(bcx,
                                                                "argtuple"));
                result.push(tuple);
            }
            _ => {
                bcx.tcx().sess.bug("last argument of a function with \
                                    `rust-call` ABI isn't a tuple?!")
            }
        };

    }

    result
}
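
// Illustrative sketch (exposition only): under the "rust-call" ABI the
// trailing tuple is passed untupled. For a hypothetical
//
//     fn call(&self, args: (i32, f64)) -> i32 { ... }
//
// the generated function receives `self`, an `i32` and an `f64` as
// separate LLVM parameters, and the code above re-tuples the last two
// into a stack-allocated `(i32, f64)` before the body runs.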
1406
1407 fn copy_args_to_allocas<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1408 arg_scope: cleanup::CustomScopeIndex,
1409 args: &[ast::Arg],
1410 arg_datums: Vec<RvalueDatum<'tcx>>)
1411 -> Block<'blk, 'tcx> {
1412 debug!("copy_args_to_allocas");
1413
1414 let _icx = push_ctxt("copy_args_to_allocas");
1415 let mut bcx = bcx;
1416
1417 let arg_scope_id = cleanup::CustomScope(arg_scope);
1418
1419 for (i, arg_datum) in arg_datums.into_iter().enumerate() {
1420 // For certain mode/type combinations, the raw llarg values are passed
1421 // by value. However, within the fn body itself, we want to always
1422 // have all locals and arguments be by-ref so that we can cancel the
1423 // cleanup and for better interaction with LLVM's debug info. So, if
1424 // the argument would be passed by value, we store it into an alloca.
1425 // This alloca should be optimized away by LLVM's mem-to-reg pass in
1426 // the event it's not truly needed.
1427
1428 bcx = _match::store_arg(bcx, &*args[i].pat, arg_datum, arg_scope_id);
1429 debuginfo::create_argument_metadata(bcx, &args[i]);
1430 }
1431
1432 bcx
1433 }
1434
1435 // Ties up the llstaticallocas -> llloadenv -> lltop edges,
1436 // and builds the return block.
1437 pub fn finish_fn<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
1438 last_bcx: Block<'blk, 'tcx>,
1439 retty: ty::FnOutput<'tcx>,
1440 ret_debug_loc: DebugLoc) {
1441 let _icx = push_ctxt("finish_fn");
1442
1443 let ret_cx = match fcx.llreturn.get() {
1444 Some(llreturn) => {
1445 if !last_bcx.terminated.get() {
1446 Br(last_bcx, llreturn, DebugLoc::None);
1447 }
1448 raw_block(fcx, false, llreturn)
1449 }
1450 None => last_bcx
1451 };
1452
1453 // This shouldn't need to recompute the return type,
1454 // as new_fn_ctxt did it already.
1455 let substd_retty = fcx.monomorphize(&retty);
1456 build_return_block(fcx, ret_cx, substd_retty, ret_debug_loc);
1457
1458 debuginfo::clear_source_location(fcx);
1459 fcx.cleanup();
1460 }
1461
1462 // Builds the return block for a function.
1463 pub fn build_return_block<'blk, 'tcx>(fcx: &FunctionContext<'blk, 'tcx>,
1464 ret_cx: Block<'blk, 'tcx>,
1465 retty: ty::FnOutput<'tcx>,
1466 ret_debug_location: DebugLoc) {
1467 if fcx.llretslotptr.get().is_none() ||
1468 (!fcx.needs_ret_allocas && fcx.caller_expects_out_pointer) {
1469 return RetVoid(ret_cx, ret_debug_location);
1470 }
1471
1472 let retslot = if fcx.needs_ret_allocas {
1473 Load(ret_cx, fcx.llretslotptr.get().unwrap())
1474 } else {
1475 fcx.llretslotptr.get().unwrap()
1476 };
1477 let retptr = Value(retslot);
1478 match retptr.get_dominating_store(ret_cx) {
1479 // If there's only a single store to the ret slot, we can directly return
1480 // the value that was stored and omit the store and the alloca
1481 Some(s) => {
1482 let retval = s.get_operand(0).unwrap().get();
1483 s.erase_from_parent();
1484
1485 if retptr.has_no_uses() {
1486 retptr.erase_from_parent();
1487 }
1488
1489 let retval = if retty == ty::FnConverging(fcx.ccx.tcx().types.bool) {
1490 Trunc(ret_cx, retval, Type::i1(fcx.ccx))
1491 } else {
1492 retval
1493 };
1494
1495 if fcx.caller_expects_out_pointer {
1496 if let ty::FnConverging(retty) = retty {
1497 store_ty(ret_cx, retval, get_param(fcx.llfn, 0), retty);
1498 }
1499 RetVoid(ret_cx, ret_debug_location)
1500 } else {
1501 Ret(ret_cx, retval, ret_debug_location)
1502 }
1503 }
1504 // Otherwise, copy the return value to the ret slot
1505 None => match retty {
1506 ty::FnConverging(retty) => {
1507 if fcx.caller_expects_out_pointer {
1508 memcpy_ty(ret_cx, get_param(fcx.llfn, 0), retslot, retty);
1509 RetVoid(ret_cx, ret_debug_location)
1510 } else {
1511 Ret(ret_cx, load_ty(ret_cx, retslot, retty), ret_debug_location)
1512 }
1513 }
1514 ty::FnDiverging => {
1515 if fcx.caller_expects_out_pointer {
1516 RetVoid(ret_cx, ret_debug_location)
1517 } else {
1518 Ret(ret_cx, C_undef(Type::nil(fcx.ccx)), ret_debug_location)
1519 }
1520 }
1521 }
1522 }
1523 }
1524
1525 /// Builds an LLVM function out of a source function.
1526 ///
1527 /// If the function closes over its environment a closure will be returned.
1528 pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1529 decl: &ast::FnDecl,
1530 body: &ast::Block,
1531 llfndecl: ValueRef,
1532 param_substs: &'tcx Substs<'tcx>,
1533 fn_ast_id: ast::NodeId,
1534 _attributes: &[ast::Attribute],
1535 output_type: ty::FnOutput<'tcx>,
1536 abi: Abi,
1537 closure_env: closure::ClosureEnv<'b>) {
1538 ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);
1539
1540 let _icx = push_ctxt("trans_closure");
1541 attributes::emit_uwtable(llfndecl, true);
1542
1543 debug!("trans_closure(..., param_substs={:?})",
1544 param_substs);
1545
1546 let has_env = match closure_env {
1547 closure::ClosureEnv::Closure(_) => true,
1548 closure::ClosureEnv::NotClosure => false,
1549 };
1550
1551 let (arena, fcx): (TypedArena<_>, FunctionContext);
1552 arena = TypedArena::new();
1553 fcx = new_fn_ctxt(ccx,
1554 llfndecl,
1555 fn_ast_id,
1556 has_env,
1557 output_type,
1558 param_substs,
1559 Some(body.span),
1560 &arena);
1561 let mut bcx = init_function(&fcx, false, output_type);
1562
1563 // cleanup scope for the incoming arguments
1564 let fn_cleanup_debug_loc =
1565 debuginfo::get_cleanup_debug_loc_for_ast_node(ccx, fn_ast_id, body.span, true);
1566 let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc);
1567
1568 let block_ty = node_id_type(bcx, body.id);
1569
1570 // Set up arguments to the function.
1571 let monomorphized_arg_types =
1572 decl.inputs.iter()
1573 .map(|arg| node_id_type(bcx, arg.id))
1574 .collect::<Vec<_>>();
1575 let monomorphized_arg_types = match closure_env {
1576 closure::ClosureEnv::NotClosure => {
1577 monomorphized_arg_types
1578 }
1579
1580 // Tuple up closure argument types for the "rust-call" ABI.
1581 closure::ClosureEnv::Closure(_) => {
1582 vec![ty::mk_tup(ccx.tcx(), monomorphized_arg_types)]
1583 }
1584 };
1585 for monomorphized_arg_type in &monomorphized_arg_types {
1586 debug!("trans_closure: monomorphized_arg_type: {:?}",
1587 monomorphized_arg_type);
1588 }
1589 debug!("trans_closure: function lltype: {}",
1590 bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn));
1591
1592 let arg_datums = match closure_env {
1593 closure::ClosureEnv::NotClosure if abi == RustCall => {
1594 create_datums_for_fn_args_under_call_abi(bcx, arg_scope, &monomorphized_arg_types[..])
1595 }
1596 _ => {
1597 let arg_tys = untuple_arguments_if_necessary(ccx, &monomorphized_arg_types, abi);
1598 create_datums_for_fn_args(bcx, &arg_tys)
1599 }
1600 };
1601
1602 bcx = copy_args_to_allocas(bcx, arg_scope, &decl.inputs, arg_datums);
1603
1604 bcx = closure_env.load(bcx, cleanup::CustomScope(arg_scope));
1605
1606 // Up until here, IR instructions for this function have explicitly not been annotated with
1607 // source code location, so we don't step into call setup code. From here on, source location
1608 // emitting should be enabled.
1609 debuginfo::start_emitting_source_locations(&fcx);
1610
1611 let dest = match fcx.llretslotptr.get() {
1612 Some(_) => expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(block_ty), "iret_slot")),
1613 None => {
1614 assert!(type_is_zero_size(bcx.ccx(), block_ty));
1615 expr::Ignore
1616 }
1617 };
1618
1619 // This call to trans_block is the place where we bridge between
1620 // translation calls that don't have a return value (trans_crate,
1621 // trans_mod, trans_item, et cetera) and those that do
1622 // (trans_block, trans_expr, et cetera).
1623 bcx = controlflow::trans_block(bcx, body, dest);
1624
1625 match dest {
1626 expr::SaveIn(slot) if fcx.needs_ret_allocas => {
1627 Store(bcx, slot, fcx.llretslotptr.get().unwrap());
1628 }
1629 _ => {}
1630 }
1631
1632 match fcx.llreturn.get() {
1633 Some(_) => {
1634 Br(bcx, fcx.return_exit_block(), DebugLoc::None);
1635 fcx.pop_custom_cleanup_scope(arg_scope);
1636 }
1637 None => {
1638 // Microoptimization writ large: avoid creating a separate
1639 // llreturn basic block
1640 bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_scope);
1641 }
1642 };
1643
1644 // Put return block after all other blocks.
1645 // This somewhat improves single-stepping experience in debugger.
1646 unsafe {
1647 let llreturn = fcx.llreturn.get();
1648 if let Some(llreturn) = llreturn {
1649 llvm::LLVMMoveBasicBlockAfter(llreturn, bcx.llbb);
1650 }
1651 }
1652
1653 let ret_debug_loc = DebugLoc::At(fn_cleanup_debug_loc.id,
1654 fn_cleanup_debug_loc.span);
1655
1656 // Insert the mandatory first few basic blocks before lltop.
1657 finish_fn(&fcx, bcx, output_type, ret_debug_loc);
1658 }
1659
1660 /// Creates an LLVM function corresponding to a source language function.
1661 pub fn trans_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1662 decl: &ast::FnDecl,
1663 body: &ast::Block,
1664 llfndecl: ValueRef,
1665 param_substs: &'tcx Substs<'tcx>,
1666 id: ast::NodeId,
1667 attrs: &[ast::Attribute]) {
1668 let _s = StatRecorder::new(ccx, ccx.tcx().map.path_to_string(id).to_string());
1669 debug!("trans_fn(param_substs={:?})", param_substs);
1670 let _icx = push_ctxt("trans_fn");
1671 let fn_ty = ty::node_id_to_type(ccx.tcx(), id);
1672 let output_type = ty::erase_late_bound_regions(ccx.tcx(), &ty::ty_fn_ret(fn_ty));
1673 let abi = ty::ty_fn_abi(fn_ty);
1674 trans_closure(ccx, decl, body, llfndecl, param_substs, id, attrs, output_type, abi,
1675 closure::ClosureEnv::NotClosure);
1676 }
1677
1678 pub fn trans_enum_variant<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1679 _enum_id: ast::NodeId,
1680 variant: &ast::Variant,
1681 _args: &[ast::VariantArg],
1682 disr: ty::Disr,
1683 param_substs: &'tcx Substs<'tcx>,
1684 llfndecl: ValueRef) {
1685 let _icx = push_ctxt("trans_enum_variant");
1686
1687 trans_enum_variant_or_tuple_like_struct(
1688 ccx,
1689 variant.node.id,
1690 disr,
1691 param_substs,
1692 llfndecl);
1693 }
1694
1695 pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
1696 ctor_ty: Ty<'tcx>,
1697 disr: ty::Disr,
1698 args: callee::CallArgs,
1699 dest: expr::Dest,
1700 debug_loc: DebugLoc)
1701 -> Result<'blk, 'tcx> {
1702
1703 let ccx = bcx.fcx.ccx;
1704
1705 let result_ty = match ctor_ty.sty {
1706 ty::TyBareFn(_, ref bft) => {
1707 ty::erase_late_bound_regions(bcx.tcx(), &bft.sig.output()).unwrap()
1708 }
1709 _ => ccx.sess().bug(
1710 &format!("trans_enum_variant_constructor: \
1711 unexpected ctor return type {}",
1712 ctor_ty))
1713 };
1714
1715 // Get a location to store the result. If the user does not care about
1716 // the result, just make a stack slot.
1717 let llresult = match dest {
1718 expr::SaveIn(d) => d,
1719 expr::Ignore => {
1720 if !type_is_zero_size(ccx, result_ty) {
1721 alloc_ty(bcx, result_ty, "constructor_result")
1722 } else {
1723 C_undef(type_of::type_of(ccx, result_ty).ptr_to())
1724 }
1725 }
1726 };
1727
1728 if !type_is_zero_size(ccx, result_ty) {
1729 match args {
1730 callee::ArgExprs(exprs) => {
1731 let fields = exprs.iter().map(|x| &**x).enumerate().collect::<Vec<_>>();
1732 bcx = expr::trans_adt(bcx,
1733 result_ty,
1734 disr,
1735 &fields[..],
1736 None,
1737 expr::SaveIn(llresult),
1738 debug_loc);
1739 }
1740 _ => ccx.sess().bug("expected expr as arguments for variant/struct tuple constructor")
1741 }
1742 }
1743
1744 // If the caller doesn't care about the result,
1745 // drop the temporary we made.
1746 let bcx = match dest {
1747 expr::SaveIn(_) => bcx,
1748 expr::Ignore => {
1749 let bcx = glue::drop_ty(bcx, llresult, result_ty, debug_loc);
1750 if !type_is_zero_size(ccx, result_ty) {
1751 call_lifetime_end(bcx, llresult);
1752 }
1753 bcx
1754 }
1755 };
1756
1757 Result::new(bcx, llresult)
1758 }
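// Illustrative note (not part of the original source): the path above handles
// tuple-like constructors applied as ordinary calls, e.g.:
//
//     struct Point(i32, i32);
//     enum Shape { Circle(f64), Rect(f64, f64) }
//
//     let p = Point(1, 2);            // disr = 0, tuple-struct case
//     let s = Shape::Rect(2.0, 3.0);  // disr selects the Rect variant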
1759
1760 pub fn trans_tuple_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1761 _fields: &[ast::StructField],
1762 ctor_id: ast::NodeId,
1763 param_substs: &'tcx Substs<'tcx>,
1764 llfndecl: ValueRef) {
1765 let _icx = push_ctxt("trans_tuple_struct");
1766
1767 trans_enum_variant_or_tuple_like_struct(
1768 ccx,
1769 ctor_id,
1770 0,
1771 param_substs,
1772 llfndecl);
1773 }
1774
1775 fn trans_enum_variant_or_tuple_like_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1776 ctor_id: ast::NodeId,
1777 disr: ty::Disr,
1778 param_substs: &'tcx Substs<'tcx>,
1779 llfndecl: ValueRef) {
1780 let ctor_ty = ty::node_id_to_type(ccx.tcx(), ctor_id);
1781 let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty);
1782
1783 let result_ty = match ctor_ty.sty {
1784 ty::TyBareFn(_, ref bft) => {
1785 ty::erase_late_bound_regions(ccx.tcx(), &bft.sig.output())
1786 }
1787 _ => ccx.sess().bug(
1788 &format!("trans_enum_variant_or_tuple_like_struct: \
1789 unexpected ctor return type {}",
1790 ctor_ty))
1791 };
1792
1793 let (arena, fcx): (TypedArena<_>, FunctionContext);
1794 arena = TypedArena::new();
1795 fcx = new_fn_ctxt(ccx, llfndecl, ctor_id, false, result_ty,
1796 param_substs, None, &arena);
1797 let bcx = init_function(&fcx, false, result_ty);
1798
1799 assert!(!fcx.needs_ret_allocas);
1800
1801 let arg_tys =
1802 ty::erase_late_bound_regions(
1803 ccx.tcx(), &ty::ty_fn_args(ctor_ty));
1804
1805 let arg_datums = create_datums_for_fn_args(bcx, &arg_tys[..]);
1806
1807 if !type_is_zero_size(fcx.ccx, result_ty.unwrap()) {
1808 let dest = fcx.get_ret_slot(bcx, result_ty, "eret_slot");
1809 let repr = adt::represent_type(ccx, result_ty.unwrap());
1810 for (i, arg_datum) in arg_datums.into_iter().enumerate() {
1811 let lldestptr = adt::trans_field_ptr(bcx,
1812 &*repr,
1813 dest,
1814 disr,
1815 i);
1816 arg_datum.store_to(bcx, lldestptr);
1817 }
1818 adt::trans_set_discr(bcx, &*repr, dest, disr);
1819 }
1820
1821 finish_fn(&fcx, bcx, result_ty, DebugLoc::None);
1822 }
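// In Rust-flavoured pseudocode, the constructor body generated above amounts
// to the following (names are illustrative only, not actual identifiers):
//
//     fn ctor(arg0: T0, /* ... */ argN: TN) -> R {
//         let ret: R;                  // the return slot
//         ret.field_i = arg_i;         // one store per argument datum
//         set_discr(&mut ret, disr);   // tag the chosen variant
//         ret
//     }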
1823
1824 fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &ast::EnumDef, sp: Span, id: ast::NodeId) {
1825 let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully
1826
1827 let print_info = ccx.sess().print_enum_sizes();
1828
1829 let levels = ccx.tcx().node_lint_levels.borrow();
1830 let lint_id = lint::LintId::of(lint::builtin::VARIANT_SIZE_DIFFERENCES);
1831 let lvlsrc = levels.get(&(id, lint_id));
1832 let is_allow = lvlsrc.map_or(true, |&(lvl, _)| lvl == lint::Allow);
1833
1834 if is_allow && !print_info {
1835 // we're not interested in anything here
1836 return
1837 }
1838
1839 let ty = ty::node_id_to_type(ccx.tcx(), id);
1840 let avar = adt::represent_type(ccx, ty);
1841 match *avar {
1842 adt::General(_, ref variants, _) => {
1843 for var in variants {
1844 let mut size = 0;
1845 for field in var.fields.iter().skip(1) {
1846 // skip the discriminant
1847 size += llsize_of_real(ccx, sizing_type_of(ccx, *field));
1848 }
1849 sizes.push(size);
1850 }
1851 },
1852 _ => { /* its size is either constant or unimportant */ }
1853 }
1854
1855 let (largest, slargest, largest_index) = sizes.iter().enumerate().fold((0, 0, 0),
1856 |(l, s, li), (idx, &size)|
1857 if size > l {
1858 (size, l, idx)
1859 } else if size > s {
1860 (l, size, li)
1861 } else {
1862 (l, s, li)
1863 }
1864 );
1865
1866 if print_info {
1867 let llty = type_of::sizing_type_of(ccx, ty);
1868
1869 let sess = &ccx.tcx().sess;
1870 sess.span_note(sp, &*format!("total size: {} bytes", llsize_of_real(ccx, llty)));
1871 match *avar {
1872 adt::General(..) => {
1873 for (i, var) in enum_def.variants.iter().enumerate() {
1874 ccx.tcx().sess.span_note(var.span,
1875 &*format!("variant data: {} bytes", sizes[i]));
1876 }
1877 }
1878 _ => {}
1879 }
1880 }
1881
1882 // We only warn if the largest variant is more than three times as large
1883 // as the second-largest (matching the check below).
1884 if !is_allow && largest > slargest * 3 && slargest > 0 {
1885 // Use lint::raw_emit_lint rather than sess.add_lint because the lint-printing
1886 // pass for the latter already ran.
1887 lint::raw_emit_lint(&ccx.tcx().sess, lint::builtin::VARIANT_SIZE_DIFFERENCES,
1888 *lvlsrc.unwrap(), Some(sp),
1889 &format!("enum variant is more than three times larger \
1890 ({} bytes) than the next largest (ignoring padding)",
1891 largest));
1892
1893 ccx.sess().span_note(enum_def.variants[largest_index].span,
1894 "this variant is the largest");
1895 }
1896 }
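// A hedged sketch of user code that would trip the lint above -- the largest
// variant's payload is more than three times the size of the second-largest:
//
//     enum Message {
//         Quit,                        // 0 bytes of payload
//         Move { x: i32, y: i32 },     // 8 bytes of payload
//         Buffer([u8; 256]),           // 256 bytes -- warned about
//     }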
1897
1898 pub struct TransItemVisitor<'a, 'tcx: 'a> {
1899 pub ccx: &'a CrateContext<'a, 'tcx>,
1900 }
1901
1902 impl<'a, 'tcx, 'v> Visitor<'v> for TransItemVisitor<'a, 'tcx> {
1903 fn visit_item(&mut self, i: &ast::Item) {
1904 trans_item(self.ccx, i);
1905 }
1906 }
1907
1908 pub fn llvm_linkage_by_name(name: &str) -> Option<Linkage> {
1909 // Use the names from src/llvm/docs/LangRef.rst here. Most types are only
1910 // applicable to variable declarations and may not really make sense for
1911 // Rust code in the first place but whitelist them anyway and trust that
1912 // the user knows what s/he's doing. Who knows, unanticipated use cases
1913 // may pop up in the future.
1914 //
1915 // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
1916 // and don't have to be; LLVM treats them as no-ops.
1917 match name {
1918 "appending" => Some(llvm::AppendingLinkage),
1919 "available_externally" => Some(llvm::AvailableExternallyLinkage),
1920 "common" => Some(llvm::CommonLinkage),
1921 "extern_weak" => Some(llvm::ExternalWeakLinkage),
1922 "external" => Some(llvm::ExternalLinkage),
1923 "internal" => Some(llvm::InternalLinkage),
1924 "linkonce" => Some(llvm::LinkOnceAnyLinkage),
1925 "linkonce_odr" => Some(llvm::LinkOnceODRLinkage),
1926 "private" => Some(llvm::PrivateLinkage),
1927 "weak" => Some(llvm::WeakAnyLinkage),
1928 "weak_odr" => Some(llvm::WeakODRLinkage),
1929 _ => None,
1930 }
1931 }
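// Illustrative use from the Rust side (hypothetical item; the `linkage`
// attribute was feature-gated at the time). The string is resolved through
// llvm_linkage_by_name by update_linkage below:
//
//     #[linkage = "external"]
//     static SHARED: u32 = 0;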
1932
1933
1934 /// Enum describing the origin of an LLVM `Value`, for linkage purposes.
1935 #[derive(Copy, Clone)]
1936 pub enum ValueOrigin {
1937 /// The LLVM `Value` is in this context because the corresponding item was
1938 /// assigned to the current compilation unit.
1939 OriginalTranslation,
1940 /// The `Value`'s corresponding item was assigned to some other compilation
1941 /// unit, but the `Value` was translated in this context anyway because the
1942 /// item is marked `#[inline]`.
1943 InlinedCopy,
1944 }
1945
1946 /// Set the appropriate linkage for an LLVM `ValueRef` (function or global).
1947 /// If the `llval` is the direct translation of a specific Rust item, `id`
1948 /// should be set to the `NodeId` of that item. (This mapping should be
1949 /// 1-to-1, so monomorphizations and drop/visit glue should have `id` set to
1950 /// `None`.) `llval_origin` indicates whether `llval` is the translation of an
1951 /// item assigned to `ccx`'s compilation unit or an inlined copy of an item
1952 /// assigned to a different compilation unit.
1953 pub fn update_linkage(ccx: &CrateContext,
1954 llval: ValueRef,
1955 id: Option<ast::NodeId>,
1956 llval_origin: ValueOrigin) {
1957 match llval_origin {
1958 InlinedCopy => {
1959 // `llval` is a translation of an item defined in a separate
1960 // compilation unit. This only makes sense if there are at least
1961 // two compilation units.
1962 assert!(ccx.sess().opts.cg.codegen_units > 1);
1963 // `llval` is a copy of something defined elsewhere, so use
1964 // `AvailableExternallyLinkage` to avoid duplicating code in the
1965 // output.
1966 llvm::SetLinkage(llval, llvm::AvailableExternallyLinkage);
1967 return;
1968 },
1969 OriginalTranslation => {},
1970 }
1971
1972 if let Some(id) = id {
1973 let item = ccx.tcx().map.get(id);
1974 if let ast_map::NodeItem(i) = item {
1975 if let Some(name) = attr::first_attr_value_str_by_name(&i.attrs, "linkage") {
1976 if let Some(linkage) = llvm_linkage_by_name(&name) {
1977 llvm::SetLinkage(llval, linkage);
1978 } else {
1979 ccx.sess().span_fatal(i.span, "invalid linkage specified");
1980 }
1981 return;
1982 }
1983 }
1984 }
1985
1986 match id {
1987 Some(id) if ccx.reachable().contains(&id) => {
1988 llvm::SetLinkage(llval, llvm::ExternalLinkage);
1989 if ccx.use_dll_storage_attrs() {
1990 llvm::SetDLLStorageClass(llval, llvm::DLLExportStorageClass);
1991 }
1992 },
1993 _ => {
1994 // `id` does not refer to an item in `ccx.reachable`.
1995 if ccx.sess().opts.cg.codegen_units > 1 {
1996 llvm::SetLinkage(llval, llvm::ExternalLinkage);
1997 if ccx.use_dll_storage_attrs() {
1998 llvm::SetDLLStorageClass(llval, llvm::DLLExportStorageClass);
1999 }
2000 } else {
2001 llvm::SetLinkage(llval, llvm::InternalLinkage);
2002 }
2003 },
2004 }
2005 }
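// Decision summary for update_linkage, as implemented above:
//
//     InlinedCopy                        -> available_externally
//     item with #[linkage = "..."]       -> that linkage, verbatim
//     id in ccx.reachable()              -> external (+ dllexport on Windows)
//     otherwise, with codegen-units > 1  -> external
//     otherwise                          -> internal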
2006
2007 pub fn trans_item(ccx: &CrateContext, item: &ast::Item) {
2008 let _icx = push_ctxt("trans_item");
2009
2010 let from_external = ccx.external_srcs().borrow().contains_key(&item.id);
2011
2012 match item.node {
2013 ast::ItemFn(ref decl, _, _, abi, ref generics, ref body) => {
2014 if !generics.is_type_parameterized() {
2015 let trans_everywhere = attr::requests_inline(&item.attrs);
2016 // Ignore `trans_everywhere` for cross-crate inlined items
2017 // (`from_external`). `trans_item` will be called once for each
2018 // compilation unit that references the item, so it will still get
2019 // translated everywhere it's needed.
2020 for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
2021 let llfn = get_item_val(ccx, item.id);
2022 let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
2023 if abi != Rust {
2024 foreign::trans_rust_fn_with_foreign_abi(ccx, &**decl, &**body, &item.attrs,
2025 llfn, empty_substs, item.id, None);
2026 } else {
2027 trans_fn(ccx, &**decl, &**body, llfn, empty_substs, item.id, &item.attrs);
2028 }
2029 update_linkage(ccx, llfn, Some(item.id),
2030 if is_origin { OriginalTranslation } else { InlinedCopy });
2031
2032 if is_entry_fn(ccx.sess(), item.id) {
2033 create_entry_wrapper(ccx, item.span, llfn);
2034 // check for the #[rustc_error] annotation, which forces an
2035 // error in trans. This is used to write compile-fail tests: the
2036 // deliberate error emitted here proves that compilation succeeded
2037 // up to this point without reporting any other error.
2038 if ty::has_attr(ccx.tcx(), local_def(item.id), "rustc_error") {
2039 ccx.tcx().sess.span_fatal(item.span, "compilation successful");
2040 }
2041 }
2042 }
2043 }
2044
2045 // Be sure to travel more than just one layer deep to catch nested
2046 // items in blocks and such.
2047 let mut v = TransItemVisitor{ ccx: ccx };
2048 v.visit_block(&**body);
2049 }
2050 ast::ItemImpl(_, _, ref generics, _, _, ref impl_items) => {
2051 meth::trans_impl(ccx,
2052 item.ident,
2053 &impl_items[..],
2054 generics,
2055 item.id);
2056 }
2057 ast::ItemMod(ref m) => {
2058 trans_mod(&ccx.rotate(), m);
2059 }
2060 ast::ItemEnum(ref enum_definition, ref gens) => {
2061 if gens.ty_params.is_empty() {
2062 // sizes only make sense for non-generic types
2063
2064 enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
2065 }
2066 }
2067 ast::ItemConst(_, ref expr) => {
2068 // Recurse on the expression to catch items in blocks
2069 let mut v = TransItemVisitor{ ccx: ccx };
2070 v.visit_expr(&**expr);
2071 }
2072 ast::ItemStatic(_, m, ref expr) => {
2073 // Recurse on the expression to catch items in blocks
2074 let mut v = TransItemVisitor{ ccx: ccx };
2075 v.visit_expr(&**expr);
2076
2077 let g = consts::trans_static(ccx, m, item.id);
2078 update_linkage(ccx, g, Some(item.id), OriginalTranslation);
2079 },
2080 ast::ItemForeignMod(ref foreign_mod) => {
2081 foreign::trans_foreign_mod(ccx, foreign_mod);
2082 }
2083 ast::ItemTrait(..) => {
2084 // Inside of this trait definition, we won't be actually translating any
2085 // functions, but the trait still needs to be walked. Otherwise default
2086 // methods with items will not get translated and will cause ICE's when
2087 // metadata time comes around.
2088 let mut v = TransItemVisitor{ ccx: ccx };
2089 visit::walk_item(&mut v, item);
2090 }
2091 _ => {/* fall through */ }
2092 }
2093 }
2094
2095 // Translate a module. Doing this amounts to translating the items in the
2096 // module; there ends up being no artifact (aside from linkage names) of
2097 // separate modules in the compiled program. That's because modules exist
2098 // only as a convenience for humans working with the code, to organize names
2099 // and control visibility.
2100 pub fn trans_mod(ccx: &CrateContext, m: &ast::Mod) {
2101 let _icx = push_ctxt("trans_mod");
2102 for item in &m.items {
2103 trans_item(ccx, &**item);
2104 }
2105 }
2106
2107
2108 // Only use this for foreign function ABIs and glue; use `register_fn` for Rust functions.
2109 pub fn register_fn_llvmty(ccx: &CrateContext,
2110 sp: Span,
2111 sym: String,
2112 node_id: ast::NodeId,
2113 cc: llvm::CallConv,
2114 llfty: Type) -> ValueRef {
2115 debug!("register_fn_llvmty id={} sym={}", node_id, sym);
2116
2117 let llfn = declare::define_fn(ccx, &sym[..], cc, llfty,
2118 ty::FnConverging(ty::mk_nil(ccx.tcx()))).unwrap_or_else(||{
2119 ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym));
2120 });
2121 finish_register_fn(ccx, sym, node_id, llfn);
2122 llfn
2123 }
2124
2125 fn finish_register_fn(ccx: &CrateContext, sym: String, node_id: ast::NodeId,
2126 llfn: ValueRef) {
2127 ccx.item_symbols().borrow_mut().insert(node_id, sym);
2128
2129 // The stack exhaustion lang item shouldn't have a split stack because
2130 // otherwise it would continue to be exhausted (bad), and both it and the
2131 // eh_personality functions need to be externally linkable.
2132 let def = ast_util::local_def(node_id);
2133 if ccx.tcx().lang_items.stack_exhausted() == Some(def) {
2134 attributes::split_stack(llfn, false);
2135 llvm::SetLinkage(llfn, llvm::ExternalLinkage);
2136 if ccx.use_dll_storage_attrs() {
2137 llvm::SetDLLStorageClass(llfn, llvm::DLLExportStorageClass);
2138 }
2139 }
2140 if ccx.tcx().lang_items.eh_personality() == Some(def) {
2141 llvm::SetLinkage(llfn, llvm::ExternalLinkage);
2142 if ccx.use_dll_storage_attrs() {
2143 llvm::SetDLLStorageClass(llfn, llvm::DLLExportStorageClass);
2144 }
2145 }
2146 }
2147
2148 fn register_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
2149 sp: Span,
2150 sym: String,
2151 node_id: ast::NodeId,
2152 node_type: Ty<'tcx>)
2153 -> ValueRef {
2154 if let ty::TyBareFn(_, ref f) = node_type.sty {
2155 if f.abi != Rust && f.abi != RustCall {
2156 ccx.sess().span_bug(sp, &format!("only the `{}` or `{}` calling conventions are valid \
2157 for this function; `{}` was specified",
2158 Rust.name(), RustCall.name(), f.abi.name()));
2159 }
2160 } else {
2161 ccx.sess().span_bug(sp, "expected bare rust function")
2162 }
2163
2164 let llfn = declare::define_rust_fn(ccx, &sym[..], node_type).unwrap_or_else(||{
2165 ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym));
2166 });
2167 finish_register_fn(ccx, sym, node_id, llfn);
2168 llfn
2169 }
2170
2171 pub fn is_entry_fn(sess: &Session, node_id: ast::NodeId) -> bool {
2172 match *sess.entry_fn.borrow() {
2173 Some((entry_id, _)) => node_id == entry_id,
2174 None => false
2175 }
2176 }
2177
2178 /// Create the `main` function, which will initialise the Rust runtime and call the
2179 /// user's `main` function.
2180 pub fn create_entry_wrapper(ccx: &CrateContext,
2181 sp: Span,
2182 main_llfn: ValueRef) {
2183 let et = ccx.sess().entry_type.get().unwrap();
2184 match et {
2185 config::EntryMain => {
2186 create_entry_fn(ccx, sp, main_llfn, true);
2187 }
2188 config::EntryStart => create_entry_fn(ccx, sp, main_llfn, false),
2189 config::EntryNone => {} // Do nothing.
2190 }
2191
2192 fn create_entry_fn(ccx: &CrateContext,
2193 sp: Span,
2194 rust_main: ValueRef,
2195 use_start_lang_item: bool) {
2196 let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()],
2197 &ccx.int_type());
2198
2199 let llfn = declare::define_cfn(ccx, "main", llfty,
2200 ty::mk_nil(ccx.tcx())).unwrap_or_else(||{
2201 ccx.sess().span_err(sp, "entry symbol `main` defined multiple times");
2202 // FIXME: We should be smart and show a better diagnostic here.
2203 ccx.sess().help("did you use #[no_mangle] on `fn main`? Use #[start] instead");
2204 ccx.sess().abort_if_errors();
2205 panic!();
2206 });
2207
2208 // FIXME: #16581: Marking a symbol in the executable with `dllexport`
2209 // linkage forces MinGW's linker to output a `.reloc` section for ASLR
2210 if ccx.sess().target.target.options.is_like_windows {
2211 llvm::SetDLLStorageClass(llfn, llvm::DLLExportStorageClass);
2212 }
2213
2214 let llbb = unsafe {
2215 llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn,
2216 "top\0".as_ptr() as *const _)
2217 };
2218 let bld = ccx.raw_builder();
2219 unsafe {
2220 llvm::LLVMPositionBuilderAtEnd(bld, llbb);
2221
2222 debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx);
2223
2224 let (start_fn, args) = if use_start_lang_item {
2225 let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) {
2226 Ok(id) => id,
2227 Err(s) => { ccx.sess().fatal(&s[..]); }
2228 };
2229 let start_fn = if start_def_id.krate == ast::LOCAL_CRATE {
2230 get_item_val(ccx, start_def_id.node)
2231 } else {
2232 let start_fn_type = csearch::get_type(ccx.tcx(),
2233 start_def_id).ty;
2234 trans_external_path(ccx, start_def_id, start_fn_type)
2235 };
2236
2237 let args = {
2238 let opaque_rust_main = llvm::LLVMBuildPointerCast(bld,
2239 rust_main, Type::i8p(ccx).to_ref(),
2240 "rust_main\0".as_ptr() as *const _);
2241
2242 vec!(
2243 opaque_rust_main,
2244 get_param(llfn, 0),
2245 get_param(llfn, 1)
2246 )
2247 };
2248 (start_fn, args)
2249 } else {
2250 debug!("using user-defined start fn");
2251 let args = vec!(
2252 get_param(llfn, 0 as c_uint),
2253 get_param(llfn, 1 as c_uint)
2254 );
2255
2256 (rust_main, args)
2257 };
2258
2259 let result = llvm::LLVMBuildCall(bld,
2260 start_fn,
2261 args.as_ptr(),
2262 args.len() as c_uint,
2263 noname());
2264
2265 llvm::LLVMBuildRet(bld, result);
2266 }
2267 }
2268 }
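// Hedged sketch of what the generated entry point amounts to in the
// EntryMain case (names are illustrative, not actual symbols):
//
//     fn main(argc: isize, argv: *const *const u8) -> isize {
//         lang_start(rust_main as *const u8, argc, argv)
//     }
//
// With EntryStart (a user-defined #[start] function), rust_main is instead
// called directly with (argc, argv).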
2269
2270 fn exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, id: ast::NodeId,
2271 ty: Ty<'tcx>, attrs: &[ast::Attribute]) -> String {
2272 match ccx.external_srcs().borrow().get(&id) {
2273 Some(&did) => {
2274 let sym = csearch::get_symbol(&ccx.sess().cstore, did);
2275 debug!("found item {} in other crate...", sym);
2276 return sym;
2277 }
2278 None => {}
2279 }
2280
2281 match attr::find_export_name_attr(ccx.sess().diagnostic(), attrs) {
2282 // Use provided name
2283 Some(name) => name.to_string(),
2284 _ => ccx.tcx().map.with_path(id, |path| {
2285 if attr::contains_name(attrs, "no_mangle") {
2286 // Don't mangle
2287 path.last().unwrap().to_string()
2288 } else {
2289 match weak_lang_items::link_name(attrs) {
2290 Some(name) => name.to_string(),
2291 None => {
2292 // Usual name mangling
2293 mangle_exported_name(ccx, path, ty, id)
2294 }
2295 }
2296 }
2297 })
2298 }
2299 }
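// Hypothetical examples of how the branches above fire (items inlined from
// another crate short-circuit to that crate's symbol via csearch):
//
//     #[export_name = "my_symbol"] fn a() {}  // uses the provided name
//     #[no_mangle] fn b() {}                  // keeps the last path segment
//     fn c() {}                               // gets the usual mangled name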
2300
2301 fn contains_null(s: &str) -> bool {
2302 s.bytes().any(|b| b == 0)
2303 }
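// e.g. contains_null("a\0b") is true, contains_null("ab") is false.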
2304
2305 pub fn get_item_val(ccx: &CrateContext, id: ast::NodeId) -> ValueRef {
2306 debug!("get_item_val(id=`{}`)", id);
2307
2308 match ccx.item_vals().borrow().get(&id).cloned() {
2309 Some(v) => return v,
2310 None => {}
2311 }
2312
2313 let item = ccx.tcx().map.get(id);
2314 debug!("get_item_val: id={} item={:?}", id, item);
2315 let val = match item {
2316 ast_map::NodeItem(i) => {
2317 let ty = ty::node_id_to_type(ccx.tcx(), i.id);
2318 let sym = || exported_name(ccx, id, ty, &i.attrs);
2319
2320 let v = match i.node {
2321 ast::ItemStatic(_, _, ref expr) => {
2322 // If this static came from an external crate, then
2323 // we need to get the symbol from csearch instead of
2324 // using the current crate's name/version
2325 // information in the hash of the symbol
2326 let sym = sym();
2327 debug!("making {}", sym);
2328
2329 // We need the translated value here, because for enums the
2330 // LLVM type is not fully determined by the Rust type.
2331 let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
2332 let (v, ty) = consts::const_expr(ccx, &**expr, empty_substs, None);
2333 ccx.static_values().borrow_mut().insert(id, v);
2334 unsafe {
2335 // boolean SSA values are i1, but they have to be stored in i8 slots;
2336 // otherwise some LLVM optimization passes don't work as expected.
2337 let llty = if ty::type_is_bool(ty) {
2338 llvm::LLVMInt8TypeInContext(ccx.llcx())
2339 } else {
2340 llvm::LLVMTypeOf(v)
2341 };
2342
2343 // FIXME(nagisa): this should probably be declare_global, because no
2344 // definition is happening here, but we depend on it being defined here
2345 // by consts::trans_static. All of this logic should be replaced.
2346 let g = declare::define_global(ccx, &sym[..],
2347 Type::from_ref(llty)).unwrap_or_else(||{
2348 ccx.sess().span_fatal(i.span, &format!("symbol `{}` is already defined",
2349 sym))
2350 });
2351
2352 if attr::contains_name(&i.attrs,
2353 "thread_local") {
2354 llvm::set_thread_local(g, true);
2355 }
2356 ccx.item_symbols().borrow_mut().insert(i.id, sym);
2357 g
2358 }
2359 }
2360
2361 ast::ItemFn(_, _, _, abi, _, _) => {
2362 let sym = sym();
2363 let llfn = if abi == Rust {
2364 register_fn(ccx, i.span, sym, i.id, ty)
2365 } else {
2366 foreign::register_rust_fn_with_foreign_abi(ccx, i.span, sym, i.id)
2367 };
2368 attributes::from_fn_attrs(ccx, &i.attrs, llfn);
2369 llfn
2370 }
2371
2372 _ => ccx.sess().bug("get_item_val: weird result in table")
2373 };
2374
2375 match attr::first_attr_value_str_by_name(&i.attrs,
2376 "link_section") {
2377 Some(sect) => {
2378 if contains_null(&sect) {
2379 ccx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`",
2380 &sect));
2381 }
2382 unsafe {
2383 let buf = CString::new(sect.as_bytes()).unwrap();
2384 llvm::LLVMSetSection(v, buf.as_ptr());
2385 }
2386 },
2387 None => ()
2388 }
2389
2390 v
2391 }
2392
2393 ast_map::NodeTraitItem(trait_item) => {
2394 debug!("get_item_val(): processing a NodeTraitItem");
2395 match trait_item.node {
2396 ast::MethodTraitItem(_, Some(_)) => {
2397 register_method(ccx, id, &trait_item.attrs, trait_item.span)
2398 }
2399 _ => {
2400 ccx.sess().span_bug(trait_item.span,
2401 "unexpected variant: trait item other than a provided \
2402 method in get_item_val()");
2403 }
2404 }
2405 }
2406
2407 ast_map::NodeImplItem(impl_item) => {
2408 match impl_item.node {
2409 ast::MethodImplItem(..) => {
2410 register_method(ccx, id, &impl_item.attrs, impl_item.span)
2411 }
2412 _ => {
2413 ccx.sess().span_bug(impl_item.span,
2414 "unexpected variant: non-method impl item in \
2415 get_item_val()");
2416 }
2417 }
2418 }
2419
2420 ast_map::NodeForeignItem(ni) => {
2421 match ni.node {
2422 ast::ForeignItemFn(..) => {
2423 let abi = ccx.tcx().map.get_foreign_abi(id);
2424 let ty = ty::node_id_to_type(ccx.tcx(), ni.id);
2425 let name = foreign::link_name(&*ni);
2426 let llfn = foreign::register_foreign_item_fn(ccx, abi, ty, &name);
2427 attributes::from_fn_attrs(ccx, &ni.attrs, llfn);
2428 llfn
2429 }
2430 ast::ForeignItemStatic(..) => {
2431 foreign::register_static(ccx, &*ni)
2432 }
2433 }
2434 }
2435
2436 ast_map::NodeVariant(ref v) => {
2437 let llfn;
2438 let args = match v.node.kind {
2439 ast::TupleVariantKind(ref args) => args,
2440 ast::StructVariantKind(_) => {
2441 ccx.sess().bug("struct variant kind unexpected in get_item_val")
2442 }
2443 };
2444 assert!(!args.is_empty());
2445 let ty = ty::node_id_to_type(ccx.tcx(), id);
2446 let parent = ccx.tcx().map.get_parent(id);
2447 let enm = ccx.tcx().map.expect_item(parent);
2448 let sym = exported_name(ccx,
2449 id,
2450 ty,
2451 &enm.attrs);
2452
2453 llfn = match enm.node {
2454 ast::ItemEnum(_, _) => {
2455 register_fn(ccx, (*v).span, sym, id, ty)
2456 }
2457 _ => ccx.sess().bug("NodeVariant, shouldn't happen")
2458 };
2459 attributes::inline(llfn, attributes::InlineAttr::Hint);
2460 llfn
2461 }
2462
2463 ast_map::NodeStructCtor(struct_def) => {
2464 // Only register the constructor if this is a tuple-like struct.
2465 let ctor_id = match struct_def.ctor_id {
2466 None => {
2467 ccx.sess().bug("attempt to register a constructor of \
2468 a non-tuple-like struct")
2469 }
2470 Some(ctor_id) => ctor_id,
2471 };
2472 let parent = ccx.tcx().map.get_parent(id);
2473 let struct_item = ccx.tcx().map.expect_item(parent);
2474 let ty = ty::node_id_to_type(ccx.tcx(), ctor_id);
2475 let sym = exported_name(ccx,
2476 id,
2477 ty,
2478 &struct_item.attrs);
2479 let llfn = register_fn(ccx, struct_item.span,
2480 sym, ctor_id, ty);
2481 attributes::inline(llfn, attributes::InlineAttr::Hint);
2482 llfn
2483 }
2484
2485 ref variant => {
2486 ccx.sess().bug(&format!("get_item_val(): unexpected variant: {:?}",
2487 variant))
2488 }
2489 };
2490
2491 // All LLVM globals and functions are initially created as external-linkage
2492 // declarations. If `trans_item`/`trans_fn` later turns the declaration
2493 // into a definition, it adjusts the linkage then (using `update_linkage`).
2494 //
2495 // The exception is foreign items, which have their linkage set inside the
2496 // call to `foreign::register_*` above. We don't touch the linkage after
2497 // that (`foreign::trans_foreign_mod` doesn't adjust the linkage like the
2498 // other item translation functions do).
2499
2500 ccx.item_vals().borrow_mut().insert(id, val);
2501 val
2502 }
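// Hypothetical user items exercising the attribute handling above:
//
//     #[thread_local]
//     static COUNTER: u32 = 0;          // llvm::set_thread_local(g, true)
//
//     #[link_section = ".boot"]
//     static TABLE: [u8; 4] = [0; 4];   // placed via llvm::LLVMSetSection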
2503
2504 fn register_method(ccx: &CrateContext, id: ast::NodeId,
2505 attrs: &[ast::Attribute], span: Span) -> ValueRef {
2506 let mty = ty::node_id_to_type(ccx.tcx(), id);
2507
2508 let sym = exported_name(ccx, id, mty, &attrs);
2509
2510 if let ty::TyBareFn(_, ref f) = mty.sty {
2511 let llfn = if f.abi == Rust || f.abi == RustCall {
2512 register_fn(ccx, span, sym, id, mty)
2513 } else {
2514 foreign::register_rust_fn_with_foreign_abi(ccx, span, sym, id)
2515 };
2516 attributes::from_fn_attrs(ccx, &attrs, llfn);
2517 return llfn;
2518 } else {
2519 ccx.sess().span_bug(span, "expected bare rust function");
2520 }
2521 }
2522
2523 pub fn crate_ctxt_to_encode_parms<'a, 'tcx>(cx: &'a SharedCrateContext<'a, 'tcx>,
2524 ie: encoder::EncodeInlinedItem<'a>)
2525 -> encoder::EncodeParams<'a, 'tcx> {
2526 encoder::EncodeParams {
2527 diag: cx.sess().diagnostic(),
2528 tcx: cx.tcx(),
2529 reexports: cx.export_map(),
2530 item_symbols: cx.item_symbols(),
2531 link_meta: cx.link_meta(),
2532 cstore: &cx.sess().cstore,
2533 encode_inlined_item: ie,
2534 reachable: cx.reachable(),
2535 }
2536 }
2537
2538 pub fn write_metadata(cx: &SharedCrateContext, krate: &ast::Crate) -> Vec<u8> {
2539 use flate;
2540
2541 let any_library = cx.sess().crate_types.borrow().iter().any(|ty| {
2542 *ty != config::CrateTypeExecutable
2543 });
2544 if !any_library {
2545 return Vec::new()
2546 }
2547
2548 let encode_inlined_item: encoder::EncodeInlinedItem =
2549 Box::new(|ecx, rbml_w, ii| astencode::encode_inlined_item(ecx, rbml_w, ii));
2550
2551 let encode_parms = crate_ctxt_to_encode_parms(cx, encode_inlined_item);
2552 let metadata = encoder::encode_metadata(encode_parms, krate);
2553 let mut compressed = encoder::metadata_encoding_version.to_vec();
2554 compressed.push_all(&flate::deflate_bytes(&metadata));
2555 let llmeta = C_bytes_in_context(cx.metadata_llcx(), &compressed[..]);
2556 let llconst = C_struct_in_context(cx.metadata_llcx(), &[llmeta], false);
2557 let name = format!("rust_metadata_{}_{}",
2558 cx.link_meta().crate_name,
2559 cx.link_meta().crate_hash);
2560 let buf = CString::new(name).unwrap();
2561 let llglobal = unsafe {
2562 llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(),
2563 buf.as_ptr())
2564 };
2565 unsafe {
2566 llvm::LLVMSetInitializer(llglobal, llconst);
2567 let name = loader::meta_section_name(&cx.sess().target.target);
2568 let name = CString::new(name).unwrap();
2569 llvm::LLVMSetSection(llglobal, name.as_ptr())
2570 }
2571 return metadata;
2572 }
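// Layout of the blob constructed above, for reference:
//
//     [ metadata_encoding_version bytes | deflate(metadata) ]
//
// wrapped in an LLVM struct constant and emitted into the target-specific
// metadata section (see loader::meta_section_name). Note that the return
// value is the uncompressed metadata, not the compressed blob.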
2573
2574 /// Find any symbols that are defined in one compilation unit, but not declared
2575 /// in any other compilation unit. Give these symbols internal linkage.
2576 fn internalize_symbols(cx: &SharedCrateContext, reachable: &HashSet<String>) {
2577 unsafe {
2578 let mut declared = HashSet::new();
2579
2580 let iter_globals = |llmod| {
2581 ValueIter {
2582 cur: llvm::LLVMGetFirstGlobal(llmod),
2583 step: llvm::LLVMGetNextGlobal,
2584 }
2585 };
2586
2587 let iter_functions = |llmod| {
2588 ValueIter {
2589 cur: llvm::LLVMGetFirstFunction(llmod),
2590 step: llvm::LLVMGetNextFunction,
2591 }
2592 };
2593
2594 // Collect all external declarations in all compilation units.
2595 for ccx in cx.iter() {
2596 for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
2597 let linkage = llvm::LLVMGetLinkage(val);
2598 // We only care about external declarations (not definitions)
2599 // and available_externally definitions.
2600 if !(linkage == llvm::ExternalLinkage as c_uint &&
2601 llvm::LLVMIsDeclaration(val) != 0) &&
2602 !(linkage == llvm::AvailableExternallyLinkage as c_uint) {
2603 continue
2604 }
2605
2606 let name = CStr::from_ptr(llvm::LLVMGetValueName(val))
2607 .to_bytes().to_vec();
2608 declared.insert(name);
2609 }
2610 }
2611
2612 // Examine each external definition. If the definition is not used in
2613 // any other compilation unit, and is not reachable from other crates,
2614 // then give it internal linkage.
2615 for ccx in cx.iter() {
2616 for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
2617 // We only care about external definitions.
2618 if !(llvm::LLVMGetLinkage(val) == llvm::ExternalLinkage as c_uint &&
2619 llvm::LLVMIsDeclaration(val) == 0) {
2620 continue
2621 }
2622
2623 let name = CStr::from_ptr(llvm::LLVMGetValueName(val))
2624 .to_bytes().to_vec();
2625 if !declared.contains(&name) &&
2626 !reachable.contains(str::from_utf8(&name).unwrap()) {
2627 llvm::SetLinkage(val, llvm::InternalLinkage);
2628 llvm::SetDLLStorageClass(val, llvm::DefaultStorageClass);
2629 }
2630 }
2631 }
2632 }
2633
2634
2635 struct ValueIter {
2636 cur: ValueRef,
2637 step: unsafe extern "C" fn(ValueRef) -> ValueRef,
2638 }
2639
2640 impl Iterator for ValueIter {
2641 type Item = ValueRef;
2642
2643 fn next(&mut self) -> Option<ValueRef> {
2644 let old = self.cur;
2645 if !old.is_null() {
2646 self.cur = unsafe {
2647 let step: unsafe extern "C" fn(ValueRef) -> ValueRef =
2648 mem::transmute_copy(&self.step);
2649 step(old)
2650 };
2651 Some(old)
2652 } else {
2653 None
2654 }
2655 }
2656 }
2657 }
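// Effect, illustrated: with -C codegen-units=2, a function defined in unit A,
// never declared in unit B, and absent from the reachable set is demoted from
// external to internal linkage, allowing LLVM to inline or drop it.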
2658
2659 pub fn trans_crate(tcx: &ty::ctxt, analysis: ty::CrateAnalysis) -> CrateTranslation {
2660 let ty::CrateAnalysis { export_map, reachable, name, .. } = analysis;
2661 let krate = tcx.map.krate();
2662
2663 let check_overflow = if let Some(v) = tcx.sess.opts.debugging_opts.force_overflow_checks {
2664 v
2665 } else {
2666 tcx.sess.opts.debug_assertions
2667 };
2668
2669 let check_dropflag = if let Some(v) = tcx.sess.opts.debugging_opts.force_dropflag_checks {
2670 v
2671 } else {
2672 tcx.sess.opts.debug_assertions
2673 };
2674
2675 // Before we touch LLVM, make sure that multithreading is enabled.
2676 unsafe {
2677 use std::sync::Once;
2678 static INIT: Once = Once::new();
2679 static mut POISONED: bool = false;
2680 INIT.call_once(|| {
2681 if llvm::LLVMStartMultithreaded() != 1 {
2682 // use an extra bool to record the failure, so that all future
2683 // usage of LLVM is blocked even though the Once will not run again.
2684 POISONED = true;
2685 }
2686 });
2687
2688 if POISONED {
2689 tcx.sess.bug("couldn't enable multi-threaded LLVM");
2690 }
2691 }
2692
2693 let link_meta = link::build_link_meta(&tcx.sess, krate, name);
2694
2695 let codegen_units = tcx.sess.opts.cg.codegen_units;
2696 let shared_ccx = SharedCrateContext::new(&link_meta.crate_name,
2697 codegen_units,
2698 tcx,
2699 export_map,
2700 Sha256::new(),
2701 link_meta.clone(),
2702 reachable,
2703 check_overflow,
2704 check_dropflag);
2705
2706 {
2707 let ccx = shared_ccx.get_ccx(0);
2708
2709 // First, verify intrinsics.
2710 intrinsic::check_intrinsics(&ccx);
2711
2712 // Next, translate the module.
2713 {
2714 let _icx = push_ctxt("text");
2715 trans_mod(&ccx, &krate.module);
2716 }
2717 }
2718
2719 for ccx in shared_ccx.iter() {
2720 if ccx.sess().opts.debuginfo != NoDebugInfo {
2721 debuginfo::finalize(&ccx);
2722 }
2723 }
2724
2725 // Translate the metadata.
2726 let metadata = write_metadata(&shared_ccx, krate);
2727
2728 if shared_ccx.sess().trans_stats() {
2729 let stats = shared_ccx.stats();
2730 println!("--- trans stats ---");
2731 println!("n_glues_created: {}", stats.n_glues_created.get());
2732 println!("n_null_glues: {}", stats.n_null_glues.get());
2733 println!("n_real_glues: {}", stats.n_real_glues.get());
2734
2735 println!("n_fns: {}", stats.n_fns.get());
2736 println!("n_monos: {}", stats.n_monos.get());
2737 println!("n_inlines: {}", stats.n_inlines.get());
2738 println!("n_closures: {}", stats.n_closures.get());
2739 println!("fn stats:");
2740 stats.fn_stats.borrow_mut().sort_by(|&(_, insns_a), &(_, insns_b)| {
2741 insns_b.cmp(&insns_a)
2742 });
2743 for tuple in stats.fn_stats.borrow().iter() {
2744 match *tuple {
2745 (ref name, insns) => {
2746 println!("{} insns, {}", insns, *name);
2747 }
2748 }
2749 }
2750 }
2751 if shared_ccx.sess().count_llvm_insns() {
2752 for (k, v) in shared_ccx.stats().llvm_insns.borrow().iter() {
2753 println!("{:7} {}", *v, *k);
2754 }
2755 }
2756
2757 let modules = shared_ccx.iter()
2758 .map(|ccx| ModuleTranslation { llcx: ccx.llcx(), llmod: ccx.llmod() })
2759 .collect();
2760
2761 let mut reachable: Vec<String> = shared_ccx.reachable().iter().filter_map(|id| {
2762 shared_ccx.item_symbols().borrow().get(id).map(|s| s.to_string())
2763 }).collect();
2764
2765 // For the purposes of LTO, we add to the reachable set all of the upstream
2766 // reachable extern fns. These functions are all part of the public ABI of
2767 // the final product, so LTO needs to preserve them.
2768 shared_ccx.sess().cstore.iter_crate_data(|cnum, _| {
2769 let syms = csearch::get_reachable_extern_fns(&shared_ccx.sess().cstore, cnum);
2770 reachable.extend(syms.into_iter().map(|did| {
2771 csearch::get_symbol(&shared_ccx.sess().cstore, did)
2772 }));
2773 });
2774
2775 // Make sure that some other crucial symbols are not eliminated from the
2776 // module. This includes the main function, the crate map (used for debug
2777 // log settings and I/O), and finally the curious rust_stack_exhausted
2778 // symbol. This symbol is required for use by the libmorestack library that
2779 // we link in, so we must ensure that this symbol is not internalized (if
2780 // defined in the crate).
2781 reachable.push("main".to_string());
2782 reachable.push("rust_stack_exhausted".to_string());
2783
2784 // referenced from .eh_frame section on some platforms
2785 reachable.push("rust_eh_personality".to_string());
2786 // referenced from rt/rust_try.ll
2787 reachable.push("rust_eh_personality_catch".to_string());
2788
2789 if codegen_units > 1 {
2790 internalize_symbols(&shared_ccx, &reachable.iter().cloned().collect());
2791 }
2792
2793 let metadata_module = ModuleTranslation {
2794 llcx: shared_ccx.metadata_llcx(),
2795 llmod: shared_ccx.metadata_llmod(),
2796 };
2797 let formats = shared_ccx.tcx().dependency_formats.borrow().clone();
2798 let no_builtins = attr::contains_name(&krate.attrs, "no_builtins");
2799
2800 CrateTranslation {
2801 modules: modules,
2802 metadata_module: metadata_module,
2803 link: link_meta,
2804 metadata: metadata,
2805 reachable: reachable,
2806 crate_formats: formats,
2807 no_builtins: no_builtins,
2808 }
2809 }