// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Translate the completed AST to the LLVM IR.
//!
//! Some functions here, such as trans_block and trans_expr, return a value --
//! the result of the translation to LLVM -- while others, such as trans_fn,
//! trans_impl, and trans_item, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//!
//! Hopefully useful general knowledge about trans:
//!
//!   * There's no way to find out the Ty type of a ValueRef. Doing so
//!     would be "trying to get the eggs out of an omelette" (credit:
//!     pcwalton). You can, instead, find out its TypeRef by calling val_ty,
//!     but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int,
//!     int) and rec(x=int, y=int, z=int) will have the same TypeRef.
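//!
//! A hedged illustration of that many-to-one mapping (modern surface
//! syntax, not code from this module):
//!
//! ```ignore
//! // Both of these lower to the same LLVM type, { i32, i32, i32 }, so
//! // val_ty() on a value of either type hands back the same TypeRef.
//! type Triple = (i32, i32, i32);
//! struct Point { x: i32, y: i32, z: i32 }
//! ```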

#![allow(non_camel_case_types)]

pub use self::ValueOrigin::*;

use super::CrateTranslation;
use super::ModuleTranslation;

use back::link::mangle_exported_name;
use back::{link, abi};
use lint;
use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param};
use llvm;
use metadata::{csearch, encoder, loader};
use middle::astencode;
use middle::cfg;
use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
use middle::weak_lang_items;
use middle::subst::Substs;
use middle::ty::{self, Ty, ClosureTyper, type_is_simd, simd_size};
use session::config::{self, NoDebugInfo};
use session::Session;
use trans::_match;
use trans::adt;
use trans::attributes;
use trans::build::*;
use trans::builder::{Builder, noname};
use trans::callee;
use trans::cleanup::CleanupMethods;
use trans::cleanup;
use trans::closure;
use trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_integral};
use trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
use trans::common::{CrateContext, FunctionContext};
use trans::common::{Result, NodeIdAndSpan};
use trans::common::{node_id_type, return_type_is_void};
use trans::common::{type_is_immediate, type_is_zero_size, val_ty};
use trans::common;
use trans::consts;
use trans::context::SharedCrateContext;
use trans::controlflow;
use trans::datum;
use trans::debuginfo::{self, DebugLoc, ToDebugLoc};
use trans::declare;
use trans::expr;
use trans::foreign;
use trans::glue;
use trans::intrinsic;
use trans::machine;
use trans::machine::{llsize_of, llsize_of_real};
use trans::meth;
use trans::monomorphize;
use trans::tvec;
use trans::type_::Type;
use trans::type_of;
use trans::type_of::*;
use trans::value::Value;
use util::common::indenter;
use util::ppaux::{Repr, ty_to_string};
use util::sha2::Sha256;
use util::nodemap::NodeMap;

use arena::TypedArena;
use libc::c_uint;
use std::ffi::{CStr, CString};
use std::cell::{Cell, RefCell};
use std::collections::HashSet;
use std::mem;
use std::str;
use std::{i8, i16, i32, i64};
use syntax::abi::{Rust, RustCall, RustIntrinsic, Abi};
use syntax::ast_util::local_def;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
use syntax::codemap::Span;
use syntax::parse::token::InternedString;
use syntax::visit::Visitor;
use syntax::visit;
use syntax::{ast, ast_util, ast_map};

thread_local! {
    static TASK_LOCAL_INSN_KEY: RefCell<Option<Vec<&'static str>>> = {
        RefCell::new(None)
    }
}

pub fn with_insn_ctxt<F>(blk: F) where
    F: FnOnce(&[&'static str]),
{
    TASK_LOCAL_INSN_KEY.with(move |slot| {
        slot.borrow().as_ref().map(move |s| blk(s));
    })
}

pub fn init_insn_ctxt() {
    TASK_LOCAL_INSN_KEY.with(|slot| {
        *slot.borrow_mut() = Some(Vec::new());
    });
}

pub struct _InsnCtxt {
    _cannot_construct_outside_of_this_module: ()
}

#[unsafe_destructor]
impl Drop for _InsnCtxt {
    fn drop(&mut self) {
        TASK_LOCAL_INSN_KEY.with(|slot| {
            match slot.borrow_mut().as_mut() {
                Some(ctx) => { ctx.pop(); }
                None => {}
            }
        })
    }
}

pub fn push_ctxt(s: &'static str) -> _InsnCtxt {
    debug!("new InsnCtxt: {}", s);
    TASK_LOCAL_INSN_KEY.with(|slot| {
        match slot.borrow_mut().as_mut() {
            Some(ctx) => ctx.push(s),
            None => {}
        }
    });
    _InsnCtxt { _cannot_construct_outside_of_this_module: () }
}
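
// A hedged usage sketch of the instruction-context machinery above
// (hypothetical call site, not taken from this crate):
//
//     init_insn_ctxt();                  // once per task: enable the stack
//     let _icx = push_ctxt("my_pass");   // pushes "my_pass"; popped on drop
//     with_insn_ctxt(|ctx| {
//         debug!("inside: {:?}", ctx);   // e.g. ["trans_fn", "my_pass"]
//     });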

pub struct StatRecorder<'a, 'tcx: 'a> {
    ccx: &'a CrateContext<'a, 'tcx>,
    name: Option<String>,
    istart: usize,
}

impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
    pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String)
               -> StatRecorder<'a, 'tcx> {
        let istart = ccx.stats().n_llvm_insns.get();
        StatRecorder {
            ccx: ccx,
            name: Some(name),
            istart: istart,
        }
    }
}

#[unsafe_destructor]
impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
    fn drop(&mut self) {
        if self.ccx.sess().trans_stats() {
            let iend = self.ccx.stats().n_llvm_insns.get();
            self.ccx.stats().fn_stats.borrow_mut().push((self.name.take().unwrap(),
                                                         iend - self.istart));
            self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1);
            // Reset LLVM insn count to avoid compound costs.
            self.ccx.stats().n_llvm_insns.set(self.istart);
        }
    }
}

fn get_extern_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_ty: Ty<'tcx>,
                                name: &str, did: ast::DefId) -> ValueRef {
    match ccx.externs().borrow().get(name) {
        Some(n) => return *n,
        None => ()
    }

    let f = declare::declare_rust_fn(ccx, name, fn_ty);

    let attrs = csearch::get_item_attrs(&ccx.sess().cstore, did);
    attributes::from_fn_attrs(ccx, &attrs[..], f);

    ccx.externs().borrow_mut().insert(name.to_string(), f);
    f
}

pub fn self_type_for_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                       closure_id: ast::DefId,
                                       fn_ty: Ty<'tcx>)
                                       -> Ty<'tcx>
{
    let closure_kind = ccx.tcx().closure_kind(closure_id);
    match closure_kind {
        ty::FnClosureKind => {
            ty::mk_imm_rptr(ccx.tcx(), ccx.tcx().mk_region(ty::ReStatic), fn_ty)
        }
        ty::FnMutClosureKind => {
            ty::mk_mut_rptr(ccx.tcx(), ccx.tcx().mk_region(ty::ReStatic), fn_ty)
        }
        ty::FnOnceClosureKind => fn_ty
    }
}

pub fn kind_for_closure(ccx: &CrateContext, closure_id: ast::DefId) -> ty::ClosureKind {
    *ccx.tcx().closure_kinds.borrow().get(&closure_id).unwrap()
}

pub fn get_extern_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, did: ast::DefId,
                                  t: Ty<'tcx>) -> ValueRef {
    let name = csearch::get_symbol(&ccx.sess().cstore, did);
    let ty = type_of(ccx, t);
    match ccx.externs().borrow_mut().get(&name) {
        Some(n) => return *n,
        None => ()
    }
    // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
    // FIXME(nagisa): investigate whether it can be changed into define_global
    let c = declare::declare_global(ccx, &name[..], ty);
    // Thread-local statics in some other crate need to *always* be linked
    // against in a thread-local fashion, so we need to be sure to apply the
    // thread-local attribute locally if it was present remotely. If we
    // don't do this then linker errors can be generated where the linker
    // complains that one object file has a thread-local version of the
    // symbol and another one doesn't.
    for attr in &*ty::get_attrs(ccx.tcx(), did) {
        if attr.check_name("thread_local") {
            llvm::set_thread_local(c, true);
        }
    }
    ccx.externs().borrow_mut().insert(name.to_string(), c);
    return c;
}

fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                info_ty: Ty<'tcx>, it: LangItem) -> ast::DefId {
    match bcx.tcx().lang_items.require(it) {
        Ok(id) => id,
        Err(s) => {
            bcx.sess().fatal(&format!("allocation of `{}` {}",
                                      bcx.ty_to_string(info_ty),
                                      s));
        }
    }
}

// The following malloc_raw_dyn* functions allocate a box to contain
// a given type, but with a potentially dynamic size.

pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  llty_ptr: Type,
                                  info_ty: Ty<'tcx>,
                                  size: ValueRef,
                                  align: ValueRef,
                                  debug_loc: DebugLoc)
                                  -> Result<'blk, 'tcx> {
    let _icx = push_ctxt("malloc_raw_exchange");

    // Allocate space:
    let r = callee::trans_lang_call(bcx,
                                    require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem),
                                    &[size, align],
                                    None,
                                    debug_loc);

    Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}


pub fn bin_op_to_icmp_predicate(ccx: &CrateContext, op: ast::BinOp_, signed: bool)
                                -> llvm::IntPredicate {
    match op {
        ast::BiEq => llvm::IntEQ,
        ast::BiNe => llvm::IntNE,
        ast::BiLt => if signed { llvm::IntSLT } else { llvm::IntULT },
        ast::BiLe => if signed { llvm::IntSLE } else { llvm::IntULE },
        ast::BiGt => if signed { llvm::IntSGT } else { llvm::IntUGT },
        ast::BiGe => if signed { llvm::IntSGE } else { llvm::IntUGE },
        op => {
            ccx.sess().bug(&format!("comparison_op_to_icmp_predicate: expected \
                                     comparison operator, found {:?}", op));
        }
    }
}

pub fn bin_op_to_fcmp_predicate(ccx: &CrateContext, op: ast::BinOp_)
                                -> llvm::RealPredicate {
    match op {
        ast::BiEq => llvm::RealOEQ,
        ast::BiNe => llvm::RealUNE,
        ast::BiLt => llvm::RealOLT,
        ast::BiLe => llvm::RealOLE,
        ast::BiGt => llvm::RealOGT,
        ast::BiGe => llvm::RealOGE,
        op => {
            ccx.sess().bug(&format!("comparison_op_to_fcmp_predicate: expected \
                                     comparison operator, found {:?}", op));
        }
    }
}

pub fn compare_scalar_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                        lhs: ValueRef,
                                        rhs: ValueRef,
                                        t: Ty<'tcx>,
                                        op: ast::BinOp_,
                                        debug_loc: DebugLoc)
                                        -> ValueRef {
    match t.sty {
        ty::ty_tup(ref tys) if tys.is_empty() => {
            // We don't need to do actual comparisons for nil.
            // () == () holds but () < () does not.
            match op {
                ast::BiEq | ast::BiLe | ast::BiGe => return C_bool(bcx.ccx(), true),
                ast::BiNe | ast::BiLt | ast::BiGt => return C_bool(bcx.ccx(), false),
                // refinements would be nice
                _ => bcx.sess().bug("compare_scalar_types: must be a comparison operator")
            }
        }
        ty::ty_bare_fn(..) | ty::ty_bool | ty::ty_uint(_) | ty::ty_char => {
            ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, false), lhs, rhs, debug_loc)
        }
        ty::ty_ptr(mt) if common::type_is_sized(bcx.tcx(), mt.ty) => {
            ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, false), lhs, rhs, debug_loc)
        }
        ty::ty_int(_) => {
            ICmp(bcx, bin_op_to_icmp_predicate(bcx.ccx(), op, true), lhs, rhs, debug_loc)
        }
        ty::ty_float(_) => {
            FCmp(bcx, bin_op_to_fcmp_predicate(bcx.ccx(), op), lhs, rhs, debug_loc)
        }
        // Should never get here, because t is scalar.
        _ => bcx.sess().bug("non-scalar type passed to compare_scalar_types")
    }
}

pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      t: Ty<'tcx>,
                                      op: ast::BinOp_,
                                      debug_loc: DebugLoc)
                                      -> ValueRef {
    let signed = match t.sty {
        ty::ty_float(_) => {
            // The comparison operators for floating point vectors are challenging.
            // LLVM outputs a `< size x i1 >`, but if we perform a sign extension
            // then bitcast to a floating point vector, the result will be `-NaN`
            // for each truth value. Because of this they are unsupported.
            bcx.sess().bug("compare_simd_types: comparison operators \
                            not supported for floating point SIMD types")
        },
        ty::ty_uint(_) => false,
        ty::ty_int(_) => true,
        _ => bcx.sess().bug("compare_simd_types: invalid SIMD type"),
    };

    let cmp = bin_op_to_icmp_predicate(bcx.ccx(), op, signed);
    // LLVM outputs a `< size x i1 >`, so we need to perform a sign extension
    // to get the correctly sized type. This will compile to a single instruction
    // once the IR is converted to assembly if the SIMD instruction is supported
    // by the target architecture.
    SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), val_ty(lhs))
}
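
// A hedged sketch of the IR shape produced above, assuming a 4-lane i32
// signed less-than comparison (value names invented for the example):
//
//     %c = icmp slt <4 x i32> %lhs, %rhs    ; yields <4 x i1>
//     %r = sext <4 x i1> %c to <4 x i32>    ; true => -1 (all ones), false => 0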

// Iterates through the elements of a structural type.
pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
                                         av: ValueRef,
                                         t: Ty<'tcx>,
                                         mut f: F)
                                         -> Block<'blk, 'tcx> where
    F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
{
    let _icx = push_ctxt("iter_structural_ty");

    fn iter_variant<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
                                   repr: &adt::Repr<'tcx>,
                                   av: ValueRef,
                                   variant: &ty::VariantInfo<'tcx>,
                                   substs: &Substs<'tcx>,
                                   f: &mut F)
                                   -> Block<'blk, 'tcx> where
        F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>,
    {
        let _icx = push_ctxt("iter_variant");
        let tcx = cx.tcx();
        let mut cx = cx;

        for (i, &arg) in variant.args.iter().enumerate() {
            let arg = monomorphize::apply_param_substs(tcx, substs, &arg);
            cx = f(cx, adt::trans_field_ptr(cx, repr, av, variant.disr_val, i), arg);
        }
        return cx;
    }

    let (data_ptr, info) = if common::type_is_sized(cx.tcx(), t) {
        (av, None)
    } else {
        let data = GEPi(cx, av, &[0, abi::FAT_PTR_ADDR]);
        let info = GEPi(cx, av, &[0, abi::FAT_PTR_EXTRA]);
        (Load(cx, data), Some(Load(cx, info)))
    };

    let mut cx = cx;
    match t.sty {
        ty::ty_struct(..) => {
            let repr = adt::represent_type(cx.ccx(), t);
            expr::with_field_tys(cx.tcx(), t, None, |discr, field_tys| {
                for (i, field_ty) in field_tys.iter().enumerate() {
                    let field_ty = field_ty.mt.ty;
                    let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, discr, i);

                    let val = if common::type_is_sized(cx.tcx(), field_ty) {
                        llfld_a
                    } else {
                        let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter");
                        Store(cx, llfld_a, GEPi(cx, scratch.val, &[0, abi::FAT_PTR_ADDR]));
                        Store(cx, info.unwrap(), GEPi(cx, scratch.val, &[0, abi::FAT_PTR_EXTRA]));
                        scratch.val
                    };
                    cx = f(cx, val, field_ty);
                }
            })
        }
        ty::ty_closure(def_id, substs) => {
            let repr = adt::represent_type(cx.ccx(), t);
            let typer = common::NormalizingClosureTyper::new(cx.tcx());
            let upvars = typer.closure_upvars(def_id, substs).unwrap();
            for (i, upvar) in upvars.iter().enumerate() {
                let llupvar = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
                cx = f(cx, llupvar, upvar.ty);
            }
        }
        ty::ty_vec(_, Some(n)) => {
            let (base, len) = tvec::get_fixed_base_and_len(cx, data_ptr, n);
            let unit_ty = ty::sequence_element_type(cx.tcx(), t);
            cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
        }
        ty::ty_vec(_, None) | ty::ty_str => {
            let unit_ty = ty::sequence_element_type(cx.tcx(), t);
            cx = tvec::iter_vec_raw(cx, data_ptr, unit_ty, info.unwrap(), f);
        }
        ty::ty_tup(ref args) => {
            let repr = adt::represent_type(cx.ccx(), t);
            for (i, arg) in args.iter().enumerate() {
                let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
                cx = f(cx, llfld_a, *arg);
            }
        }
        ty::ty_enum(tid, substs) => {
            let fcx = cx.fcx;
            let ccx = fcx.ccx;

            let repr = adt::represent_type(ccx, t);
            let variants = ty::enum_variants(ccx.tcx(), tid);
            let n_variants = (*variants).len();

            // NB: we must hit the discriminant first so that structural
            // comparisons know not to proceed when the discriminants differ.

            match adt::trans_switch(cx, &*repr, av) {
                (_match::Single, None) => {
                    cx = iter_variant(cx, &*repr, av, &*(*variants)[0],
                                      substs, &mut f);
                }
                (_match::Switch, Some(lldiscrim_a)) => {
                    cx = f(cx, lldiscrim_a, cx.tcx().types.isize);
                    let unr_cx = fcx.new_temp_block("enum-iter-unr");
                    Unreachable(unr_cx);
                    let llswitch = Switch(cx, lldiscrim_a, unr_cx.llbb,
                                          n_variants);
                    let next_cx = fcx.new_temp_block("enum-iter-next");

                    for variant in &(*variants) {
                        let variant_cx =
                            fcx.new_temp_block(
                                &format!("enum-iter-variant-{}",
                                         &variant.disr_val.to_string())
                                );
                        match adt::trans_case(cx, &*repr, variant.disr_val) {
                            _match::SingleResult(r) => {
                                AddCase(llswitch, r.val, variant_cx.llbb)
                            }
                            _ => ccx.sess().unimpl("value from adt::trans_case \
                                                    in iter_structural_ty")
                        }
                        let variant_cx =
                            iter_variant(variant_cx,
                                         &*repr,
                                         data_ptr,
                                         &**variant,
                                         substs,
                                         &mut f);
                        Br(variant_cx, next_cx.llbb, DebugLoc::None);
                    }
                    cx = next_cx;
                }
                _ => ccx.sess().unimpl("value from adt::trans_switch \
                                        in iter_structural_ty")
            }
        }
        _ => {
            cx.sess().unimpl(&format!("type in iter_structural_ty: {}",
                                      ty_to_string(cx.tcx(), t)))
        }
    }
    return cx;
}

pub fn cast_shift_expr_rhs(cx: Block,
                           op: ast::BinOp_,
                           lhs: ValueRef,
                           rhs: ValueRef)
                           -> ValueRef {
    cast_shift_rhs(op, lhs, rhs,
                   |a,b| Trunc(cx, a, b),
                   |a,b| ZExt(cx, a, b))
}

pub fn cast_shift_const_rhs(op: ast::BinOp_,
                            lhs: ValueRef, rhs: ValueRef) -> ValueRef {
    cast_shift_rhs(op, lhs, rhs,
                   |a, b| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) },
                   |a, b| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) })
}

fn cast_shift_rhs<F, G>(op: ast::BinOp_,
                        lhs: ValueRef,
                        rhs: ValueRef,
                        trunc: F,
                        zext: G)
                        -> ValueRef where
    F: FnOnce(ValueRef, Type) -> ValueRef,
    G: FnOnce(ValueRef, Type) -> ValueRef,
{
    // Shifts may have any size int on the rhs
    if ast_util::is_shift_binop(op) {
        let mut rhs_llty = val_ty(rhs);
        let mut lhs_llty = val_ty(lhs);
        if rhs_llty.kind() == Vector { rhs_llty = rhs_llty.element_type() }
        if lhs_llty.kind() == Vector { lhs_llty = lhs_llty.element_type() }
        let rhs_sz = rhs_llty.int_width();
        let lhs_sz = lhs_llty.int_width();
        if lhs_sz < rhs_sz {
            trunc(rhs, lhs_llty)
        } else if lhs_sz > rhs_sz {
            // FIXME (#1877): if shifting by negative values ever stops being
            // undefined, then this is wrong.
            zext(rhs, lhs_llty)
        } else {
            rhs
        }
    } else {
        rhs
    }
}
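
// A hedged sketch of cast_shift_rhs' effect on hypothetical operand types:
// for `x: u64 << y: u8` the rhs is zero-extended from i8 to i64 to match
// the lhs width; for `x: u8 << y: u64` the rhs is truncated from i64 to
// i8. Operands of equal width pass through unchanged.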

pub fn llty_and_min_for_signed_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                              val_t: Ty<'tcx>) -> (Type, u64) {
    match val_t.sty {
        ty::ty_int(t) => {
            let llty = Type::int_from_ty(cx.ccx(), t);
            let min = match t {
                ast::TyIs if llty == Type::i32(cx.ccx()) => i32::MIN as u64,
                ast::TyIs => i64::MIN as u64,
                ast::TyI8 => i8::MIN as u64,
                ast::TyI16 => i16::MIN as u64,
                ast::TyI32 => i32::MIN as u64,
                ast::TyI64 => i64::MIN as u64,
            };
            (llty, min)
        }
        _ => unreachable!(),
    }
}

pub fn fail_if_zero_or_overflows<'blk, 'tcx>(
                                cx: Block<'blk, 'tcx>,
                                call_info: NodeIdAndSpan,
                                divrem: ast::BinOp,
                                lhs: ValueRef,
                                rhs: ValueRef,
                                rhs_t: Ty<'tcx>)
                                -> Block<'blk, 'tcx> {
    let (zero_text, overflow_text) = if divrem.node == ast::BiDiv {
        ("attempted to divide by zero",
         "attempted to divide with overflow")
    } else {
        ("attempted remainder with a divisor of zero",
         "attempted remainder with overflow")
    };
    let debug_loc = call_info.debug_loc();

    let (is_zero, is_signed) = match rhs_t.sty {
        ty::ty_int(t) => {
            let zero = C_integral(Type::int_from_ty(cx.ccx(), t), 0, false);
            (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), true)
        }
        ty::ty_uint(t) => {
            let zero = C_integral(Type::uint_from_ty(cx.ccx(), t), 0, false);
            (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), false)
        }
        ty::ty_struct(_, _) if type_is_simd(cx.tcx(), rhs_t) => {
            let mut res = C_bool(cx.ccx(), false);
            for i in 0 .. simd_size(cx.tcx(), rhs_t) {
                res = Or(cx, res,
                         IsNull(cx,
                                ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))), debug_loc);
            }
            (res, false)
        }
        _ => {
            cx.sess().bug(&format!("fail-if-zero on unexpected type: {}",
                                   ty_to_string(cx.tcx(), rhs_t)));
        }
    };
    let bcx = with_cond(cx, is_zero, |bcx| {
        controlflow::trans_fail(bcx, call_info, InternedString::new(zero_text))
    });

    // To quote LLVM's documentation for the sdiv instruction:
    //
    //      Division by zero leads to undefined behavior. Overflow also leads
    //      to undefined behavior; this is a rare case, but can occur, for
    //      example, by doing a 32-bit division of -2147483648 by -1.
    //
    // In order to avoid undefined behavior, we perform runtime checks for
    // signed division/remainder which would trigger overflow. For unsigned
    // integers, no action beyond checking for zero need be taken.
    if is_signed {
        let (llty, min) = llty_and_min_for_signed_ty(cx, rhs_t);
        let minus_one = ICmp(bcx, llvm::IntEQ, rhs,
                             C_integral(llty, !0, false), debug_loc);
        with_cond(bcx, minus_one, |bcx| {
            let is_min = ICmp(bcx, llvm::IntEQ, lhs,
                              C_integral(llty, min, true), debug_loc);
            with_cond(bcx, is_min, |bcx| {
                controlflow::trans_fail(bcx,
                                        call_info,
                                        InternedString::new(overflow_text))
            })
        })
    } else {
        bcx
    }
}
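
// A worked example of the overflow case guarded against above: with i32
// operands, MIN is -2147483648 and `-2147483648 / -1` would be +2147483648,
// which does not fit in an i32, so LLVM's sdiv is undefined for it. The
// emitted checks therefore fail exactly when `rhs == -1 && lhs == MIN`.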

pub fn trans_external_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                     did: ast::DefId, t: Ty<'tcx>) -> ValueRef {
    let name = csearch::get_symbol(&ccx.sess().cstore, did);
    match t.sty {
        ty::ty_bare_fn(_, ref fn_ty) => {
            match ccx.sess().target.target.adjust_abi(fn_ty.abi) {
                Rust | RustCall => {
                    get_extern_rust_fn(ccx, t, &name[..], did)
                }
                RustIntrinsic => {
                    ccx.sess().bug("unexpected intrinsic in trans_external_path")
                }
                _ => {
                    let llfn = foreign::register_foreign_item_fn(ccx, fn_ty.abi, t, &name[..]);
                    let attrs = csearch::get_item_attrs(&ccx.sess().cstore, did);
                    attributes::from_fn_attrs(ccx, &attrs, llfn);
                    llfn
                }
            }
        }
        _ => {
            get_extern_const(ccx, did, t)
        }
    }
}

pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                          llfn: ValueRef,
                          llargs: &[ValueRef],
                          fn_ty: Ty<'tcx>,
                          debug_loc: DebugLoc)
                          -> (ValueRef, Block<'blk, 'tcx>) {
    let _icx = push_ctxt("invoke_");
    if bcx.unreachable.get() {
        return (C_null(Type::i8(bcx.ccx())), bcx);
    }

    let attributes = attributes::from_fn_type(bcx.ccx(), fn_ty);

    match bcx.opt_node_id {
        None => {
            debug!("invoke at ???");
        }
        Some(id) => {
            debug!("invoke at {}", bcx.tcx().map.node_to_string(id));
        }
    }

    if need_invoke(bcx) {
        debug!("invoking {} at {:?}", bcx.val_to_string(llfn), bcx.llbb);
        for &llarg in llargs {
            debug!("arg: {}", bcx.val_to_string(llarg));
        }
        let normal_bcx = bcx.fcx.new_temp_block("normal-return");
        let landing_pad = bcx.fcx.get_landing_pad();

        let llresult = Invoke(bcx,
                              llfn,
                              &llargs[..],
                              normal_bcx.llbb,
                              landing_pad,
                              Some(attributes),
                              debug_loc);
        return (llresult, normal_bcx);
    } else {
        debug!("calling {} at {:?}", bcx.val_to_string(llfn), bcx.llbb);
        for &llarg in llargs {
            debug!("arg: {}", bcx.val_to_string(llarg));
        }

        let llresult = Call(bcx,
                            llfn,
                            &llargs[..],
                            Some(attributes),
                            debug_loc);
        return (llresult, bcx);
    }
}

pub fn need_invoke(bcx: Block) -> bool {
    if bcx.sess().no_landing_pads() {
        return false;
    }

    // Avoid using invoke if we are already inside a landing pad.
    if bcx.is_lpad {
        return false;
    }

    bcx.fcx.needs_invoke()
}

pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                     v: ValueRef, t: Ty<'tcx>) -> ValueRef {
    let _icx = push_ctxt("load_if_immediate");
    if type_is_immediate(cx.ccx(), t) { return load_ty(cx, v, t); }
    return v;
}

/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values. Also handles various special cases where the type
/// gives us better information about what we are loading.
pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                           ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
    if cx.unreachable.get() || type_is_zero_size(cx.ccx(), t) {
        return C_undef(type_of::type_of(cx.ccx(), t));
    }

    let ptr = to_arg_ty_ptr(cx, ptr, t);

    if type_is_immediate(cx.ccx(), t) && type_of::type_of(cx.ccx(), t).is_aggregate() {
        return Load(cx, ptr);
    }

    unsafe {
        let global = llvm::LLVMIsAGlobalVariable(ptr);
        if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
            let val = llvm::LLVMGetInitializer(global);
            if !val.is_null() {
                return from_arg_ty(cx, val, t);
            }
        }
    }

    let val = if ty::type_is_bool(t) {
        LoadRangeAssert(cx, ptr, 0, 2, llvm::False)
    } else if ty::type_is_char(t) {
        // a char is a Unicode codepoint, and so takes values from 0
        // to 0x10FFFF inclusive only.
        LoadRangeAssert(cx, ptr, 0, 0x10FFFF + 1, llvm::False)
    } else if (ty::type_is_region_ptr(t) || ty::type_is_unique(t))
        && !common::type_is_fat_ptr(cx.tcx(), t) {
        LoadNonNull(cx, ptr)
    } else {
        Load(cx, ptr)
    };

    from_arg_ty(cx, val, t)
}

/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values.
pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
    Store(cx, to_arg_ty(cx, v, t), to_arg_ty_ptr(cx, dst, t));
}

pub fn to_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
    if ty::type_is_bool(ty) {
        ZExt(bcx, val, Type::i8(bcx.ccx()))
    } else {
        val
    }
}

pub fn from_arg_ty(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
    if ty::type_is_bool(ty) {
        Trunc(bcx, val, Type::i1(bcx.ccx()))
    } else {
        val
    }
}
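
// A hedged sketch (illustrative IR only) of the bool round-trip the two
// helpers above implement: store_ty widens the SSA i1 before storing and
// load_ty narrows it again after loading.
//
//     %b = zext i1 %val to i8    ; to_arg_ty: in-memory bools are i8
//     store i8 %b, i8* %ptr
//     ...
//     %m = load i8* %ptr
//     %v = trunc i8 %m to i1     ; from_arg_ty: SSA bools are i1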

pub fn to_arg_ty_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ptr: ValueRef, ty: Ty<'tcx>) -> ValueRef {
    if type_is_immediate(bcx.ccx(), ty) && type_of::type_of(bcx.ccx(), ty).is_aggregate() {
        // We want to pass small aggregates as immediate values, but using an aggregate LLVM type
        // for this leads to bad optimizations, so its arg type is an appropriately sized integer
        // and we have to convert it
        BitCast(bcx, ptr, type_of::arg_type_of(bcx.ccx(), ty).ptr_to())
    } else {
        ptr
    }
}

pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &ast::Local)
                              -> Block<'blk, 'tcx> {
    debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id);
    let _indenter = indenter();
    let _icx = push_ctxt("init_local");
    _match::store_local(bcx, local)
}

pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
                             is_lpad: bool,
                             llbb: BasicBlockRef)
                             -> Block<'blk, 'tcx> {
    common::BlockS::new(llbb, is_lpad, None, fcx)
}

pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
                                val: ValueRef,
                                f: F)
                                -> Block<'blk, 'tcx> where
    F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>,
{
    let _icx = push_ctxt("with_cond");

    if bcx.unreachable.get() ||
            (common::is_const(val) && common::const_to_uint(val) == 0) {
        return bcx;
    }

    let fcx = bcx.fcx;
    let next_cx = fcx.new_temp_block("next");
    let cond_cx = fcx.new_temp_block("cond");
    CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None);
    let after_cx = f(cond_cx);
    if !after_cx.terminated.get() {
        Br(after_cx, next_cx.llbb, DebugLoc::None);
    }
    next_cx
}

pub fn call_lifetime_start(cx: Block, ptr: ValueRef) {
    if cx.sess().opts.optimize == config::No {
        return;
    }

    let _icx = push_ctxt("lifetime_start");
    let ccx = cx.ccx();

    let llsize = C_u64(ccx, machine::llsize_of_alloc(ccx, val_ty(ptr).element_type()));
    let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
    let lifetime_start = ccx.get_intrinsic(&"llvm.lifetime.start");
    Call(cx, lifetime_start, &[llsize, ptr], None, DebugLoc::None);
}

pub fn call_lifetime_end(cx: Block, ptr: ValueRef) {
    if cx.sess().opts.optimize == config::No {
        return;
    }

    let _icx = push_ctxt("lifetime_end");
    let ccx = cx.ccx();

    let llsize = C_u64(ccx, machine::llsize_of_alloc(ccx, val_ty(ptr).element_type()));
    let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
    let lifetime_end = ccx.get_intrinsic(&"llvm.lifetime.end");
    Call(cx, lifetime_end, &[llsize, ptr], None, DebugLoc::None);
}

pub fn call_memcpy(cx: Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) {
    let _icx = push_ctxt("call_memcpy");
    let ccx = cx.ccx();
    let key = match &ccx.sess().target.target.target_pointer_width[..] {
        "32" => "llvm.memcpy.p0i8.p0i8.i32",
        "64" => "llvm.memcpy.p0i8.p0i8.i64",
        tws => panic!("Unsupported target word size for memcpy: {}", tws),
    };
    let memcpy = ccx.get_intrinsic(&key);
    let src_ptr = PointerCast(cx, src, Type::i8p(ccx));
    let dst_ptr = PointerCast(cx, dst, Type::i8p(ccx));
    let size = IntCast(cx, n_bytes, ccx.int_type());
    let align = C_i32(ccx, align as i32);
    let volatile = C_bool(ccx, false);
    Call(cx, memcpy, &[dst_ptr, src_ptr, size, align, volatile], None, DebugLoc::None);
}

pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             dst: ValueRef, src: ValueRef,
                             t: Ty<'tcx>) {
    let _icx = push_ctxt("memcpy_ty");
    let ccx = bcx.ccx();
    if ty::type_is_structural(t) {
        let llty = type_of::type_of(ccx, t);
        let llsz = llsize_of(ccx, llty);
        let llalign = type_of::align_of(ccx, t);
        call_memcpy(bcx, dst, src, llsz, llalign as u32);
    } else {
        store_ty(bcx, load_ty(bcx, src, t), dst, t);
    }
}

pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
    if cx.unreachable.get() { return; }
    let _icx = push_ctxt("drop_done_fill_mem");
    let bcx = cx;
    memfill(&B(bcx), llptr, t, adt::DTOR_DONE);
}

pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
    if cx.unreachable.get() { return; }
    let _icx = push_ctxt("init_zero_mem");
    let bcx = cx;
    memfill(&B(bcx), llptr, t, 0);
}

// Always use this function instead of storing a constant byte to the memory
// in question. e.g. if you store a zero constant, LLVM will drown in vreg
// allocation for large data structures, and the generated code will be
// awful. (A telltale sign of this is large quantities of
// `mov [byte ptr foo],0` in the generated code.)
fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) {
    let _icx = push_ctxt("memfill");
    let ccx = b.ccx;

    let llty = type_of::type_of(ccx, ty);

    let intrinsic_key = match &ccx.sess().target.target.target_pointer_width[..] {
        "32" => "llvm.memset.p0i8.i32",
        "64" => "llvm.memset.p0i8.i64",
        tws => panic!("Unsupported target word size for memset: {}", tws),
    };

    let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key);
    let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to());
    let llzeroval = C_u8(ccx, byte as usize);
    let size = machine::llsize_of(ccx, llty);
    let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
    let volatile = C_bool(ccx, false);
    b.call(llintrinsicfn, &[llptr, llzeroval, size, align, volatile], None);
}

pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, name: &str) -> ValueRef {
    let _icx = push_ctxt("alloc_ty");
    let ccx = bcx.ccx();
    let ty = type_of::type_of(ccx, t);
    assert!(!ty::type_has_params(t));
    let val = alloca(bcx, ty, name);
    return val;
}

pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
    let p = alloca_no_lifetime(cx, ty, name);
    call_lifetime_start(cx, p);
    p
}

pub fn alloca_no_lifetime(cx: Block, ty: Type, name: &str) -> ValueRef {
    let _icx = push_ctxt("alloca");
    if cx.unreachable.get() {
        unsafe {
            return llvm::LLVMGetUndef(ty.ptr_to().to_ref());
        }
    }
    debuginfo::clear_source_location(cx.fcx);
    Alloca(cx, ty, name)
}

// Creates the alloca slot which holds the pointer to the slot for the final return value
pub fn make_return_slot_pointer<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                                          output_type: Ty<'tcx>) -> ValueRef {
    let lloutputtype = type_of::type_of(fcx.ccx, output_type);

    // We create an alloca to hold a pointer of type `output_type`
    // which will hold the pointer to the right alloca which has the
    // final ret value
    if fcx.needs_ret_allocas {
        // Let's create the stack slot
        let slot = AllocaFcx(fcx, lloutputtype.ptr_to(), "llretslotptr");

        // and if we're using an out pointer, then store that in our newly made slot
        if type_of::return_uses_outptr(fcx.ccx, output_type) {
            let outptr = get_param(fcx.llfn, 0);

            let b = fcx.ccx.builder();
            b.position_before(fcx.alloca_insert_pt.get().unwrap());
            b.store(outptr, slot);
        }

        slot

    // But if there are no nested returns, we skip the indirection and have a single
    // retslot
    } else {
        if type_of::return_uses_outptr(fcx.ccx, output_type) {
            get_param(fcx.llfn, 0)
        } else {
            AllocaFcx(fcx, lloutputtype, "sret_slot")
        }
    }
}
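
// A hedged sketch of the indirection above (hypothetical return type %Out):
// with nested returns, llretslotptr holds a *pointer to* the active return
// slot, so writing a return value goes through one extra load:
//
//     %llretslotptr = alloca %Out*           ; slot for the slot pointer
//     ...
//     %slot = load %Out** %llretslotptr      ; find the active return slot
//     store %Out %val, %Out* %slot           ; then store the value into it
//
// Without nested returns there is a single retslot and no indirection.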

struct FindNestedReturn {
    found: bool,
}

impl FindNestedReturn {
    fn new() -> FindNestedReturn {
        FindNestedReturn { found: false }
    }
}

impl<'v> Visitor<'v> for FindNestedReturn {
    fn visit_expr(&mut self, e: &ast::Expr) {
        match e.node {
            ast::ExprRet(..) => {
                self.found = true;
            }
            _ => visit::walk_expr(self, e)
        }
    }
}

fn build_cfg(tcx: &ty::ctxt, id: ast::NodeId) -> (ast::NodeId, Option<cfg::CFG>) {
    let blk = match tcx.map.find(id) {
        Some(ast_map::NodeItem(i)) => {
            match i.node {
                ast::ItemFn(_, _, _, _, ref blk) => {
                    blk
                }
                _ => tcx.sess.bug("unexpected item variant in has_nested_returns")
            }
        }
        Some(ast_map::NodeTraitItem(trait_item)) => {
            match trait_item.node {
                ast::MethodTraitItem(_, Some(ref body)) => body,
                ast::MethodTraitItem(_, None) => {
                    tcx.sess.bug("unexpected variant: required trait method \
                                  in has_nested_returns")
                }
                ast::TypeTraitItem(..) => {
                    tcx.sess.bug("unexpected variant: associated type trait item in \
                                  has_nested_returns")
                }
            }
        }
        Some(ast_map::NodeImplItem(impl_item)) => {
            match impl_item.node {
                ast::MethodImplItem(_, ref body) => body,
                ast::TypeImplItem(_) => {
                    tcx.sess.bug("unexpected variant: associated type impl item in \
                                  has_nested_returns")
                }
                ast::MacImplItem(_) => {
                    tcx.sess.bug("unexpected variant: unexpanded macro impl item in \
                                  has_nested_returns")
                }
            }
        }
        Some(ast_map::NodeExpr(e)) => {
            match e.node {
                ast::ExprClosure(_, _, ref blk) => blk,
                _ => tcx.sess.bug("unexpected expr variant in has_nested_returns")
            }
        }
        Some(ast_map::NodeVariant(..)) |
        Some(ast_map::NodeStructCtor(..)) => return (ast::DUMMY_NODE_ID, None),

        // glue, shims, etc
        None if id == ast::DUMMY_NODE_ID => return (ast::DUMMY_NODE_ID, None),

        _ => tcx.sess.bug(&format!("unexpected variant in has_nested_returns: {}",
                                   tcx.map.path_to_string(id)))
    };

    (blk.id, Some(cfg::CFG::new(tcx, blk)))
}

// Checks for the presence of "nested returns" in a function.
// Nested returns are when the inner expression of a return expression
// (the 'expr' in 'return expr') contains a return expression. Only cases
// where the outer return is actually reachable are considered. Implicit
// returns from the end of blocks are considered as well.
//
// This check is needed to handle the case where the inner expression is
// part of a larger expression that may have already partially-filled the
// return slot alloca. This can cause errors related to clean-up due to
// the clobbering of the existing value in the return slot.
fn has_nested_returns(tcx: &ty::ctxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool {
    for n in cfg.graph.depth_traverse(cfg.entry) {
        match tcx.map.find(n.id()) {
            Some(ast_map::NodeExpr(ex)) => {
                if let ast::ExprRet(Some(ref ret_expr)) = ex.node {
                    let mut visitor = FindNestedReturn::new();
                    visit::walk_expr(&mut visitor, &**ret_expr);
                    if visitor.found {
                        return true;
                    }
                }
            }
            Some(ast_map::NodeBlock(blk)) if blk.id == blk_id => {
                let mut visitor = FindNestedReturn::new();
                visit::walk_expr_opt(&mut visitor, &blk.expr);
                if visitor.found {
                    return true;
                }
            }
            _ => {}
        }
    }

    return false;
}
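
// A hedged example of source code that trips this check (hypothetical, not
// from the compiler): the operand of the outer `return` itself contains a
// `return`, so both may write to the same return slot.
//
//     fn f(c: bool) -> i32 {
//         return if c { return 1; } else { 2 };
//     }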

// NB: must keep 4 fns in sync:
//
//  - type_of_fn
//  - create_datums_for_fn_args.
//  - new_fn_ctxt
//  - trans_args
//
// Be warned! You must call `init_function` before doing anything with the
// returned function context.
pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
                             llfndecl: ValueRef,
                             id: ast::NodeId,
                             has_env: bool,
                             output_type: ty::FnOutput<'tcx>,
                             param_substs: &'tcx Substs<'tcx>,
                             sp: Option<Span>,
                             block_arena: &'a TypedArena<common::BlockS<'a, 'tcx>>)
                             -> FunctionContext<'a, 'tcx> {
    common::validate_substs(param_substs);

    debug!("new_fn_ctxt(path={}, id={}, param_substs={})",
           if id == !0 {
               "".to_string()
           } else {
               ccx.tcx().map.path_to_string(id).to_string()
           },
           id, param_substs.repr(ccx.tcx()));

    let uses_outptr = match output_type {
        ty::FnConverging(output_type) => {
            let substd_output_type =
                monomorphize::apply_param_substs(ccx.tcx(), param_substs, &output_type);
            type_of::return_uses_outptr(ccx, substd_output_type)
        }
        ty::FnDiverging => false
    };
    let debug_context = debuginfo::create_function_debug_context(ccx, id, param_substs, llfndecl);
    let (blk_id, cfg) = build_cfg(ccx.tcx(), id);
    let nested_returns = if let Some(ref cfg) = cfg {
        has_nested_returns(ccx.tcx(), cfg, blk_id)
    } else {
        false
    };

    let mut fcx = FunctionContext {
        llfn: llfndecl,
        llenv: None,
        llretslotptr: Cell::new(None),
        param_env: ty::empty_parameter_environment(ccx.tcx()),
        alloca_insert_pt: Cell::new(None),
        llreturn: Cell::new(None),
        needs_ret_allocas: nested_returns,
        personality: Cell::new(None),
        caller_expects_out_pointer: uses_outptr,
        lllocals: RefCell::new(NodeMap()),
        llupvars: RefCell::new(NodeMap()),
        id: id,
        param_substs: param_substs,
        span: sp,
        block_arena: block_arena,
        ccx: ccx,
        debug_context: debug_context,
        scopes: RefCell::new(Vec::new()),
        cfg: cfg
    };

    if has_env {
        fcx.llenv = Some(get_param(fcx.llfn, fcx.env_arg_pos() as c_uint))
    }

    fcx
}

/// Performs setup on a newly created function, creating the entry scope block
/// and allocating space for the return pointer.
pub fn init_function<'a, 'tcx>(fcx: &'a FunctionContext<'a, 'tcx>,
                               skip_retptr: bool,
                               output: ty::FnOutput<'tcx>)
                               -> Block<'a, 'tcx> {
    let entry_bcx = fcx.new_temp_block("entry-block");

    // Use a dummy instruction as the insertion point for all allocas.
    // This is later removed in FunctionContext::cleanup.
    fcx.alloca_insert_pt.set(Some(unsafe {
        Load(entry_bcx, C_null(Type::i8p(fcx.ccx)));
        llvm::LLVMGetFirstInstruction(entry_bcx.llbb)
    }));

    if let ty::FnConverging(output_type) = output {
        // This shouldn't need to recompute the return type,
        // as new_fn_ctxt did it already.
        let substd_output_type = fcx.monomorphize(&output_type);
        if !return_type_is_void(fcx.ccx, substd_output_type) {
            // If the function returns nil/bot, there is no real return
            // value, so do not set `llretslotptr`.
            if !skip_retptr || fcx.caller_expects_out_pointer {
                // Otherwise, we normally allocate the llretslotptr, unless we
                // have been instructed to skip it for immediate return
                // values.
                fcx.llretslotptr.set(Some(make_return_slot_pointer(fcx, substd_output_type)));
            }
        }
    }

    entry_bcx
}

// NB: must keep 4 fns in sync:
//
//  - type_of_fn
//  - create_datums_for_fn_args.
//  - new_fn_ctxt
//  - trans_args

pub fn arg_kind<'a, 'tcx>(cx: &FunctionContext<'a, 'tcx>, t: Ty<'tcx>)
                          -> datum::Rvalue {
    use trans::datum::{ByRef, ByValue};

    datum::Rvalue {
        mode: if arg_is_indirect(cx.ccx, t) { ByRef } else { ByValue }
    }
}

// work around bizarre resolve errors
pub type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>;

// create_datums_for_fn_args: creates rvalue datums for each of the
// incoming function arguments. These will later be stored into
// appropriate lvalue datums.
pub fn create_datums_for_fn_args<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                                           arg_tys: &[Ty<'tcx>])
                                           -> Vec<RvalueDatum<'tcx>> {
    let _icx = push_ctxt("create_datums_for_fn_args");

    // Return an array wrapping the ValueRefs that we get from `get_param` for
    // each argument into datums.
    arg_tys.iter().enumerate().map(|(i, &arg_ty)| {
        let llarg = get_param(fcx.llfn, fcx.arg_pos(i) as c_uint);
        datum::Datum::new(llarg, arg_ty, arg_kind(fcx, arg_ty))
    }).collect()
}

/// Creates rvalue datums for each of the incoming function arguments and
/// tuples the arguments. These will later be stored into appropriate lvalue
/// datums.
///
/// FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
fn create_datums_for_fn_args_under_call_abi<'blk, 'tcx>(
        mut bcx: Block<'blk, 'tcx>,
        arg_scope: cleanup::CustomScopeIndex,
        arg_tys: &[Ty<'tcx>])
        -> Vec<RvalueDatum<'tcx>> {
    let mut result = Vec::new();
    for (i, &arg_ty) in arg_tys.iter().enumerate() {
        if i < arg_tys.len() - 1 {
            // Regular argument.
            let llarg = get_param(bcx.fcx.llfn, bcx.fcx.arg_pos(i) as c_uint);
            result.push(datum::Datum::new(llarg, arg_ty, arg_kind(bcx.fcx,
                                                                  arg_ty)));
            continue
        }

        // This is the last argument. Tuple it.
        match arg_ty.sty {
            ty::ty_tup(ref tupled_arg_tys) => {
                let tuple_args_scope_id = cleanup::CustomScope(arg_scope);
                let tuple =
                    unpack_datum!(bcx,
                                  datum::lvalue_scratch_datum(bcx,
                                                              arg_ty,
                                                              "tupled_args",
                                                              tuple_args_scope_id,
                                                              (),
                                                              |(),
                                                               mut bcx,
                                                               llval| {
                        for (j, &tupled_arg_ty) in
                                tupled_arg_tys.iter().enumerate() {
                            let llarg =
                                get_param(bcx.fcx.llfn,
                                          bcx.fcx.arg_pos(i + j) as c_uint);
                            let lldest = GEPi(bcx, llval, &[0, j]);
                            let datum = datum::Datum::new(
                                llarg,
                                tupled_arg_ty,
                                arg_kind(bcx.fcx, tupled_arg_ty));
                            bcx = datum.store_to(bcx, lldest);
                        }
                        bcx
                    }));
                let tuple = unpack_datum!(bcx,
                                          tuple.to_expr_datum()
                                               .to_rvalue_datum(bcx,
                                                                "argtuple"));
                result.push(tuple);
            }
            _ => {
                bcx.tcx().sess.bug("last argument of a function with \
                                    `rust-call` ABI isn't a tuple?!")
            }
        };
    }

    result
}
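
// A hedged illustration of the "rust-call" untupling handled above
// (hypothetical signature): a closure invoked as `f(a, b)` compiles to a
// function whose last formal parameter has the tuple type `(A, B)`, while
// the LLVM arguments still arrive individually; the loop above rebuilds
// the tuple from the separate `get_param` values.
//
//     fn call_once(self, args: (A, B)) -> R   // args arrive as two llargs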
1349
1350 fn copy_args_to_allocas<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
1351 arg_scope: cleanup::CustomScopeIndex,
1352 args: &[ast::Arg],
1353 arg_datums: Vec<RvalueDatum<'tcx>>)
1354 -> Block<'blk, 'tcx> {
1355 debug!("copy_args_to_allocas");
1356
1357 let _icx = push_ctxt("copy_args_to_allocas");
1358 let mut bcx = bcx;
1359
1360 let arg_scope_id = cleanup::CustomScope(arg_scope);
1361
1362 for (i, arg_datum) in arg_datums.into_iter().enumerate() {
1363 // For certain mode/type combinations, the raw llarg values are passed
1364 // by value. However, within the fn body itself, we want to always
1365 // have all locals and arguments be by-ref so that we can cancel the
1366 // cleanup and for better interaction with LLVM's debug info. So, if
1367 // the argument would be passed by value, we store it into an alloca.
1368 // This alloca should be optimized away by LLVM's mem-to-reg pass in
1369 // the event it's not truly needed.
1370
1371 bcx = _match::store_arg(bcx, &*args[i].pat, arg_datum, arg_scope_id);
1372 debuginfo::create_argument_metadata(bcx, &args[i]);
1373 }
1374
1375 bcx
1376 }
1377
1378 // Ties up the llstaticallocas -> llloadenv -> lltop edges,
1379 // and builds the return block.
1380 pub fn finish_fn<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
1381 last_bcx: Block<'blk, 'tcx>,
1382 retty: ty::FnOutput<'tcx>,
1383 ret_debug_loc: DebugLoc) {
1384 let _icx = push_ctxt("finish_fn");
1385
1386 let ret_cx = match fcx.llreturn.get() {
1387 Some(llreturn) => {
1388 if !last_bcx.terminated.get() {
1389 Br(last_bcx, llreturn, DebugLoc::None);
1390 }
1391 raw_block(fcx, false, llreturn)
1392 }
1393 None => last_bcx
1394 };
1395
1396 // This shouldn't need to recompute the return type,
1397 // as new_fn_ctxt did it already.
1398 let substd_retty = fcx.monomorphize(&retty);
1399 build_return_block(fcx, ret_cx, substd_retty, ret_debug_loc);
1400
1401 debuginfo::clear_source_location(fcx);
1402 fcx.cleanup();
1403 }
1404
1405 // Builds the return block for a function.
1406 pub fn build_return_block<'blk, 'tcx>(fcx: &FunctionContext<'blk, 'tcx>,
1407 ret_cx: Block<'blk, 'tcx>,
1408 retty: ty::FnOutput<'tcx>,
1409 ret_debug_location: DebugLoc) {
1410 if fcx.llretslotptr.get().is_none() ||
1411 (!fcx.needs_ret_allocas && fcx.caller_expects_out_pointer) {
1412 return RetVoid(ret_cx, ret_debug_location);
1413 }
1414
1415 let retslot = if fcx.needs_ret_allocas {
1416 Load(ret_cx, fcx.llretslotptr.get().unwrap())
1417 } else {
1418 fcx.llretslotptr.get().unwrap()
1419 };
1420 let retptr = Value(retslot);
1421 match retptr.get_dominating_store(ret_cx) {
1422 // If there's only a single store to the ret slot, we can directly return
1423 // the value that was stored and omit the store and the alloca
1424 Some(s) => {
1425 let retval = s.get_operand(0).unwrap().get();
1426 s.erase_from_parent();
1427
1428 if retptr.has_no_uses() {
1429 retptr.erase_from_parent();
1430 }
1431
1432 let retval = if retty == ty::FnConverging(fcx.ccx.tcx().types.bool) {
1433 Trunc(ret_cx, retval, Type::i1(fcx.ccx))
1434 } else {
1435 retval
1436 };
1437
1438 if fcx.caller_expects_out_pointer {
1439 if let ty::FnConverging(retty) = retty {
1440 store_ty(ret_cx, retval, get_param(fcx.llfn, 0), retty);
1441 }
1442 RetVoid(ret_cx, ret_debug_location)
1443 } else {
1444 Ret(ret_cx, retval, ret_debug_location)
1445 }
1446 }
1447 // Otherwise, copy the return value to the ret slot
1448 None => match retty {
1449 ty::FnConverging(retty) => {
1450 if fcx.caller_expects_out_pointer {
1451 memcpy_ty(ret_cx, get_param(fcx.llfn, 0), retslot, retty);
1452 RetVoid(ret_cx, ret_debug_location)
1453 } else {
1454 Ret(ret_cx, load_ty(ret_cx, retslot, retty), ret_debug_location)
1455 }
1456 }
1457 ty::FnDiverging => {
1458 if fcx.caller_expects_out_pointer {
1459 RetVoid(ret_cx, ret_debug_location)
1460 } else {
1461 Ret(ret_cx, C_undef(Type::nil(fcx.ccx)), ret_debug_location)
1462 }
1463 }
1464 }
1465 }
1466 }
1467
1468 /// Builds an LLVM function out of a source function.
1469 ///
1470 /// If the function closes over its environment a closure will be returned.
1471 pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1472 decl: &ast::FnDecl,
1473 body: &ast::Block,
1474 llfndecl: ValueRef,
1475 param_substs: &'tcx Substs<'tcx>,
1476 fn_ast_id: ast::NodeId,
1477 _attributes: &[ast::Attribute],
1478 output_type: ty::FnOutput<'tcx>,
1479 abi: Abi,
1480 closure_env: closure::ClosureEnv<'b>) {
1481 ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);
1482
1483 let _icx = push_ctxt("trans_closure");
1484 attributes::emit_uwtable(llfndecl, true);
1485
1486 debug!("trans_closure(..., param_substs={})",
1487 param_substs.repr(ccx.tcx()));
1488
1489 let has_env = match closure_env {
1490 closure::ClosureEnv::Closure(_) => true,
1491 closure::ClosureEnv::NotClosure => false,
1492 };
1493
1494 let (arena, fcx): (TypedArena<_>, FunctionContext);
1495 arena = TypedArena::new();
1496 fcx = new_fn_ctxt(ccx,
1497 llfndecl,
1498 fn_ast_id,
1499 has_env,
1500 output_type,
1501 param_substs,
1502 Some(body.span),
1503 &arena);
1504 let mut bcx = init_function(&fcx, false, output_type);
1505
1506 // cleanup scope for the incoming arguments
1507 let fn_cleanup_debug_loc =
1508 debuginfo::get_cleanup_debug_loc_for_ast_node(ccx, fn_ast_id, body.span, true);
1509 let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc);
1510
1511 let block_ty = node_id_type(bcx, body.id);
1512
1513 // Set up arguments to the function.
1514 let monomorphized_arg_types =
1515 decl.inputs.iter()
1516 .map(|arg| node_id_type(bcx, arg.id))
1517 .collect::<Vec<_>>();
1518 let monomorphized_arg_types = match closure_env {
1519 closure::ClosureEnv::NotClosure => {
1520 monomorphized_arg_types
1521 }
1522
1523 // Tuple up closure argument types for the "rust-call" ABI.
1524 closure::ClosureEnv::Closure(_) => {
1525 vec![ty::mk_tup(ccx.tcx(), monomorphized_arg_types)]
1526 }
1527 };
1528 for monomorphized_arg_type in &monomorphized_arg_types {
1529 debug!("trans_closure: monomorphized_arg_type: {}",
1530 ty_to_string(ccx.tcx(), *monomorphized_arg_type));
1531 }
1532 debug!("trans_closure: function lltype: {}",
1533 bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn));
1534
1535 let arg_datums = match closure_env {
1536 closure::ClosureEnv::NotClosure if abi == RustCall => {
1537 create_datums_for_fn_args_under_call_abi(bcx, arg_scope, &monomorphized_arg_types[..])
1538 }
1539 _ => {
1540 let arg_tys = untuple_arguments_if_necessary(ccx, &monomorphized_arg_types, abi);
1541 create_datums_for_fn_args(&fcx, &arg_tys)
1542 }
1543 };
1544
1545 bcx = copy_args_to_allocas(bcx, arg_scope, &decl.inputs, arg_datums);
1546
1547 bcx = closure_env.load(bcx, cleanup::CustomScope(arg_scope));
1548
1549 // Up until here, IR instructions for this function have explicitly not been annotated with
1550 // source code location, so we don't step into call setup code. From here on, source location
1551 // emitting should be enabled.
1552 debuginfo::start_emitting_source_locations(&fcx);
1553
1554 let dest = match fcx.llretslotptr.get() {
1555 Some(_) => expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(block_ty), "iret_slot")),
1556 None => {
1557 assert!(type_is_zero_size(bcx.ccx(), block_ty));
1558 expr::Ignore
1559 }
1560 };
1561
1562 // This call to trans_block is the place where we bridge between
1563 // translation calls that don't have a return value (trans_crate,
1564 // trans_mod, trans_item, et cetera) and those that do
1565 // (trans_block, trans_expr, et cetera).
1566 bcx = controlflow::trans_block(bcx, body, dest);
1567
1568 match dest {
1569 expr::SaveIn(slot) if fcx.needs_ret_allocas => {
1570 Store(bcx, slot, fcx.llretslotptr.get().unwrap());
1571 }
1572 _ => {}
1573 }
1574
1575 match fcx.llreturn.get() {
1576 Some(_) => {
1577 Br(bcx, fcx.return_exit_block(), DebugLoc::None);
1578 fcx.pop_custom_cleanup_scope(arg_scope);
1579 }
1580 None => {
1581 // Microoptimization writ large: avoid creating a separate
1582 // llreturn basic block
1583 bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_scope);
1584 }
1585 };
1586
1587 // Put return block after all other blocks.
1588 // This somewhat improves single-stepping experience in debugger.
1589 unsafe {
1590 let llreturn = fcx.llreturn.get();
1591 if let Some(llreturn) = llreturn {
1592 llvm::LLVMMoveBasicBlockAfter(llreturn, bcx.llbb);
1593 }
1594 }
1595
1596 let ret_debug_loc = DebugLoc::At(fn_cleanup_debug_loc.id,
1597 fn_cleanup_debug_loc.span);
1598
1599 // Insert the mandatory first few basic blocks before lltop.
1600 finish_fn(&fcx, bcx, output_type, ret_debug_loc);
1601 }
1602
1603 /// Creates an LLVM function corresponding to a source language function.
1604 pub fn trans_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1605 decl: &ast::FnDecl,
1606 body: &ast::Block,
1607 llfndecl: ValueRef,
1608 param_substs: &'tcx Substs<'tcx>,
1609 id: ast::NodeId,
1610 attrs: &[ast::Attribute]) {
1611 let _s = StatRecorder::new(ccx, ccx.tcx().map.path_to_string(id).to_string());
1612 debug!("trans_fn(param_substs={})", param_substs.repr(ccx.tcx()));
1613 let _icx = push_ctxt("trans_fn");
1614 let fn_ty = ty::node_id_to_type(ccx.tcx(), id);
1615 let output_type = ty::erase_late_bound_regions(ccx.tcx(), &ty::ty_fn_ret(fn_ty));
1616 let abi = ty::ty_fn_abi(fn_ty);
1617 trans_closure(ccx, decl, body, llfndecl, param_substs, id, attrs, output_type, abi,
1618 closure::ClosureEnv::NotClosure);
1619 }
1620
1621 pub fn trans_enum_variant<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1622 _enum_id: ast::NodeId,
1623 variant: &ast::Variant,
1624 _args: &[ast::VariantArg],
1625 disr: ty::Disr,
1626 param_substs: &'tcx Substs<'tcx>,
1627 llfndecl: ValueRef) {
1628 let _icx = push_ctxt("trans_enum_variant");
1629
1630 trans_enum_variant_or_tuple_like_struct(
1631 ccx,
1632 variant.node.id,
1633 disr,
1634 param_substs,
1635 llfndecl);
1636 }
1637
1638 pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
1639 ctor_ty: Ty<'tcx>,
1640 disr: ty::Disr,
1641 args: callee::CallArgs,
1642 dest: expr::Dest,
1643 debug_loc: DebugLoc)
1644 -> Result<'blk, 'tcx> {
1645
1646 let ccx = bcx.fcx.ccx;
1647 let tcx = ccx.tcx();
1648
1649 let result_ty = match ctor_ty.sty {
1650 ty::ty_bare_fn(_, ref bft) => {
1651 ty::erase_late_bound_regions(bcx.tcx(), &bft.sig.output()).unwrap()
1652 }
1653 _ => ccx.sess().bug(
1654 &format!("trans_enum_variant_constructor: \
1655 unexpected ctor return type {}",
1656 ctor_ty.repr(tcx)))
1657 };
1658
1659 // Get location to store the result. If the user does not care about
1660 // the result, just make a stack slot
1661 let llresult = match dest {
1662 expr::SaveIn(d) => d,
1663 expr::Ignore => {
1664 if !type_is_zero_size(ccx, result_ty) {
1665 alloc_ty(bcx, result_ty, "constructor_result")
1666 } else {
1667 C_undef(type_of::type_of(ccx, result_ty))
1668 }
1669 }
1670 };
1671
1672 if !type_is_zero_size(ccx, result_ty) {
1673 match args {
1674 callee::ArgExprs(exprs) => {
1675 let fields = exprs.iter().map(|x| &**x).enumerate().collect::<Vec<_>>();
1676 bcx = expr::trans_adt(bcx,
1677 result_ty,
1678 disr,
1679 &fields[..],
1680 None,
1681 expr::SaveIn(llresult),
1682 debug_loc);
1683 }
1684 _ => ccx.sess().bug("expected expr as arguments for variant/struct tuple constructor")
1685 }
1686 }
1687
1688 // If the caller doesn't care about the result
1689 // drop the temporary we made
1690 let bcx = match dest {
1691 expr::SaveIn(_) => bcx,
1692 expr::Ignore => {
1693 let bcx = glue::drop_ty(bcx, llresult, result_ty, debug_loc);
1694 if !type_is_zero_size(ccx, result_ty) {
1695 call_lifetime_end(bcx, llresult);
1696 }
1697 bcx
1698 }
1699 };
1700
1701 Result::new(bcx, llresult)
1702 }
1703
1704 pub fn trans_tuple_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1705 _fields: &[ast::StructField],
1706 ctor_id: ast::NodeId,
1707 param_substs: &'tcx Substs<'tcx>,
1708 llfndecl: ValueRef) {
1709 let _icx = push_ctxt("trans_tuple_struct");
1710
1711 trans_enum_variant_or_tuple_like_struct(
1712 ccx,
1713 ctor_id,
1714 0,
1715 param_substs,
1716 llfndecl);
1717 }
1718
1719 fn trans_enum_variant_or_tuple_like_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
1720 ctor_id: ast::NodeId,
1721 disr: ty::Disr,
1722 param_substs: &'tcx Substs<'tcx>,
1723 llfndecl: ValueRef) {
1724 let ctor_ty = ty::node_id_to_type(ccx.tcx(), ctor_id);
1725 let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty);
1726
1727 let result_ty = match ctor_ty.sty {
1728 ty::ty_bare_fn(_, ref bft) => {
1729 ty::erase_late_bound_regions(ccx.tcx(), &bft.sig.output())
1730 }
1731 _ => ccx.sess().bug(
1732 &format!("trans_enum_variant_or_tuple_like_struct: \
1733 unexpected ctor return type {}",
1734 ty_to_string(ccx.tcx(), ctor_ty)))
1735 };
1736
1737 let (arena, fcx): (TypedArena<_>, FunctionContext);
1738 arena = TypedArena::new();
1739 fcx = new_fn_ctxt(ccx, llfndecl, ctor_id, false, result_ty,
1740 param_substs, None, &arena);
1741 let bcx = init_function(&fcx, false, result_ty);
1742
1743 assert!(!fcx.needs_ret_allocas);
1744
1745 let arg_tys =
1746 ty::erase_late_bound_regions(
1747 ccx.tcx(), &ty::ty_fn_args(ctor_ty));
1748
1749 let arg_datums = create_datums_for_fn_args(&fcx, &arg_tys[..]);
1750
1751 if !type_is_zero_size(fcx.ccx, result_ty.unwrap()) {
1752 let dest = fcx.get_ret_slot(bcx, result_ty, "eret_slot");
1753 let repr = adt::represent_type(ccx, result_ty.unwrap());
1754 for (i, arg_datum) in arg_datums.into_iter().enumerate() {
1755 let lldestptr = adt::trans_field_ptr(bcx,
1756 &*repr,
1757 dest,
1758 disr,
1759 i);
1760 arg_datum.store_to(bcx, lldestptr);
1761 }
1762 adt::trans_set_discr(bcx, &*repr, dest, disr);
1763 }
1764
1765 finish_fn(&fcx, bcx, result_ty, DebugLoc::None);
1766 }
1767
1768 fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &ast::EnumDef, sp: Span, id: ast::NodeId) {
1769 let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully
1770
1771 let print_info = ccx.sess().print_enum_sizes();
1772
1773 let levels = ccx.tcx().node_lint_levels.borrow();
1774 let lint_id = lint::LintId::of(lint::builtin::VARIANT_SIZE_DIFFERENCES);
1775 let lvlsrc = levels.get(&(id, lint_id));
1776 let is_allow = lvlsrc.map_or(true, |&(lvl, _)| lvl == lint::Allow);
1777
1778 if is_allow && !print_info {
1779 // we're not interested in anything here
1780 return
1781 }
1782
1783 let ty = ty::node_id_to_type(ccx.tcx(), id);
1784 let avar = adt::represent_type(ccx, ty);
1785 match *avar {
1786 adt::General(_, ref variants, _) => {
1787 for var in variants {
1788 let mut size = 0;
1789 for field in var.fields.iter().skip(1) {
1790 // skip the discriminant
1791 size += llsize_of_real(ccx, sizing_type_of(ccx, *field));
1792 }
1793 sizes.push(size);
1794 }
1795 },
1796 _ => { /* its size is either constant or unimportant */ }
1797 }
1798
1799 let (largest, slargest, largest_index) = sizes.iter().enumerate().fold((0, 0, 0),
1800 |(l, s, li), (idx, &size)|
1801 if size > l {
1802 (size, l, idx)
1803 } else if size > s {
1804 (l, size, li)
1805 } else {
1806 (l, s, li)
1807 }
1808 );
1809
1810 if print_info {
1811 let llty = type_of::sizing_type_of(ccx, ty);
1812
1813 let sess = &ccx.tcx().sess;
1814 sess.span_note(sp, &*format!("total size: {} bytes", llsize_of_real(ccx, llty)));
1815 match *avar {
1816 adt::General(..) => {
1817 for (i, var) in enum_def.variants.iter().enumerate() {
1818 ccx.tcx().sess.span_note(var.span,
1819 &*format!("variant data: {} bytes", sizes[i]));
1820 }
1821 }
1822 _ => {}
1823 }
1824 }
1825
1826 // We only warn if the largest variant is more than three times as large
1827 // as the second-largest (and the second-largest has non-zero size).
1828 if !is_allow && largest > slargest * 3 && slargest > 0 {
1829 // Use lint::raw_emit_lint rather than sess.add_lint because the lint-printing
1830 // pass for the latter already ran.
1831 lint::raw_emit_lint(&ccx.tcx().sess, lint::builtin::VARIANT_SIZE_DIFFERENCES,
1832 *lvlsrc.unwrap(), Some(sp),
1833 &format!("enum variant is more than three times larger \
1834 ({} bytes) than the next largest (ignoring padding)",
1835 largest));
1836
1837 ccx.sess().span_note(enum_def.variants[largest_index].span,
1838 "this variant is the largest");
1839 }
1840 }
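// A hedged example (hypothetical enum, not from this crate) of what trips
// the VARIANT_SIZE_DIFFERENCES lint above:
//
//     enum Payload {
//         Small(u8),        // 1 byte of variant data
//         Big([u8; 100]),   // 100 bytes: more than 3x the next largest
//     }
//
// Every `Payload` value is as large as its largest variant (plus the
// discriminant), so the lint points at `Big` as the culprit.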
1841
1842 pub struct TransItemVisitor<'a, 'tcx: 'a> {
1843 pub ccx: &'a CrateContext<'a, 'tcx>,
1844 }
1845
1846 impl<'a, 'tcx, 'v> Visitor<'v> for TransItemVisitor<'a, 'tcx> {
1847 fn visit_item(&mut self, i: &ast::Item) {
1848 trans_item(self.ccx, i);
1849 }
1850 }
1851
1852 pub fn llvm_linkage_by_name(name: &str) -> Option<Linkage> {
1853 // Use the names from src/llvm/docs/LangRef.rst here. Most of these linkage
1854 // types apply only to variable declarations and may not make much sense for
1855 // Rust code in the first place, but we whitelist them anyway and trust that
1856 // the user knows what they're doing. Who knows, unanticipated use cases may
1857 // pop up in the future.
1858 //
1859 // ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
1860 // and don't have to be: LLVM treats them as no-ops.
1861 match name {
1862 "appending" => Some(llvm::AppendingLinkage),
1863 "available_externally" => Some(llvm::AvailableExternallyLinkage),
1864 "common" => Some(llvm::CommonLinkage),
1865 "extern_weak" => Some(llvm::ExternalWeakLinkage),
1866 "external" => Some(llvm::ExternalLinkage),
1867 "internal" => Some(llvm::InternalLinkage),
1868 "linkonce" => Some(llvm::LinkOnceAnyLinkage),
1869 "linkonce_odr" => Some(llvm::LinkOnceODRLinkage),
1870 "private" => Some(llvm::PrivateLinkage),
1871 "weak" => Some(llvm::WeakAnyLinkage),
1872 "weak_odr" => Some(llvm::WeakODRLinkage),
1873 _ => None,
1874 }
1875 }
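// A minimal sketch of the table above, assuming this module can be compiled
// in test mode; the test module and its name are ours, not upstream's.
#[cfg(test)]
mod llvm_linkage_by_name_tests {
    use super::llvm_linkage_by_name;

    #[test]
    fn known_and_unknown_names() {
        // Whitelisted LangRef names map to Some(..) ...
        assert!(llvm_linkage_by_name("weak_odr").is_some());
        assert!(llvm_linkage_by_name("internal").is_some());
        // ... while unsupported or unknown names map to None.
        assert!(llvm_linkage_by_name("dllexport").is_none());
        assert!(llvm_linkage_by_name("bogus").is_none());
    }
}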
1876
1877
1878 /// Enum describing the origin of an LLVM `Value`, for linkage purposes.
1879 #[derive(Copy, Clone)]
1880 pub enum ValueOrigin {
1881 /// The LLVM `Value` is in this context because the corresponding item was
1882 /// assigned to the current compilation unit.
1883 OriginalTranslation,
1884 /// The `Value`'s corresponding item was assigned to some other compilation
1885 /// unit, but the `Value` was translated in this context anyway because the
1886 /// item is marked `#[inline]`.
1887 InlinedCopy,
1888 }
1889
1890 /// Set the appropriate linkage for an LLVM `ValueRef` (function or global).
1891 /// If the `llval` is the direct translation of a specific Rust item, `id`
1892 /// should be set to the `NodeId` of that item. (This mapping should be
1893 /// 1-to-1, so monomorphizations and drop/visit glue should have `id` set to
1894 /// `None`.) `llval_origin` indicates whether `llval` is the translation of an
1895 /// item assigned to `ccx`'s compilation unit or an inlined copy of an item
1896 /// assigned to a different compilation unit.
1897 pub fn update_linkage(ccx: &CrateContext,
1898 llval: ValueRef,
1899 id: Option<ast::NodeId>,
1900 llval_origin: ValueOrigin) {
1901 match llval_origin {
1902 InlinedCopy => {
1903 // `llval` is a translation of an item defined in a separate
1904 // compilation unit. This only makes sense if there are at least
1905 // two compilation units.
1906 assert!(ccx.sess().opts.cg.codegen_units > 1);
1907 // `llval` is a copy of something defined elsewhere, so use
1908 // `AvailableExternallyLinkage` to avoid duplicating code in the
1909 // output.
1910 llvm::SetLinkage(llval, llvm::AvailableExternallyLinkage);
1911 return;
1912 },
1913 OriginalTranslation => {},
1914 }
1915
1916 if let Some(id) = id {
1917 let item = ccx.tcx().map.get(id);
1918 if let ast_map::NodeItem(i) = item {
1919 if let Some(name) = attr::first_attr_value_str_by_name(&i.attrs, "linkage") {
1920 if let Some(linkage) = llvm_linkage_by_name(&name) {
1921 llvm::SetLinkage(llval, linkage);
1922 } else {
1923 ccx.sess().span_fatal(i.span, "invalid linkage specified");
1924 }
1925 return;
1926 }
1927 }
1928 }
1929
1930 match id {
1931 Some(id) if ccx.reachable().contains(&id) => {
1932 llvm::SetLinkage(llval, llvm::ExternalLinkage);
1933 },
1934 _ => {
1935 // `id` does not refer to an item in `ccx.reachable`.
1936 if ccx.sess().opts.cg.codegen_units > 1 {
1937 llvm::SetLinkage(llval, llvm::ExternalLinkage);
1938 } else {
1939 llvm::SetLinkage(llval, llvm::InternalLinkage);
1940 }
1941 },
1942 }
1943 }
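// Hedged surface example of the attribute consulted above (hypothetical item;
// the `linkage` attribute is feature-gated in user code):
//
//     #[linkage = "available_externally"]
//     pub static FOO: i32 = 0;
//
// An unknown name in that position hits the span_fatal("invalid linkage
// specified") branch instead.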
1944
1945 pub fn trans_item(ccx: &CrateContext, item: &ast::Item) {
1946 let _icx = push_ctxt("trans_item");
1947
1948 let from_external = ccx.external_srcs().borrow().contains_key(&item.id);
1949
1950 match item.node {
1951 ast::ItemFn(ref decl, _fn_style, abi, ref generics, ref body) => {
1952 if !generics.is_type_parameterized() {
1953 let trans_everywhere = attr::requests_inline(&item.attrs);
1954 // Ignore `trans_everywhere` for cross-crate inlined items
1955 // (`from_external`). `trans_item` will be called once for each
1956 // compilation unit that references the item, so it will still get
1957 // translated everywhere it's needed.
1958 for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
1959 let llfn = get_item_val(ccx, item.id);
1960 let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
1961 if abi != Rust {
1962 foreign::trans_rust_fn_with_foreign_abi(ccx, &**decl, &**body, &item.attrs,
1963 llfn, empty_substs, item.id, None);
1964 } else {
1965 trans_fn(ccx, &**decl, &**body, llfn, empty_substs, item.id, &item.attrs);
1966 }
1967 update_linkage(ccx, llfn, Some(item.id),
1968 if is_origin { OriginalTranslation } else { InlinedCopy });
1969
1970 if is_entry_fn(ccx.sess(), item.id) {
1971 create_entry_wrapper(ccx, item.span, llfn);
1972 // check for the #[rustc_error] annotation, which forces an
1973 // error in trans. This is used to write compile-fail tests
1974 // that actually test that compilation succeeds without
1975 // reporting an error.
1976 if ty::has_attr(ccx.tcx(), local_def(item.id), "rustc_error") {
1977 ccx.tcx().sess.span_fatal(item.span, "compilation successful");
1978 }
1979 }
1980 }
1981 }
1982
1983 // Be sure to travel more than just one layer deep to catch nested
1984 // items in blocks and such.
1985 let mut v = TransItemVisitor{ ccx: ccx };
1986 v.visit_block(&**body);
1987 }
1988 ast::ItemImpl(_, _, ref generics, _, _, ref impl_items) => {
1989 meth::trans_impl(ccx,
1990 item.ident,
1991 &impl_items[..],
1992 generics,
1993 item.id);
1994 }
1995 ast::ItemMod(ref m) => {
1996 trans_mod(&ccx.rotate(), m);
1997 }
1998 ast::ItemEnum(ref enum_definition, ref gens) => {
1999 if gens.ty_params.is_empty() {
2000 // sizes only make sense for non-generic types
2001
2002 enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
2003 }
2004 }
2005 ast::ItemConst(_, ref expr) => {
2006 // Recurse on the expression to catch items in blocks
2007 let mut v = TransItemVisitor{ ccx: ccx };
2008 v.visit_expr(&**expr);
2009 }
2010 ast::ItemStatic(_, m, ref expr) => {
2011 // Recurse on the expression to catch items in blocks
2012 let mut v = TransItemVisitor{ ccx: ccx };
2013 v.visit_expr(&**expr);
2014
2015 let g = consts::trans_static(ccx, m, item.id);
2016 update_linkage(ccx, g, Some(item.id), OriginalTranslation);
2017
2018 // Do static_assert checking. It can't really be done much earlier,
2019 // because we need to get the value of the bool out of LLVM.
2020 if attr::contains_name(&item.attrs, "static_assert") {
2021 if !ty::type_is_bool(ty::expr_ty(ccx.tcx(), expr)) {
2022 ccx.sess().span_fatal(expr.span,
2023 "can only have static_assert on a static \
2024 with type `bool`");
2025 }
2026 if m == ast::MutMutable {
2027 ccx.sess().span_fatal(expr.span,
2028 "cannot have static_assert on a mutable \
2029 static");
2030 }
2031
2032 let v = ccx.static_values().borrow().get(&item.id).unwrap().clone();
2033 unsafe {
2034 if llvm::LLVMConstIntGetZExtValue(v) == 0 {
2035 ccx.sess().span_fatal(expr.span, "static assertion failed");
2036 }
2037 }
2038 }
2039 },
2040 ast::ItemForeignMod(ref foreign_mod) => {
2041 foreign::trans_foreign_mod(ccx, foreign_mod);
2042 }
2043 ast::ItemTrait(..) => {
2044 // Inside this trait definition, we won't actually be translating any
2045 // functions, but the trait still needs to be walked. Otherwise default
2046 // methods with items will not get translated and will cause ICEs when
2047 // metadata time comes around.
2048 let mut v = TransItemVisitor{ ccx: ccx };
2049 visit::walk_item(&mut v, item);
2050 }
2051 _ => {/* fall through */ }
2052 }
2053 }
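// Hedged surface examples for the two attribute-driven checks above
// (hypothetical items; both attributes are feature-gated):
//
//     #[static_assert]
//     static CHECKED: bool = true; // trans aborts if this is false or mutable
//
//     #[rustc_error]
//     fn main() {} // forces the "compilation successful" error for tests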
2054
2055 // Translate a module. Doing this amounts to translating the items in the
2056 // module; there ends up being no artifact (aside from linkage names) of
2057 // separate modules in the compiled program. That's because modules exist
2058 // only as a convenience for humans working with the code, to organize names
2059 // and control visibility.
2060 pub fn trans_mod(ccx: &CrateContext, m: &ast::Mod) {
2061 let _icx = push_ctxt("trans_mod");
2062 for item in &m.items {
2063 trans_item(ccx, &**item);
2064 }
2065 }
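// Hedged illustration: for a hypothetical
//
//     mod outer { pub fn f() {} }
//
// trans_mod just recurses into `outer`; `f` is emitted like any top-level
// item, and `outer` survives only inside f's mangled symbol name.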
2066
2067
2068 // only use this for foreign function ABIs and glue, use `register_fn` for Rust functions
2069 pub fn register_fn_llvmty(ccx: &CrateContext,
2070 sp: Span,
2071 sym: String,
2072 node_id: ast::NodeId,
2073 cc: llvm::CallConv,
2074 llfty: Type) -> ValueRef {
2075 debug!("register_fn_llvmty id={} sym={}", node_id, sym);
2076
2077 let llfn = declare::define_fn(ccx, &sym[..], cc, llfty,
2078 ty::FnConverging(ty::mk_nil(ccx.tcx()))).unwrap_or_else(||{
2079 ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym));
2080 });
2081 finish_register_fn(ccx, sym, node_id, llfn);
2082 llfn
2083 }
2084
2085 fn finish_register_fn(ccx: &CrateContext, sym: String, node_id: ast::NodeId,
2086 llfn: ValueRef) {
2087 ccx.item_symbols().borrow_mut().insert(node_id, sym);
2088
2089 // The stack-exhaustion lang item must not itself have a split stack,
2090 // because otherwise hitting it would just exhaust the stack again; both it
2091 // and the eh_personality function need to be externally linkable.
2092 let def = ast_util::local_def(node_id);
2093 if ccx.tcx().lang_items.stack_exhausted() == Some(def) {
2094 attributes::split_stack(llfn, false);
2095 llvm::SetLinkage(llfn, llvm::ExternalLinkage);
2096 }
2097 if ccx.tcx().lang_items.eh_personality() == Some(def) {
2098 llvm::SetLinkage(llfn, llvm::ExternalLinkage);
2099 }
2100 }
2101
2102 fn register_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
2103 sp: Span,
2104 sym: String,
2105 node_id: ast::NodeId,
2106 node_type: Ty<'tcx>)
2107 -> ValueRef {
2108 if let ty::ty_bare_fn(_, ref f) = node_type.sty {
2109 if f.abi != Rust && f.abi != RustCall {
2110 ccx.sess().span_bug(sp, &format!("only the `{}` or `{}` calling conventions are valid \
2111 for this function; `{}` was specified",
2112 Rust.name(), RustCall.name(), f.abi.name()));
2113 }
2114 } else {
2115 ccx.sess().span_bug(sp, "expected bare rust function")
2116 }
2117
2118 let llfn = declare::define_rust_fn(ccx, &sym[..], node_type).unwrap_or_else(||{
2119 ccx.sess().span_fatal(sp, &format!("symbol `{}` is already defined", sym));
2120 });
2121 finish_register_fn(ccx, sym, node_id, llfn);
2122 llfn
2123 }
2124
2125 pub fn is_entry_fn(sess: &Session, node_id: ast::NodeId) -> bool {
2126 match *sess.entry_fn.borrow() {
2127 Some((entry_id, _)) => node_id == entry_id,
2128 None => false
2129 }
2130 }
2131
2132 /// Create the `main` function, which will initialise the Rust runtime and
2133 /// call the user's main function.
2134 pub fn create_entry_wrapper(ccx: &CrateContext,
2135 sp: Span,
2136 main_llfn: ValueRef) {
2137 let et = ccx.sess().entry_type.get().unwrap();
2138 match et {
2139 config::EntryMain => {
2140 create_entry_fn(ccx, sp, main_llfn, true);
2141 }
2142 config::EntryStart => create_entry_fn(ccx, sp, main_llfn, false),
2143 config::EntryNone => {} // Do nothing.
2144 }
2145
2146 fn create_entry_fn(ccx: &CrateContext,
2147 sp: Span,
2148 rust_main: ValueRef,
2149 use_start_lang_item: bool) {
2150 let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()],
2151 &ccx.int_type());
2152
2153 let llfn = declare::define_cfn(ccx, "main", llfty,
2154 ty::mk_nil(ccx.tcx())).unwrap_or_else(||{
2155 ccx.sess().span_err(sp, "entry symbol `main` defined multiple times");
2156 // FIXME: We should be smart and show a better diagnostic here.
2157 ccx.sess().help("did you use #[no_mangle] on `fn main`? Use #[start] instead");
2158 ccx.sess().abort_if_errors();
2159 panic!();
2160 });
2161
2162 // FIXME: #16581: Marking a symbol in the executable with `dllexport`
2163 // linkage forces MinGW's linker to output a `.reloc` section for ASLR
2164 if ccx.sess().target.target.options.is_like_windows {
2165 unsafe { llvm::LLVMRustSetDLLExportStorageClass(llfn) }
2166 }
2167
2168 let llbb = unsafe {
2169 llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn,
2170 "top\0".as_ptr() as *const _)
2171 };
2172 let bld = ccx.raw_builder();
2173 unsafe {
2174 llvm::LLVMPositionBuilderAtEnd(bld, llbb);
2175
2176 debuginfo::insert_reference_to_gdb_debug_scripts_section_global(ccx);
2177
2178 let (start_fn, args) = if use_start_lang_item {
2179 let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) {
2180 Ok(id) => id,
2181 Err(s) => { ccx.sess().fatal(&s[..]); }
2182 };
2183 let start_fn = if start_def_id.krate == ast::LOCAL_CRATE {
2184 get_item_val(ccx, start_def_id.node)
2185 } else {
2186 let start_fn_type = csearch::get_type(ccx.tcx(),
2187 start_def_id).ty;
2188 trans_external_path(ccx, start_def_id, start_fn_type)
2189 };
2190
2191 let args = {
2192 let opaque_rust_main = llvm::LLVMBuildPointerCast(bld,
2193 rust_main, Type::i8p(ccx).to_ref(),
2194 "rust_main\0".as_ptr() as *const _);
2195
2196 vec!(
2197 opaque_rust_main,
2198 get_param(llfn, 0),
2199 get_param(llfn, 1)
2200 )
2201 };
2202 (start_fn, args)
2203 } else {
2204 debug!("using user-defined start fn");
2205 let args = vec!(
2206 get_param(llfn, 0 as c_uint),
2207 get_param(llfn, 1 as c_uint)
2208 );
2209
2210 (rust_main, args)
2211 };
2212
2213 let result = llvm::LLVMBuildCall(bld,
2214 start_fn,
2215 args.as_ptr(),
2216 args.len() as c_uint,
2217 noname());
2218
2219 llvm::LLVMBuildRet(bld, result);
2220 }
2221 }
2222 }
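// A hedged, Rust-flavoured sketch of the wrapper built above (illustrative
// names only). For config::EntryMain, `start` is the #[lang = "start"] item:
//
//     extern "C" fn main(argc: i32, argv: *const *const u8) -> i32 {
//         start(rust_main as *const u8, argc, argv)
//     }
//
// For config::EntryStart, the user's #[start] function is called directly
// with (argc, argv) and no lang-item indirection.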
2223
2224 fn exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, id: ast::NodeId,
2225 ty: Ty<'tcx>, attrs: &[ast::Attribute]) -> String {
2226 match ccx.external_srcs().borrow().get(&id) {
2227 Some(&did) => {
2228 let sym = csearch::get_symbol(&ccx.sess().cstore, did);
2229 debug!("found item {} in other crate...", sym);
2230 return sym;
2231 }
2232 None => {}
2233 }
2234
2235 match attr::find_export_name_attr(ccx.sess().diagnostic(), attrs) {
2236 // Use provided name
2237 Some(name) => name.to_string(),
2238 _ => ccx.tcx().map.with_path(id, |path| {
2239 if attr::contains_name(attrs, "no_mangle") {
2240 // Don't mangle
2241 path.last().unwrap().to_string()
2242 } else {
2243 match weak_lang_items::link_name(attrs) {
2244 Some(name) => name.to_string(),
2245 None => {
2246 // Usual name mangling
2247 mangle_exported_name(ccx, path, ty, id)
2248 }
2249 }
2250 }
2251 })
2252 }
2253 }
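// Hedged examples of the precedence implemented above (hypothetical items):
//
//     #[export_name = "my_sym"] fn a() {} // exported exactly as "my_sym"
//     #[no_mangle] fn b() {}              // exported as the path tail, "b"
//     fn c() {}                           // falls through to mangle_exported_name
//
// Weak lang items get their fixed link names via weak_lang_items::link_name
// just before the general mangling case.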
2254
2255 fn contains_null(s: &str) -> bool {
2256 s.bytes().any(|b| b == 0)
2257 }
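// A tiny sketch of the helper above (our test module, not upstream's):
#[cfg(test)]
mod contains_null_tests {
    use super::contains_null;

    #[test]
    fn detects_interior_nul() {
        assert!(contains_null("a\0b"));
        assert!(!contains_null("ab"));
    }
}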
2258
2259 pub fn get_item_val(ccx: &CrateContext, id: ast::NodeId) -> ValueRef {
2260 debug!("get_item_val(id=`{}`)", id);
2261
2262 match ccx.item_vals().borrow().get(&id).cloned() {
2263 Some(v) => return v,
2264 None => {}
2265 }
2266
2267 let item = ccx.tcx().map.get(id);
2268 debug!("get_item_val: id={} item={:?}", id, item);
2269 let val = match item {
2270 ast_map::NodeItem(i) => {
2271 let ty = ty::node_id_to_type(ccx.tcx(), i.id);
2272 let sym = || exported_name(ccx, id, ty, &i.attrs);
2273
2274 let v = match i.node {
2275 ast::ItemStatic(_, _, ref expr) => {
2276 // If this static came from an external crate, then
2277 // we need to get the symbol from csearch instead of
2278 // using the current crate's name/version
2279 // information in the hash of the symbol
2280 let sym = sym();
2281 debug!("making {}", sym);
2282
2283 // We need the translated value here, because for enums the
2284 // LLVM type is not fully determined by the Rust type.
2285 let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
2286 let (v, ty) = consts::const_expr(ccx, &**expr, empty_substs);
2287 ccx.static_values().borrow_mut().insert(id, v);
2288 unsafe {
2289 // boolean SSA values are i1, but they have to be stored in i8 slots,
2290 // otherwise some LLVM optimization passes don't work as expected
2291 let llty = if ty::type_is_bool(ty) {
2292 llvm::LLVMInt8TypeInContext(ccx.llcx())
2293 } else {
2294 llvm::LLVMTypeOf(v)
2295 };
2296
2297 // FIXME(nagisa): this should probably be declare_global, because no
2298 // definition is happening here, but we depend on it being defined here
2299 // from const::trans_static. All of this logic should be replaced.
2300 let g = declare::define_global(ccx, &sym[..],
2301 Type::from_ref(llty)).unwrap_or_else(||{
2302 ccx.sess().span_fatal(i.span, &format!("symbol `{}` is already defined",
2303 sym))
2304 });
2305
2306 if attr::contains_name(&i.attrs,
2307 "thread_local") {
2308 llvm::set_thread_local(g, true);
2309 }
2310 ccx.item_symbols().borrow_mut().insert(i.id, sym);
2311 g
2312 }
2313 }
2314
2315 ast::ItemFn(_, _, abi, _, _) => {
2316 let sym = sym();
2317 let llfn = if abi == Rust {
2318 register_fn(ccx, i.span, sym, i.id, ty)
2319 } else {
2320 foreign::register_rust_fn_with_foreign_abi(ccx, i.span, sym, i.id)
2321 };
2322 attributes::from_fn_attrs(ccx, &i.attrs, llfn);
2323 llfn
2324 }
2325
2326 _ => ccx.sess().bug("get_item_val: weird result in table")
2327 };
2328
2329 match attr::first_attr_value_str_by_name(&i.attrs,
2330 "link_section") {
2331 Some(sect) => {
2332 if contains_null(&sect) {
2333 ccx.sess().fatal(&format!("illegal null byte in link_section value: `{}`",
2334 &sect));
2335 }
2336 unsafe {
2337 let buf = CString::new(sect.as_bytes()).unwrap();
2338 llvm::LLVMSetSection(v, buf.as_ptr());
2339 }
2340 },
2341 None => ()
2342 }
2343
2344 v
2345 }
2346
2347 ast_map::NodeTraitItem(trait_item) => {
2348 debug!("get_item_val(): processing a NodeTraitItem");
2349 match trait_item.node {
2350 ast::MethodTraitItem(_, None) | ast::TypeTraitItem(..) => {
2351 ccx.sess().span_bug(trait_item.span,
2352 "unexpected variant: required trait method in get_item_val()");
2353 }
2354 ast::MethodTraitItem(_, Some(_)) => {
2355 register_method(ccx, id, &trait_item.attrs, trait_item.span)
2356 }
2357 }
2358 }
2359
2360 ast_map::NodeImplItem(impl_item) => {
2361 match impl_item.node {
2362 ast::MethodImplItem(..) => {
2363 register_method(ccx, id, &impl_item.attrs, impl_item.span)
2364 }
2365 ast::TypeImplItem(_) => {
2366 ccx.sess().span_bug(impl_item.span,
2367 "unexpected variant: associated type in get_item_val()")
2368 }
2369 ast::MacImplItem(_) => {
2370 ccx.sess().span_bug(impl_item.span,
2371 "unexpected variant: unexpanded macro in get_item_val()")
2372 }
2373 }
2374 }
2375
2376 ast_map::NodeForeignItem(ni) => {
2377 match ni.node {
2378 ast::ForeignItemFn(..) => {
2379 let abi = ccx.tcx().map.get_foreign_abi(id);
2380 let ty = ty::node_id_to_type(ccx.tcx(), ni.id);
2381 let name = foreign::link_name(&*ni);
2382 let llfn = foreign::register_foreign_item_fn(ccx, abi, ty, &name);
2383 attributes::from_fn_attrs(ccx, &ni.attrs, llfn);
2384 llfn
2385 }
2386 ast::ForeignItemStatic(..) => {
2387 foreign::register_static(ccx, &*ni)
2388 }
2389 }
2390 }
2391
2392 ast_map::NodeVariant(ref v) => {
2393 let llfn;
2394 let args = match v.node.kind {
2395 ast::TupleVariantKind(ref args) => args,
2396 ast::StructVariantKind(_) => {
2397 ccx.sess().bug("struct variant kind unexpected in get_item_val")
2398 }
2399 };
2400 assert!(!args.is_empty());
2401 let ty = ty::node_id_to_type(ccx.tcx(), id);
2402 let parent = ccx.tcx().map.get_parent(id);
2403 let enm = ccx.tcx().map.expect_item(parent);
2404 let sym = exported_name(ccx,
2405 id,
2406 ty,
2407 &enm.attrs);
2408
2409 llfn = match enm.node {
2410 ast::ItemEnum(_, _) => {
2411 register_fn(ccx, (*v).span, sym, id, ty)
2412 }
2413 _ => ccx.sess().bug("NodeVariant, shouldn't happen")
2414 };
2415 attributes::inline(llfn, attributes::InlineAttr::Hint);
2416 llfn
2417 }
2418
2419 ast_map::NodeStructCtor(struct_def) => {
2420 // Only register the constructor if this is a tuple-like struct.
2421 let ctor_id = match struct_def.ctor_id {
2422 None => {
2423 ccx.sess().bug("attempt to register a constructor of \
2424 a non-tuple-like struct")
2425 }
2426 Some(ctor_id) => ctor_id,
2427 };
2428 let parent = ccx.tcx().map.get_parent(id);
2429 let struct_item = ccx.tcx().map.expect_item(parent);
2430 let ty = ty::node_id_to_type(ccx.tcx(), ctor_id);
2431 let sym = exported_name(ccx,
2432 id,
2433 ty,
2434 &struct_item.attrs);
2435 let llfn = register_fn(ccx, struct_item.span,
2436 sym, ctor_id, ty);
2437 attributes::inline(llfn, attributes::InlineAttr::Hint);
2438 llfn
2439 }
2440
2441 ref variant => {
2442 ccx.sess().bug(&format!("get_item_val(): unexpected variant: {:?}",
2443 variant))
2444 }
2445 };
2446
2447 // All LLVM globals and functions are initially created as external-linkage
2448 // declarations. If `trans_item`/`trans_fn` later turns the declaration
2449 // into a definition, it adjusts the linkage then (using `update_linkage`).
2450 //
2451 // The exception is foreign items, which have their linkage set inside the
2452 // call to `foreign::register_*` above. We don't touch the linkage after
2453 // that (`foreign::trans_foreign_mod` doesn't adjust the linkage like the
2454 // other item translation functions do).
2455
2456 ccx.item_vals().borrow_mut().insert(id, val);
2457 val
2458 }
2459
2460 fn register_method(ccx: &CrateContext, id: ast::NodeId,
2461 attrs: &[ast::Attribute], span: Span) -> ValueRef {
2462 let mty = ty::node_id_to_type(ccx.tcx(), id);
2463
2464 let sym = exported_name(ccx, id, mty, &attrs);
2465
2466 if let ty::ty_bare_fn(_, ref f) = mty.sty {
2467 let llfn = if f.abi == Rust || f.abi == RustCall {
2468 register_fn(ccx, span, sym, id, mty)
2469 } else {
2470 foreign::register_rust_fn_with_foreign_abi(ccx, span, sym, id)
2471 };
2472 attributes::from_fn_attrs(ccx, &attrs, llfn);
2473 return llfn;
2474 } else {
2475 ccx.sess().span_bug(span, "expected bare rust function");
2476 }
2477 }
2478
2479 pub fn crate_ctxt_to_encode_parms<'a, 'tcx>(cx: &'a SharedCrateContext<'tcx>,
2480 ie: encoder::EncodeInlinedItem<'a>)
2481 -> encoder::EncodeParams<'a, 'tcx> {
2482 encoder::EncodeParams {
2483 diag: cx.sess().diagnostic(),
2484 tcx: cx.tcx(),
2485 reexports: cx.export_map(),
2486 item_symbols: cx.item_symbols(),
2487 link_meta: cx.link_meta(),
2488 cstore: &cx.sess().cstore,
2489 encode_inlined_item: ie,
2490 reachable: cx.reachable(),
2491 }
2492 }
2493
2494 pub fn write_metadata(cx: &SharedCrateContext, krate: &ast::Crate) -> Vec<u8> {
2495 use flate;
2496
2497 let any_library = cx.sess().crate_types.borrow().iter().any(|ty| {
2498 *ty != config::CrateTypeExecutable
2499 });
2500 if !any_library {
2501 return Vec::new()
2502 }
2503
2504 let encode_inlined_item: encoder::EncodeInlinedItem =
2505 Box::new(|ecx, rbml_w, ii| astencode::encode_inlined_item(ecx, rbml_w, ii));
2506
2507 let encode_parms = crate_ctxt_to_encode_parms(cx, encode_inlined_item);
2508 let metadata = encoder::encode_metadata(encode_parms, krate);
2509 let mut compressed = encoder::metadata_encoding_version.to_vec();
2510 compressed.push_all(&flate::deflate_bytes(&metadata));
2511 let llmeta = C_bytes_in_context(cx.metadata_llcx(), &compressed[..]);
2512 let llconst = C_struct_in_context(cx.metadata_llcx(), &[llmeta], false);
2513 let name = format!("rust_metadata_{}_{}",
2514 cx.link_meta().crate_name,
2515 cx.link_meta().crate_hash);
2516 let buf = CString::new(name).unwrap();
2517 let llglobal = unsafe {
2518 llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(),
2519 buf.as_ptr())
2520 };
2521 unsafe {
2522 llvm::LLVMSetInitializer(llglobal, llconst);
2523 let name = loader::meta_section_name(cx.sess().target.target.options.is_like_osx);
2524 let name = CString::new(name).unwrap();
2525 llvm::LLVMSetSection(llglobal, name.as_ptr())
2526 }
2527 return metadata;
2528 }
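// Hedged sketch of the blob layout produced above; schematically it is
//
//     [ encoder::metadata_encoding_version bytes | deflate(metadata) ]
//
// so a reader would strip the version prefix before inflating, e.g.
// (assuming a matching inflate entry point in `flate`):
//
//     let payload = &blob[encoder::metadata_encoding_version.len()..];
//     let metadata = flate::inflate_bytes(payload); // hypothetical call shape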
2529
2530 /// Find any symbols that are defined in one compilation unit, but not declared
2531 /// in any other compilation unit. Give these symbols internal linkage.
2532 fn internalize_symbols(cx: &SharedCrateContext, reachable: &HashSet<String>) {
2533 unsafe {
2534 let mut declared = HashSet::new();
2535
2536 let iter_globals = |llmod| {
2537 ValueIter {
2538 cur: llvm::LLVMGetFirstGlobal(llmod),
2539 step: llvm::LLVMGetNextGlobal,
2540 }
2541 };
2542
2543 let iter_functions = |llmod| {
2544 ValueIter {
2545 cur: llvm::LLVMGetFirstFunction(llmod),
2546 step: llvm::LLVMGetNextFunction,
2547 }
2548 };
2549
2550 // Collect all external declarations in all compilation units.
2551 for ccx in cx.iter() {
2552 for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
2553 let linkage = llvm::LLVMGetLinkage(val);
2554 // We only care about external declarations (not definitions)
2555 // and available_externally definitions.
2556 if !(linkage == llvm::ExternalLinkage as c_uint &&
2557 llvm::LLVMIsDeclaration(val) != 0) &&
2558 !(linkage == llvm::AvailableExternallyLinkage as c_uint) {
2559 continue
2560 }
2561
2562 let name = CStr::from_ptr(llvm::LLVMGetValueName(val))
2563 .to_bytes().to_vec();
2564 declared.insert(name);
2565 }
2566 }
2567
2568 // Examine each external definition. If the definition is not used in
2569 // any other compilation unit, and is not reachable from other crates,
2570 // then give it internal linkage.
2571 for ccx in cx.iter() {
2572 for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
2573 // We only care about external definitions.
2574 if !(llvm::LLVMGetLinkage(val) == llvm::ExternalLinkage as c_uint &&
2575 llvm::LLVMIsDeclaration(val) == 0) {
2576 continue
2577 }
2578
2579 let name = CStr::from_ptr(llvm::LLVMGetValueName(val))
2580 .to_bytes().to_vec();
2581 if !declared.contains(&name) &&
2582 !reachable.contains(str::from_utf8(&name).unwrap()) {
2583 llvm::SetLinkage(val, llvm::InternalLinkage);
2584 }
2585 }
2586 }
2587 }
2588
2589
2590 struct ValueIter {
2591 cur: ValueRef,
2592 step: unsafe extern "C" fn(ValueRef) -> ValueRef,
2593 }
2594
2595 impl Iterator for ValueIter {
2596 type Item = ValueRef;
2597
2598 fn next(&mut self) -> Option<ValueRef> {
2599 let old = self.cur;
2600 if !old.is_null() {
2601 self.cur = unsafe {
2602 let step: unsafe extern "C" fn(ValueRef) -> ValueRef =
2603 mem::transmute_copy(&self.step);
2604 step(old)
2605 };
2606 Some(old)
2607 } else {
2608 None
2609 }
2610 }
2611 }
2612 }
2613
2614 pub fn trans_crate<'tcx>(analysis: ty::CrateAnalysis<'tcx>)
2615 -> (ty::ctxt<'tcx>, CrateTranslation) {
2616 let ty::CrateAnalysis { ty_cx: tcx, export_map, reachable, name, .. } = analysis;
2617 let krate = tcx.map.krate();
2618
2619 let check_overflow = if let Some(v) = tcx.sess.opts.debugging_opts.force_overflow_checks {
2620 v
2621 } else {
2622 tcx.sess.opts.debug_assertions
2623 };
2624
2625 let check_dropflag = if let Some(v) = tcx.sess.opts.debugging_opts.force_dropflag_checks {
2626 v
2627 } else {
2628 tcx.sess.opts.debug_assertions
2629 };
2630
2631 // Before we touch LLVM, make sure that multithreading is enabled.
2632 unsafe {
2633 use std::sync::{Once, ONCE_INIT};
2634 static INIT: Once = ONCE_INIT;
2635 static mut POISONED: bool = false;
2636 INIT.call_once(|| {
2637 if llvm::LLVMStartMultithreaded() != 1 {
2638 // Use an extra bool so that all future usage of LLVM still fails,
2639 // even though the Once will not run its closure more than once.
2640 POISONED = true;
2641 }
2642 });
2643
2644 if POISONED {
2645 tcx.sess.bug("couldn't enable multi-threaded LLVM");
2646 }
2647 }
2648
2649 let link_meta = link::build_link_meta(&tcx.sess, krate, name);
2650
2651 let codegen_units = tcx.sess.opts.cg.codegen_units;
2652 let shared_ccx = SharedCrateContext::new(&link_meta.crate_name,
2653 codegen_units,
2654 tcx,
2655 export_map,
2656 Sha256::new(),
2657 link_meta.clone(),
2658 reachable,
2659 check_overflow,
2660 check_dropflag);
2661
2662 {
2663 let ccx = shared_ccx.get_ccx(0);
2664
2665 // First, verify intrinsics.
2666 intrinsic::check_intrinsics(&ccx);
2667
2668 // Next, translate the module.
2669 {
2670 let _icx = push_ctxt("text");
2671 trans_mod(&ccx, &krate.module);
2672 }
2673 }
2674
2675 for ccx in shared_ccx.iter() {
2676 if ccx.sess().opts.debuginfo != NoDebugInfo {
2677 debuginfo::finalize(&ccx);
2678 }
2679 }
2680
2681 // Translate the metadata.
2682 let metadata = write_metadata(&shared_ccx, krate);
2683
2684 if shared_ccx.sess().trans_stats() {
2685 let stats = shared_ccx.stats();
2686 println!("--- trans stats ---");
2687 println!("n_glues_created: {}", stats.n_glues_created.get());
2688 println!("n_null_glues: {}", stats.n_null_glues.get());
2689 println!("n_real_glues: {}", stats.n_real_glues.get());
2690
2691 println!("n_fns: {}", stats.n_fns.get());
2692 println!("n_monos: {}", stats.n_monos.get());
2693 println!("n_inlines: {}", stats.n_inlines.get());
2694 println!("n_closures: {}", stats.n_closures.get());
2695 println!("fn stats:");
2696 stats.fn_stats.borrow_mut().sort_by(|&(_, insns_a), &(_, insns_b)| {
2697 insns_b.cmp(&insns_a)
2698 });
2699 for &(ref name, insns) in &*stats.fn_stats.borrow() {
2700 println!("{} insns, {}", insns, *name);
2701 }
2706 }
2707 if shared_ccx.sess().count_llvm_insns() {
2708 for (k, v) in &*shared_ccx.stats().llvm_insns.borrow() {
2709 println!("{:7} {}", *v, *k);
2710 }
2711 }
2712
2713 let modules = shared_ccx.iter()
2714 .map(|ccx| ModuleTranslation { llcx: ccx.llcx(), llmod: ccx.llmod() })
2715 .collect();
2716
2717 let mut reachable: Vec<String> = shared_ccx.reachable().iter().filter_map(|id| {
2718 shared_ccx.item_symbols().borrow().get(id).map(|s| s.to_string())
2719 }).collect();
2720
2721 // For the purposes of LTO, we add to the reachable set all of the upstream
2722 // reachable extern fns. These functions are all part of the public ABI of
2723 // the final product, so LTO needs to preserve them.
2724 shared_ccx.sess().cstore.iter_crate_data(|cnum, _| {
2725 let syms = csearch::get_reachable_extern_fns(&shared_ccx.sess().cstore, cnum);
2726 reachable.extend(syms.into_iter().map(|did| {
2727 csearch::get_symbol(&shared_ccx.sess().cstore, did)
2728 }));
2729 });
2730
2731 // Make sure that some other crucial symbols are not eliminated from the
2732 // module. This includes the main function, the crate map (used for debug
2733 // log settings and I/O), and finally the curious rust_stack_exhausted
2734 // symbol. This symbol is required for use by the libmorestack library that
2735 // we link in, so we must ensure that this symbol is not internalized (if
2736 // defined in the crate).
2737 reachable.push("main".to_string());
2738 reachable.push("rust_stack_exhausted".to_string());
2739
2740 // referenced from .eh_frame section on some platforms
2741 reachable.push("rust_eh_personality".to_string());
2742 // referenced from rt/rust_try.ll
2743 reachable.push("rust_eh_personality_catch".to_string());
2744
2745 if codegen_units > 1 {
2746 internalize_symbols(&shared_ccx, &reachable.iter().cloned().collect());
2747 }
2748
2749 let metadata_module = ModuleTranslation {
2750 llcx: shared_ccx.metadata_llcx(),
2751 llmod: shared_ccx.metadata_llmod(),
2752 };
2753 let formats = shared_ccx.tcx().dependency_formats.borrow().clone();
2754 let no_builtins = attr::contains_name(&krate.attrs, "no_builtins");
2755
2756 let translation = CrateTranslation {
2757 modules: modules,
2758 metadata_module: metadata_module,
2759 link: link_meta,
2760 metadata: metadata,
2761 reachable: reachable,
2762 crate_formats: formats,
2763 no_builtins: no_builtins,
2764 };
2765
2766 (shared_ccx.take_tcx(), translation)
2767 }