src/librustc_codegen_llvm/base.rs (rustc 1.28.0~beta.14)
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Codegen the completed AST to the LLVM IR.
//!
//! Some functions here, such as codegen_block and codegen_expr, return a value --
//! the result of the codegen to LLVM -- while others, such as codegen_fn
//! and mono_item, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//!
//! Hopefully useful general knowledge about codegen:
//!
//! * There's no way to find out the Ty type of a ValueRef. Doing so
//!   would be "trying to get the eggs out of an omelette" (credit:
//!   pcwalton). You can, instead, find out its TypeRef by calling val_ty,
//!   but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int,
//!   int) and rec(x=int, y=int, z=int) will have the same TypeRef.
use super::ModuleLlvm;
use super::ModuleSource;
use super::ModuleCodegen;
use super::ModuleKind;

use abi;
use back::link;
use back::write::{self, OngoingCodegen, create_target_machine};
use llvm::{ContextRef, ModuleRef, ValueRef, Vector, get_param};
use llvm;
use metadata;
use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc::middle::lang_items::StartFnLangItem;
use rustc::middle::weak_lang_items;
use rustc::mir::mono::{Linkage, Visibility, Stats};
use rustc::middle::cstore::{EncodedMetadata};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf};
use rustc::ty::query::Providers;
use rustc::dep_graph::{DepNode, DepConstructor};
use rustc::middle::cstore::{self, LinkMeta, LinkagePreference};
use rustc::middle::exported_symbols;
use rustc::util::common::{time, print_time_passes_entry};
use rustc::session::config::{self, NoDebugInfo};
use rustc::session::Session;
use rustc_incremental;
use allocator;
use mir::place::PlaceRef;
use attributes;
use builder::{Builder, MemFlags};
use callee;
use common::{C_bool, C_bytes_in_context, C_i32, C_usize};
use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode};
use common::{self, C_struct_in_context, C_array, val_ty};
use consts;
use context::{self, CodegenCx};
use debuginfo;
use declare;
use meth;
use mir;
use monomorphize::Instance;
use monomorphize::partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt};
use rustc_codegen_utils::symbol_names_test;
use time_graph;
use mono_item::{MonoItem, BaseMonoItemExt, MonoItemExt, DefPathBasedNames};
use type_::Type;
use type_of::LayoutLlvmExt;
use rustc::util::nodemap::{FxHashMap, FxHashSet, DefIdSet};
use CrateInfo;
use rustc_data_structures::sync::Lrc;
use rustc_target::spec::TargetTriple;

use std::any::Any;
use std::collections::BTreeMap;
use std::ffi::CString;
use std::str;
use std::sync::Arc;
use std::time::{Instant, Duration};
use std::i32;
use std::cmp;
use std::sync::mpsc;
use syntax_pos::Span;
use syntax_pos::symbol::InternedString;
use syntax::attr;
use rustc::hir;
use syntax::ast;

use mir::operand::OperandValue;

pub use rustc_codegen_utils::check_for_rustc_errors_attr;

pub struct StatRecorder<'a, 'tcx: 'a> {
    cx: &'a CodegenCx<'a, 'tcx>,
    name: Option<String>,
    istart: usize,
}

impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
    pub fn new(cx: &'a CodegenCx<'a, 'tcx>, name: String) -> StatRecorder<'a, 'tcx> {
        let istart = cx.stats.borrow().n_llvm_insns;
        StatRecorder {
            cx,
            name: Some(name),
            istart,
        }
    }
}

impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
    fn drop(&mut self) {
        if self.cx.sess().codegen_stats() {
            let mut stats = self.cx.stats.borrow_mut();
            let iend = stats.n_llvm_insns;
            stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart));
            stats.n_fns += 1;
            // Reset LLVM insn count to avoid compound costs.
            stats.n_llvm_insns = self.istart;
        }
    }
}

pub fn bin_op_to_icmp_predicate(op: hir::BinOp_,
                                signed: bool)
                                -> llvm::IntPredicate {
    match op {
        hir::BiEq => llvm::IntEQ,
        hir::BiNe => llvm::IntNE,
        hir::BiLt => if signed { llvm::IntSLT } else { llvm::IntULT },
        hir::BiLe => if signed { llvm::IntSLE } else { llvm::IntULE },
        hir::BiGt => if signed { llvm::IntSGT } else { llvm::IntUGT },
        hir::BiGe => if signed { llvm::IntSGE } else { llvm::IntUGE },
        op => {
            bug!("bin_op_to_icmp_predicate: expected comparison operator, \
                  found {:?}",
                 op)
        }
    }
}

pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate {
    match op {
        hir::BiEq => llvm::RealOEQ,
        hir::BiNe => llvm::RealUNE,
        hir::BiLt => llvm::RealOLT,
        hir::BiLe => llvm::RealOLE,
        hir::BiGt => llvm::RealOGT,
        hir::BiGe => llvm::RealOGE,
        op => {
            bug!("bin_op_to_fcmp_predicate: expected comparison operator, \
                  found {:?}",
                 op);
        }
    }
}

pub fn compare_simd_types<'a, 'tcx>(
    bx: &Builder<'a, 'tcx>,
    lhs: ValueRef,
    rhs: ValueRef,
    t: Ty<'tcx>,
    ret_ty: Type,
    op: hir::BinOp_
) -> ValueRef {
    let signed = match t.sty {
        ty::TyFloat(_) => {
            let cmp = bin_op_to_fcmp_predicate(op);
            return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty);
        },
        ty::TyUint(_) => false,
        ty::TyInt(_) => true,
        _ => bug!("compare_simd_types: invalid SIMD type"),
    };

    let cmp = bin_op_to_icmp_predicate(op, signed);
    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
    // to get the correctly sized type. This will compile to a single instruction
    // once the IR is converted to assembly if the SIMD instruction is supported
    // by the target architecture.
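    //
    // Illustrative sketch (added for exposition, not in the original source):
    // for a 4-lane `i32` comparison the emitted IR looks roughly like
    //
    //     %mask = icmp slt <4 x i32> %lhs, %rhs        ; yields <4 x i1>
    //     %res  = sext <4 x i1> %mask to <4 x i32>     ; all-ones / all-zeros per lane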
    bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty)
}

/// Retrieve the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit funny. It is intended for use
/// in an upcast, where the new vtable for an object will be derived
/// from the old one.
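///
/// For illustration (an added example, not part of the original doc):
/// unsizing `[i32; 4]` to `[i32]` returns the constant length `4usize`,
/// while unsizing a concrete `Foo` to `dyn Trait` returns a pointer to
/// `Foo`'s vtable for `Trait`.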
pub fn unsized_info<'cx, 'tcx>(cx: &CodegenCx<'cx, 'tcx>,
                               source: Ty<'tcx>,
                               target: Ty<'tcx>,
                               old_info: Option<ValueRef>)
                               -> ValueRef {
    let (source, target) = cx.tcx.struct_lockstep_tails(source, target);
    match (&source.sty, &target.sty) {
        (&ty::TyArray(_, len), &ty::TySlice(_)) => {
            C_usize(cx, len.unwrap_usize(cx.tcx))
        }
        (&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
            // For now, upcasts are limited to changes in marker
            // traits, and hence never require a change to the vtable.
            old_info.expect("unsized_info: missing old info for trait upcast")
        }
        (_, &ty::TyDynamic(ref data, ..)) => {
            let vtable_ptr = cx.layout_of(cx.tcx.mk_mut_ptr(target))
                .field(cx, abi::FAT_PTR_EXTRA);
            consts::ptrcast(meth::get_vtable(cx, source, data.principal()),
                            vtable_ptr.llvm_type(cx))
        }
        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
                  source,
                  target),
    }
}

/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
pub fn unsize_thin_ptr<'a, 'tcx>(
    bx: &Builder<'a, 'tcx>,
    src: ValueRef,
    src_ty: Ty<'tcx>,
    dst_ty: Ty<'tcx>
) -> (ValueRef, ValueRef) {
    debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
    match (&src_ty.sty, &dst_ty.sty) {
        (&ty::TyRef(_, a, _),
         &ty::TyRef(_, b, _)) |
        (&ty::TyRef(_, a, _),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
            assert!(bx.cx.type_is_sized(a));
            let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
        }
        (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
            let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
            assert!(bx.cx.type_is_sized(a));
            let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
        }
        (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
            assert_eq!(def_a, def_b);

            let src_layout = bx.cx.layout_of(src_ty);
            let dst_layout = bx.cx.layout_of(dst_ty);
            let mut result = None;
            for i in 0..src_layout.fields.count() {
                let src_f = src_layout.field(bx.cx, i);
                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
                if src_f.is_zst() {
                    continue;
                }
                assert_eq!(src_layout.size, src_f.size);

                let dst_f = dst_layout.field(bx.cx, i);
                assert_ne!(src_f.ty, dst_f.ty);
                assert_eq!(result, None);
                result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty));
            }
            let (lldata, llextra) = result.unwrap();
            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
            (bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx, 0)),
             bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx, 1)))
        }
        _ => bug!("unsize_thin_ptr: called on bad types"),
    }
}

/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty`, and store the result in `dst`.
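///
/// For illustration (an added example, not part of the original doc):
/// coercing a place holding `&[u8; 4]` into a place of type `&[u8]` stores
/// the unchanged data pointer plus the constant length `4`, while coercing
/// `&Foo` into `&dyn Trait` stores the data pointer plus a vtable pointer.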
pub fn coerce_unsized_into<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                                     src: PlaceRef<'tcx>,
                                     dst: PlaceRef<'tcx>) {
    let src_ty = src.layout.ty;
    let dst_ty = dst.layout.ty;
    let coerce_ptr = || {
        let (base, info) = match src.load(bx).val {
            OperandValue::Pair(base, info) => {
                // fat-ptr to fat-ptr unsize preserves the vtable
                // i.e. &'a fmt::Debug+Send => &'a fmt::Debug
                // So we need to pointercast the base to ensure
                // the types match up.
                let thin_ptr = dst.layout.field(bx.cx, abi::FAT_PTR_ADDR);
                (bx.pointercast(base, thin_ptr.llvm_type(bx.cx)), info)
            }
            OperandValue::Immediate(base) => {
                unsize_thin_ptr(bx, base, src_ty, dst_ty)
            }
            OperandValue::Ref(..) => bug!()
        };
        OperandValue::Pair(base, info).store(bx, dst);
    };
    match (&src_ty.sty, &dst_ty.sty) {
        (&ty::TyRef(..), &ty::TyRef(..)) |
        (&ty::TyRef(..), &ty::TyRawPtr(..)) |
        (&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => {
            coerce_ptr()
        }
        (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
            coerce_ptr()
        }

        (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
            assert_eq!(def_a, def_b);

            for i in 0..def_a.variants[0].fields.len() {
                let src_f = src.project_field(bx, i);
                let dst_f = dst.project_field(bx, i);

                if dst_f.layout.is_zst() {
                    continue;
                }

                if src_f.layout.ty == dst_f.layout.ty {
                    memcpy_ty(bx, dst_f.llval, src_f.llval, src_f.layout,
                              src_f.align.min(dst_f.align), MemFlags::empty());
                } else {
                    coerce_unsized_into(bx, src_f, dst_f);
                }
            }
        }
        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}",
                  src_ty,
                  dst_ty),
    }
}

pub fn cast_shift_expr_rhs(
    cx: &Builder, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef
) -> ValueRef {
    cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b))
}

fn cast_shift_rhs<F, G>(op: hir::BinOp_,
                        lhs: ValueRef,
                        rhs: ValueRef,
                        trunc: F,
                        zext: G)
                        -> ValueRef
    where F: FnOnce(ValueRef, Type) -> ValueRef,
          G: FnOnce(ValueRef, Type) -> ValueRef
{
    // Shifts may have any size int on the rhs
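    //
    // For example (an added illustration): in Rust, `x >> n` with `x: u64`
    // and `n: u8` reaches codegen with mismatched operand widths, but LLVM's
    // shift instructions require both operands to have the same width, so
    // below the RHS is zero-extended (or truncated, in the opposite case) to
    // match the LHS.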
    if op.is_shift() {
        let mut rhs_llty = val_ty(rhs);
        let mut lhs_llty = val_ty(lhs);
        if rhs_llty.kind() == Vector {
            rhs_llty = rhs_llty.element_type()
        }
        if lhs_llty.kind() == Vector {
            lhs_llty = lhs_llty.element_type()
        }
        let rhs_sz = rhs_llty.int_width();
        let lhs_sz = lhs_llty.int_width();
        if lhs_sz < rhs_sz {
            trunc(rhs, lhs_llty)
        } else if lhs_sz > rhs_sz {
            // FIXME (#1877): If shifting by negative values becomes defined
            // behavior then this is wrong.
            zext(rhs, lhs_llty)
        } else {
            rhs
        }
    } else {
        rhs
    }
}

/// Returns whether this session's target will use SEH-based unwinding.
///
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
    sess.target.target.options.is_like_msvc
}

pub fn call_assume<'a, 'tcx>(bx: &Builder<'a, 'tcx>, val: ValueRef) {
    let assume_intrinsic = bx.cx.get_intrinsic("llvm.assume");
    bx.call(assume_intrinsic, &[val], None);
}

pub fn from_immediate(bx: &Builder, val: ValueRef) -> ValueRef {
    if val_ty(val) == Type::i1(bx.cx) {
        bx.zext(val, Type::i8(bx.cx))
    } else {
        val
    }
}

pub fn to_immediate(bx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef {
    if let layout::Abi::Scalar(ref scalar) = layout.abi {
        if scalar.is_bool() {
            return bx.trunc(val, Type::i1(bx.cx));
        }
    }
    val
}

pub fn call_memcpy(bx: &Builder,
                   dst: ValueRef,
                   src: ValueRef,
                   n_bytes: ValueRef,
                   align: Align,
                   flags: MemFlags) {
    if flags.contains(MemFlags::NONTEMPORAL) {
        // HACK(nox): This is inefficient but there is no nontemporal memcpy.
        let val = bx.load(src, align);
        let ptr = bx.pointercast(dst, val_ty(val).ptr_to());
        bx.store_with_flags(val, ptr, align, flags);
        return;
    }
    let cx = bx.cx;
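    // The memcpy intrinsic is keyed by pointer width; e.g. on a 64-bit target
    // the key computed below is "llvm.memcpy.p0i8.p0i8.i64" (note added for
    // exposition).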
    let ptr_width = &cx.sess().target.target.target_pointer_width;
    let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width);
    let memcpy = cx.get_intrinsic(&key);
    let src_ptr = bx.pointercast(src, Type::i8p(cx));
    let dst_ptr = bx.pointercast(dst, Type::i8p(cx));
    let size = bx.intcast(n_bytes, cx.isize_ty, false);
    let align = C_i32(cx, align.abi() as i32);
    let volatile = C_bool(cx, flags.contains(MemFlags::VOLATILE));
    bx.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
}

pub fn memcpy_ty<'a, 'tcx>(
    bx: &Builder<'a, 'tcx>,
    dst: ValueRef,
    src: ValueRef,
    layout: TyLayout<'tcx>,
    align: Align,
    flags: MemFlags,
) {
    let size = layout.size.bytes();
    if size == 0 {
        return;
    }

    call_memcpy(bx, dst, src, C_usize(bx.cx, size), align, flags);
}

pub fn call_memset<'a, 'tcx>(bx: &Builder<'a, 'tcx>,
                             ptr: ValueRef,
                             fill_byte: ValueRef,
                             size: ValueRef,
                             align: ValueRef,
                             volatile: bool) -> ValueRef {
    let ptr_width = &bx.cx.sess().target.target.target_pointer_width;
    let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
    let llintrinsicfn = bx.cx.get_intrinsic(&intrinsic_key);
    let volatile = C_bool(bx.cx, volatile);
    bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
}

pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) {
    let _s = if cx.sess().codegen_stats() {
        let mut instance_name = String::new();
        DefPathBasedNames::new(cx.tcx, true, true)
            .push_def_path(instance.def_id(), &mut instance_name);
        Some(StatRecorder::new(cx, instance_name))
    } else {
        None
    };

    // This is an `info!` call to allow collecting monomorphization statistics
    // and to allow finding the last function processed before LLVM aborts,
    // even in release builds.
    info!("codegen_instance({})", instance);

    let fn_ty = instance.ty(cx.tcx);
    let sig = common::ty_fn_sig(cx, fn_ty);
    let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);

    let lldecl = match cx.instances.borrow().get(&instance) {
        Some(&val) => val,
        None => bug!("Instance `{:?}` not already declared", instance)
    };

    cx.stats.borrow_mut().n_closures += 1;

    // The `uwtable` attribute, according to LLVM, means:
    //
    // This attribute indicates that the ABI being targeted requires that an
    // unwind table entry be produced for this function even if we can show
    // that no exceptions pass through it. This is normally the case for the
    // ELF x86-64 ABI, but it can be disabled for some compilation units.
    //
    // Typically when we're compiling with `-C panic=abort` (which implies this
    // `no_landing_pads` check) we don't need `uwtable` because we can't
    // generate any exceptions! On Windows, however, exceptions include other
    // events such as illegal instructions, segfaults, etc. This means that on
    // Windows we end up still needing the `uwtable` attribute even if the `-C
    // panic=abort` flag is passed.
    //
    // You can find more info on why Windows is whitelisted in:
    // https://bugzilla.mozilla.org/show_bug.cgi?id=1302078
    if !cx.sess().no_landing_pads() ||
       cx.sess().target.target.options.requires_uwtable {
        attributes::emit_uwtable(lldecl, true);
    }

    let mir = cx.tcx.instance_mir(instance.def);
    mir::codegen_mir(cx, lldecl, &mir, instance, sig);
}

pub fn set_link_section(cx: &CodegenCx,
                        llval: ValueRef,
                        attrs: &[ast::Attribute]) {
    if let Some(sect) = attr::first_attr_value_str_by_name(attrs, "link_section") {
        if contains_null(&sect.as_str()) {
            cx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`", &sect));
        }
        unsafe {
            let buf = CString::new(sect.as_str().as_bytes()).unwrap();
            llvm::LLVMSetSection(llval, buf.as_ptr());
        }
    }
}

/// Create the `main` function which will initialize the rust runtime and call
/// the user's `main` function.
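///
/// As a rough sketch of the generated wrapper (added for exposition; the
/// pseudocode below is illustrative and not part of the original source):
///
/// ```ignore
/// // With the `EntryMain` entry kind (a normal `fn main`):
/// extern "C" fn main(argc: c_int, argv: *const *const u8) -> c_int {
///     // forward to the `start` lang item, passing the user's `main`
///     // as an opaque function pointer
///     lang_start(rust_main, argc as isize, argv) as c_int
/// }
/// // With `EntryStart` (`#[start]`), the user's function is called directly
/// // with `(argc as isize, argv)` instead.
/// ```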
fn maybe_create_entry_wrapper(cx: &CodegenCx) {
    let (main_def_id, span) = match *cx.sess().entry_fn.borrow() {
        Some((id, span, _)) => {
            (cx.tcx.hir.local_def_id(id), span)
        }
        None => return,
    };

    let instance = Instance::mono(cx.tcx, main_def_id);

    if !cx.codegen_unit.contains_item(&MonoItem::Fn(instance)) {
        // We want to create the wrapper in the same codegen unit as Rust's main
        // function.
        return;
    }

    let main_llfn = callee::get_fn(cx, instance);

    let et = cx.sess().entry_fn.get().map(|e| e.2);
    match et {
        Some(config::EntryMain) => create_entry_fn(cx, span, main_llfn, main_def_id, true),
        Some(config::EntryStart) => create_entry_fn(cx, span, main_llfn, main_def_id, false),
        None => {}    // Do nothing.
    }

    fn create_entry_fn<'cx>(cx: &'cx CodegenCx,
                            sp: Span,
                            rust_main: ValueRef,
                            rust_main_def_id: DefId,
                            use_start_lang_item: bool) {
        let llfty = Type::func(&[Type::c_int(cx), Type::i8p(cx).ptr_to()], &Type::c_int(cx));

        let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output();
        // Given that `main()` has no arguments, its return type cannot have
        // late-bound regions, since late-bound regions must appear in the
        // argument listing.
        let main_ret_ty = cx.tcx.erase_regions(
            &main_ret_ty.no_late_bound_regions().unwrap(),
        );

        if declare::get_defined_value(cx, "main").is_some() {
            // FIXME: We should be smart and show a better diagnostic here.
            cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
                .help("did you use #[no_mangle] on `fn main`? Use #[start] instead")
                .emit();
            cx.sess().abort_if_errors();
            bug!();
        }
        let llfn = declare::declare_cfn(cx, "main", llfty);

        // `main` should respect the same config for frame pointer elimination
        // as the rest of the code.
        attributes::set_frame_pointer_elimination(cx, llfn);

        let bx = Builder::new_block(cx, llfn, "top");

        debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(&bx);

        // Params from the native main() are used as args for the Rust start function.
        let param_argc = get_param(llfn, 0);
        let param_argv = get_param(llfn, 1);
        let arg_argc = bx.intcast(param_argc, cx.isize_ty, true);
        let arg_argv = param_argv;

        let (start_fn, args) = if use_start_lang_item {
            let start_def_id = cx.tcx.require_lang_item(StartFnLangItem);
            let start_fn = callee::resolve_and_get_fn(
                cx,
                start_def_id,
                cx.tcx.intern_substs(&[main_ret_ty.into()]),
            );
            (start_fn, vec![bx.pointercast(rust_main, Type::i8p(cx).ptr_to()),
                            arg_argc, arg_argv])
        } else {
            debug!("using user-defined start fn");
            (rust_main, vec![arg_argc, arg_argv])
        };

        let result = bx.call(start_fn, &args, None);
        bx.ret(bx.intcast(result, Type::c_int(cx), true));
    }
}

fn contains_null(s: &str) -> bool {
    s.bytes().any(|b| b == 0)
}

fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>,
                            llmod_id: &str,
                            link_meta: &LinkMeta)
                            -> (ContextRef, ModuleRef, EncodedMetadata) {
    use std::io::Write;
    use flate2::Compression;
    use flate2::write::DeflateEncoder;

    let (metadata_llcx, metadata_llmod) = unsafe {
        context::create_context_and_module(tcx.sess, llmod_id)
    };

    #[derive(PartialEq, Eq, PartialOrd, Ord)]
    enum MetadataKind {
        None,
        Uncompressed,
        Compressed
    }

    let kind = tcx.sess.crate_types.borrow().iter().map(|ty| {
        match *ty {
            config::CrateTypeExecutable |
            config::CrateTypeStaticlib |
            config::CrateTypeCdylib => MetadataKind::None,

            config::CrateTypeRlib => MetadataKind::Uncompressed,

            config::CrateTypeDylib |
            config::CrateTypeProcMacro => MetadataKind::Compressed,
        }
    }).max().unwrap_or(MetadataKind::None);
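    // Note added for exposition: when several crate types are requested at
    // once, the `max()` above picks the strongest requirement; e.g. building
    // with `--crate-type rlib,dylib` combines Uncompressed and Compressed, so
    // compressed metadata is emitted.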

    if kind == MetadataKind::None {
        return (metadata_llcx,
                metadata_llmod,
                EncodedMetadata::new());
    }

    let metadata = tcx.encode_metadata(link_meta);
    if kind == MetadataKind::Uncompressed {
        return (metadata_llcx, metadata_llmod, metadata);
    }

    assert!(kind == MetadataKind::Compressed);
    let mut compressed = tcx.metadata_encoding_version();
    DeflateEncoder::new(&mut compressed, Compression::fast())
        .write_all(&metadata.raw_data).unwrap();

    let llmeta = C_bytes_in_context(metadata_llcx, &compressed);
    let llconst = C_struct_in_context(metadata_llcx, &[llmeta], false);
    let name = exported_symbols::metadata_symbol_name(tcx);
    let buf = CString::new(name).unwrap();
    let llglobal = unsafe {
        llvm::LLVMAddGlobal(metadata_llmod, val_ty(llconst).to_ref(), buf.as_ptr())
    };
    unsafe {
        llvm::LLVMSetInitializer(llglobal, llconst);
        let section_name = metadata::metadata_section_name(&tcx.sess.target.target);
        let name = CString::new(section_name).unwrap();
        llvm::LLVMSetSection(llglobal, name.as_ptr());

        // Also generate a .section directive to force no
        // flags, at least for ELF outputs, so that the
        // metadata doesn't get loaded into memory.
        let directive = format!(".section {}", section_name);
        let directive = CString::new(directive).unwrap();
        llvm::LLVMSetModuleInlineAsm(metadata_llmod, directive.as_ptr())
    }
    return (metadata_llcx, metadata_llmod, metadata);
}

pub struct ValueIter {
    cur: ValueRef,
    step: unsafe extern "C" fn(ValueRef) -> ValueRef,
}

impl Iterator for ValueIter {
    type Item = ValueRef;

    fn next(&mut self) -> Option<ValueRef> {
        let old = self.cur;
        if !old.is_null() {
            self.cur = unsafe { (self.step)(old) };
            Some(old)
        } else {
            None
        }
    }
}

pub fn iter_globals(llmod: llvm::ModuleRef) -> ValueIter {
    unsafe {
        ValueIter {
            cur: llvm::LLVMGetFirstGlobal(llmod),
            step: llvm::LLVMGetNextGlobal,
        }
    }
}
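
// Illustrative usage of `iter_globals` (a sketch added for exposition; `llmod`
// stands for any `llvm::ModuleRef` already at hand):
//
//     for g in iter_globals(llmod) {
//         // `g` is the ValueRef of one global variable in the module.
//     }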

pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                               rx: mpsc::Receiver<Box<Any + Send>>)
                               -> OngoingCodegen {

    check_for_rustc_errors_attr(tcx);

    if let Some(true) = tcx.sess.opts.debugging_opts.thinlto {
        if unsafe { !llvm::LLVMRustThinLTOAvailable() } {
            tcx.sess.fatal("this compiler's LLVM does not support ThinLTO");
        }
    }

    if (tcx.sess.opts.debugging_opts.pgo_gen.is_some() ||
        !tcx.sess.opts.debugging_opts.pgo_use.is_empty()) &&
        unsafe { !llvm::LLVMRustPGOAvailable() }
    {
        tcx.sess.fatal("this compiler's LLVM does not support PGO");
    }

    let crate_hash = tcx.crate_hash(LOCAL_CRATE);
    let link_meta = link::build_link_meta(crate_hash);

    // Codegen the metadata.
    let llmod_id = "metadata";
    let (metadata_llcx, metadata_llmod, metadata) =
        time(tcx.sess, "write metadata", || {
            write_metadata(tcx, llmod_id, &link_meta)
        });

    let metadata_module = ModuleCodegen {
        name: link::METADATA_MODULE_NAME.to_string(),
        llmod_id: llmod_id.to_string(),
        source: ModuleSource::Codegened(ModuleLlvm {
            llcx: metadata_llcx,
            llmod: metadata_llmod,
            tm: create_target_machine(tcx.sess, false),
        }),
        kind: ModuleKind::Metadata,
    };

    let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph {
        Some(time_graph::TimeGraph::new())
    } else {
        None
    };

    // Skip crate items and just output metadata in -Z no-codegen mode.
    if tcx.sess.opts.debugging_opts.no_codegen ||
       !tcx.sess.opts.output_types.should_codegen() {
        let ongoing_codegen = write::start_async_codegen(
            tcx,
            time_graph.clone(),
            link_meta,
            metadata,
            rx,
            1);

        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module);
        ongoing_codegen.codegen_finished(tcx);

        assert_and_save_dep_graph(tcx);

        ongoing_codegen.check_for_errors(tcx.sess);

        return ongoing_codegen;
    }

    // Run the monomorphization collector and partition the collected items into
    // codegen units.
    let codegen_units =
        tcx.collect_and_partition_mono_items(LOCAL_CRATE).1;
    let codegen_units = (*codegen_units).clone();

    // Force all codegen_unit queries so they are already either red or green
    // when compile_codegen_unit accesses them. We are not able to re-execute
    // the codegen_unit query from just the DepNode, so an unknown color would
    // lead to having to re-execute compile_codegen_unit, possibly
    // unnecessarily.
    if tcx.dep_graph.is_fully_enabled() {
        for cgu in &codegen_units {
            tcx.codegen_unit(cgu.name().clone());
        }
    }

    let ongoing_codegen = write::start_async_codegen(
        tcx,
        time_graph.clone(),
        link_meta,
        metadata,
        rx,
        codegen_units.len());

    // Codegen an allocator shim, if any.
    let allocator_module = if let Some(kind) = *tcx.sess.allocator_kind.get() {
        unsafe {
            let llmod_id = "allocator";
            let (llcx, llmod) =
                context::create_context_and_module(tcx.sess, llmod_id);
            let modules = ModuleLlvm {
                llmod,
                llcx,
                tm: create_target_machine(tcx.sess, false),
            };
            time(tcx.sess, "write allocator module", || {
                allocator::codegen(tcx, &modules, kind)
            });

            Some(ModuleCodegen {
                name: link::ALLOCATOR_MODULE_NAME.to_string(),
                llmod_id: llmod_id.to_string(),
                source: ModuleSource::Codegened(modules),
                kind: ModuleKind::Allocator,
            })
        }
    } else {
        None
    };

    if let Some(allocator_module) = allocator_module {
        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
    }

    ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module);

    // We sort the codegen units by size. This way we can schedule work for LLVM
    // a bit more efficiently.
    let codegen_units = {
        let mut codegen_units = codegen_units;
        codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
        codegen_units
    };

    let mut total_codegen_time = Duration::new(0, 0);
    let mut all_stats = Stats::default();

    for cgu in codegen_units.into_iter() {
        ongoing_codegen.wait_for_signal_to_codegen_item();
        ongoing_codegen.check_for_errors(tcx.sess);

        // First, if incremental compilation is enabled, we try to re-use the
        // codegen unit from the cache.
        if tcx.dep_graph.is_fully_enabled() {
            let cgu_id = cgu.work_product_id();

            // Check whether there is a previous work-product we can
            // re-use. Not only must the file exist, and the inputs not
            // be dirty, but the hash of the symbols we will generate must
            // be the same.
            if let Some(buf) = tcx.dep_graph.previous_work_product(&cgu_id) {
                let dep_node = &DepNode::new(tcx,
                    DepConstructor::CompileCodegenUnit(cgu.name().clone()));

                // We try to mark the DepNode::CompileCodegenUnit green. If we
                // succeed it means that none of the dependencies have changed
                // and we can safely re-use the cached result.
                if let Some(dep_node_index) = tcx.dep_graph.try_mark_green(tcx, dep_node) {
                    // Append ".rs" to the LLVM module identifier.
                    //
                    // The LLVM code generator emits a ".file filename" directive
                    // for ELF backends. The value of "filename" is set to the
                    // LLVM module identifier. Due to an LLVM MC bug[1], LLVM
                    // crashes if the module identifier is the same as another
                    // symbol in the module, such as a function name.
                    // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
                    let llmod_id = format!("{}.rs", cgu.name());

                    let module = ModuleCodegen {
                        name: cgu.name().to_string(),
                        source: ModuleSource::Preexisting(buf),
                        kind: ModuleKind::Regular,
                        llmod_id,
                    };
                    tcx.dep_graph.mark_loaded_from_cache(dep_node_index, true);
                    write::submit_codegened_module_to_llvm(tcx, module, 0);
                    // Continue to the next cgu, this one is done.
                    continue
                }
            } else {
                // This can happen if files were deleted from the cache
                // directory for some reason. We just re-compile in that case.
            }
        }

        let _timing_guard = time_graph.as_ref().map(|time_graph| {
            time_graph.start(write::CODEGEN_WORKER_TIMELINE,
                             write::CODEGEN_WORK_PACKAGE_KIND,
                             &format!("codegen {}", cgu.name()))
        });
        let start_time = Instant::now();
        all_stats.extend(tcx.compile_codegen_unit(*cgu.name()));
        total_codegen_time += start_time.elapsed();
        ongoing_codegen.check_for_errors(tcx.sess);
    }

    ongoing_codegen.codegen_finished(tcx);

    // Since the main thread is sometimes blocked during codegen, we keep track
    // of the -Ztime-passes output manually.
    print_time_passes_entry(tcx.sess.time_passes(),
                            "codegen to LLVM IR",
                            total_codegen_time);

    if tcx.sess.opts.incremental.is_some() {
        ::rustc_incremental::assert_module_sources::assert_module_sources(tcx);
    }

    symbol_names_test::report_symbol_names(tcx);

    if tcx.sess.codegen_stats() {
        println!("--- codegen stats ---");
        println!("n_glues_created: {}", all_stats.n_glues_created);
        println!("n_null_glues: {}", all_stats.n_null_glues);
        println!("n_real_glues: {}", all_stats.n_real_glues);

        println!("n_fns: {}", all_stats.n_fns);
        println!("n_inlines: {}", all_stats.n_inlines);
        println!("n_closures: {}", all_stats.n_closures);
        println!("fn stats:");
        all_stats.fn_stats.sort_by_key(|&(_, insns)| insns);
        for &(ref name, insns) in all_stats.fn_stats.iter() {
            println!("{} insns, {}", insns, *name);
        }
    }

    if tcx.sess.count_llvm_insns() {
        for (k, v) in all_stats.llvm_insns.iter() {
            println!("{:7} {}", *v, *k);
        }
    }

    ongoing_codegen.check_for_errors(tcx.sess);

    assert_and_save_dep_graph(tcx);
    ongoing_codegen
}

fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
    time(tcx.sess,
         "assert dep graph",
         || rustc_incremental::assert_dep_graph(tcx));

    time(tcx.sess,
         "serialize dep graph",
         || rustc_incremental::save_dep_graph(tcx));
}

fn collect_and_partition_mono_items<'a, 'tcx>(
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    cnum: CrateNum,
) -> (Arc<DefIdSet>, Arc<Vec<Arc<CodegenUnit<'tcx>>>>)
{
    assert_eq!(cnum, LOCAL_CRATE);

    let collection_mode = match tcx.sess.opts.debugging_opts.print_mono_items {
        Some(ref s) => {
            let mode_string = s.to_lowercase();
            let mode_string = mode_string.trim();
            if mode_string == "eager" {
                MonoItemCollectionMode::Eager
            } else {
                if mode_string != "lazy" {
                    let message = format!("Unknown codegen-item collection mode '{}'. \
                                           Falling back to 'lazy' mode.",
                                          mode_string);
                    tcx.sess.warn(&message);
                }

                MonoItemCollectionMode::Lazy
            }
        }
        None => {
            if tcx.sess.opts.cg.link_dead_code {
                MonoItemCollectionMode::Eager
            } else {
                MonoItemCollectionMode::Lazy
            }
        }
    };

    let (items, inlining_map) =
        time(tcx.sess, "monomorphization collection", || {
            collector::collect_crate_mono_items(tcx, collection_mode)
        });

    tcx.sess.abort_if_errors();

    ::rustc_mir::monomorphize::assert_symbols_are_distinct(tcx, items.iter());

    let strategy = if tcx.sess.opts.incremental.is_some() {
        PartitioningStrategy::PerModule
    } else {
        PartitioningStrategy::FixedUnitCount(tcx.sess.codegen_units())
    };

    let codegen_units = time(tcx.sess, "codegen unit partitioning", || {
        partitioning::partition(tcx,
                                items.iter().cloned(),
                                strategy,
                                &inlining_map)
            .into_iter()
            .map(Arc::new)
            .collect::<Vec<_>>()
    });

    let mono_items: DefIdSet = items.iter().filter_map(|mono_item| {
        match *mono_item {
            MonoItem::Fn(ref instance) => Some(instance.def_id()),
            MonoItem::Static(def_id) => Some(def_id),
            _ => None,
        }
    }).collect();

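    // Note added for exposition: with `-Z print-mono-items` each line printed
    // by the block below has the shape
    //     MONO_ITEM <item> @@ <cgu-name>[<linkage>] <cgu-name>[<linkage>] ...
    // (the exact cgu names are compiler-generated and vary by crate).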
    if tcx.sess.opts.debugging_opts.print_mono_items.is_some() {
        let mut item_to_cgus = FxHashMap();

        for cgu in &codegen_units {
            for (&mono_item, &linkage) in cgu.items() {
                item_to_cgus.entry(mono_item)
                            .or_insert(Vec::new())
                            .push((cgu.name().clone(), linkage));
            }
        }

        let mut item_keys: Vec<_> = items
            .iter()
            .map(|i| {
                let mut output = i.to_string(tcx);
                output.push_str(" @@");
                let mut empty = Vec::new();
                let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty);
                cgus.as_mut_slice().sort_by_key(|&(ref name, _)| name.clone());
                cgus.dedup();
                for &(ref cgu_name, (linkage, _)) in cgus.iter() {
                    output.push_str(" ");
                    output.push_str(&cgu_name.as_str());

                    let linkage_abbrev = match linkage {
                        Linkage::External => "External",
                        Linkage::AvailableExternally => "Available",
                        Linkage::LinkOnceAny => "OnceAny",
                        Linkage::LinkOnceODR => "OnceODR",
                        Linkage::WeakAny => "WeakAny",
                        Linkage::WeakODR => "WeakODR",
                        Linkage::Appending => "Appending",
                        Linkage::Internal => "Internal",
                        Linkage::Private => "Private",
                        Linkage::ExternalWeak => "ExternalWeak",
                        Linkage::Common => "Common",
                    };

                    output.push_str("[");
                    output.push_str(linkage_abbrev);
                    output.push_str("]");
                }
                output
            })
            .collect();

        item_keys.sort();

        for item in item_keys {
            println!("MONO_ITEM {}", item);
        }
    }

    (Arc::new(mono_items), Arc::new(codegen_units))
}

impl CrateInfo {
    pub fn new(tcx: TyCtxt) -> CrateInfo {
        let mut info = CrateInfo {
            panic_runtime: None,
            compiler_builtins: None,
            profiler_runtime: None,
            sanitizer_runtime: None,
            is_no_builtins: FxHashSet(),
            native_libraries: FxHashMap(),
            used_libraries: tcx.native_libraries(LOCAL_CRATE),
            link_args: tcx.link_args(LOCAL_CRATE),
            crate_name: FxHashMap(),
            used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic),
            used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic),
            used_crate_source: FxHashMap(),
            wasm_custom_sections: BTreeMap::new(),
            wasm_imports: FxHashMap(),
            lang_item_to_crate: FxHashMap(),
            missing_lang_items: FxHashMap(),
        };
        let lang_items = tcx.lang_items();

        let load_wasm_items = tcx.sess.crate_types.borrow()
            .iter()
            .any(|c| *c != config::CrateTypeRlib) &&
            tcx.sess.opts.target_triple == TargetTriple::from_triple("wasm32-unknown-unknown");

        if load_wasm_items {
            info!("attempting to load all wasm sections");
            for &id in tcx.wasm_custom_sections(LOCAL_CRATE).iter() {
                let (name, contents) = fetch_wasm_section(tcx, id);
                info.wasm_custom_sections.entry(name)
                    .or_insert(Vec::new())
                    .extend(contents);
            }
            info.load_wasm_imports(tcx, LOCAL_CRATE);
        }

        for &cnum in tcx.crates().iter() {
            info.native_libraries.insert(cnum, tcx.native_libraries(cnum));
            info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string());
            info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum));
            if tcx.is_panic_runtime(cnum) {
                info.panic_runtime = Some(cnum);
            }
            if tcx.is_compiler_builtins(cnum) {
                info.compiler_builtins = Some(cnum);
            }
            if tcx.is_profiler_runtime(cnum) {
                info.profiler_runtime = Some(cnum);
            }
            if tcx.is_sanitizer_runtime(cnum) {
                info.sanitizer_runtime = Some(cnum);
            }
            if tcx.is_no_builtins(cnum) {
                info.is_no_builtins.insert(cnum);
            }
            if load_wasm_items {
                for &id in tcx.wasm_custom_sections(cnum).iter() {
                    let (name, contents) = fetch_wasm_section(tcx, id);
                    info.wasm_custom_sections.entry(name)
                        .or_insert(Vec::new())
                        .extend(contents);
                }
                info.load_wasm_imports(tcx, cnum);
            }
            let missing = tcx.missing_lang_items(cnum);
            for &item in missing.iter() {
                if let Ok(id) = lang_items.require(item) {
                    info.lang_item_to_crate.insert(item, id.krate);
                }
            }

            // No need to look for lang items that are whitelisted and don't
            // actually need to exist.
            let missing = missing.iter()
                .cloned()
                .filter(|&l| !weak_lang_items::whitelisted(tcx, l))
                .collect();
            info.missing_lang_items.insert(cnum, missing);
        }

        return info
    }

    fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) {
        for (&id, module) in tcx.wasm_import_module_map(cnum).iter() {
            let instance = Instance::mono(tcx, id);
            let import_name = tcx.symbol_name(instance);
            self.wasm_imports.insert(import_name.to_string(), module.clone());
        }
    }
}

fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool {
    let (all_mono_items, _) =
        tcx.collect_and_partition_mono_items(LOCAL_CRATE);
    all_mono_items.contains(&id)
}

fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                  cgu: InternedString) -> Stats {
    let cgu = tcx.codegen_unit(cgu);

    let start_time = Instant::now();
    let (stats, module) = module_codegen(tcx, cgu);
    let time_to_codegen = start_time.elapsed();

    // We assume that the cost to run LLVM on a CGU is proportional to
    // the time we needed to codegen it.
    let cost = time_to_codegen.as_secs() * 1_000_000_000 +
               time_to_codegen.subsec_nanos() as u64;

    write::submit_codegened_module_to_llvm(tcx,
                                           module,
                                           cost);
    return stats;

    fn module_codegen<'a, 'tcx>(
        tcx: TyCtxt<'a, 'tcx, 'tcx>,
        cgu: Arc<CodegenUnit<'tcx>>)
        -> (Stats, ModuleCodegen)
    {
        let cgu_name = cgu.name().to_string();

        // Append ".rs" to the LLVM module identifier.
        //
        // The LLVM code generator emits a ".file filename" directive
        // for ELF backends. The value of "filename" is set to the
        // LLVM module identifier. Due to an LLVM MC bug[1], LLVM
        // crashes if the module identifier is the same as another
        // symbol in the module, such as a function name.
        // 1. http://llvm.org/bugs/show_bug.cgi?id=11479
        let llmod_id = format!("{}-{}.rs",
                               cgu.name(),
                               tcx.crate_disambiguator(LOCAL_CRATE)
                                   .to_fingerprint().to_hex());

        // Instantiate monomorphizations without filling out definitions yet...
        let cx = CodegenCx::new(tcx, cgu, &llmod_id);
        let module = {
            let mono_items = cx.codegen_unit
                               .items_in_deterministic_order(cx.tcx);
            for &(mono_item, (linkage, visibility)) in &mono_items {
                mono_item.predefine(&cx, linkage, visibility);
            }

            // ... and now that we have everything pre-defined, fill out those definitions.
            for &(mono_item, _) in &mono_items {
                mono_item.define(&cx);
            }

            // If this codegen unit contains the main function, also create the
            // wrapper here.
            maybe_create_entry_wrapper(&cx);

            // Run replace-all-uses-with for statics that need it.
            for &(old_g, new_g) in cx.statics_to_rauw.borrow().iter() {
                unsafe {
                    let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g));
                    llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
                    llvm::LLVMDeleteGlobal(old_g);
                }
            }

            // Create the llvm.used variable.
            // This variable has type [N x i8*] and is stored in the llvm.metadata section.
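            //
            // For illustration (note added for exposition), the emitted IR
            // looks roughly like:
            //     @llvm.used = appending global [N x i8*] [...], section "llvm.metadata"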
            if !cx.used_statics.borrow().is_empty() {
                let name = CString::new("llvm.used").unwrap();
                let section = CString::new("llvm.metadata").unwrap();
                let array = C_array(Type::i8(&cx).ptr_to(), &*cx.used_statics.borrow());

                unsafe {
                    let g = llvm::LLVMAddGlobal(cx.llmod,
                                                val_ty(array).to_ref(),
                                                name.as_ptr());
                    llvm::LLVMSetInitializer(g, array);
                    llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
                    llvm::LLVMSetSection(g, section.as_ptr());
                }
            }

            // Finalize debuginfo.
            if cx.sess().opts.debuginfo != NoDebugInfo {
                debuginfo::finalize(&cx);
            }

            let llvm_module = ModuleLlvm {
                llcx: cx.llcx,
                llmod: cx.llmod,
                tm: create_target_machine(cx.sess(), false),
            };

            ModuleCodegen {
                name: cgu_name,
                source: ModuleSource::Codegened(llvm_module),
                kind: ModuleKind::Regular,
                llmod_id,
            }
        };

        (cx.into_stats(), module)
    }
}

pub fn provide(providers: &mut Providers) {
    providers.collect_and_partition_mono_items =
        collect_and_partition_mono_items;

    providers.is_codegened_item = is_codegened_item;

    providers.codegen_unit = |tcx, name| {
        let (_, all) = tcx.collect_and_partition_mono_items(LOCAL_CRATE);
        all.iter()
            .find(|cgu| *cgu.name() == name)
            .cloned()
            .expect(&format!("failed to find cgu with name {:?}", name))
    };
    providers.compile_codegen_unit = compile_codegen_unit;

    provide_extern(providers);
}

pub fn provide_extern(providers: &mut Providers) {
    providers.dllimport_foreign_items = |tcx, krate| {
        let module_map = tcx.foreign_modules(krate);
        let module_map = module_map.iter()
            .map(|lib| (lib.def_id, lib))
            .collect::<FxHashMap<_, _>>();

        let dllimports = tcx.native_libraries(krate)
            .iter()
            .filter(|lib| {
                if lib.kind != cstore::NativeLibraryKind::NativeUnknown {
                    return false
                }
                let cfg = match lib.cfg {
                    Some(ref cfg) => cfg,
                    None => return true,
                };
                attr::cfg_matches(cfg, &tcx.sess.parse_sess, None)
            })
            .filter_map(|lib| lib.foreign_module)
            .map(|id| &module_map[&id])
            .flat_map(|module| module.foreign_items.iter().cloned())
            .collect();
        Lrc::new(dllimports)
    };

    providers.is_dllimport_foreign_item = |tcx, def_id| {
        tcx.dllimport_foreign_items(def_id.krate).contains(&def_id)
    };
}

pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
    match linkage {
        Linkage::External => llvm::Linkage::ExternalLinkage,
        Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage,
        Linkage::LinkOnceAny => llvm::Linkage::LinkOnceAnyLinkage,
        Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage,
        Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage,
        Linkage::WeakODR => llvm::Linkage::WeakODRLinkage,
        Linkage::Appending => llvm::Linkage::AppendingLinkage,
        Linkage::Internal => llvm::Linkage::InternalLinkage,
        Linkage::Private => llvm::Linkage::PrivateLinkage,
        Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage,
        Linkage::Common => llvm::Linkage::CommonLinkage,
    }
}

pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility {
    match linkage {
        Visibility::Default => llvm::Visibility::Default,
        Visibility::Hidden => llvm::Visibility::Hidden,
        Visibility::Protected => llvm::Visibility::Protected,
    }
}

// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement
//            the HashStable trait. Normally DepGraph::with_task() calls are
//            hidden behind queries, but CGU creation is a special case in two
//            ways: (1) it's not a query and (2) CGUs are output nodes, so their
//            Fingerprints are not actually needed. It remains to be clarified
//            how exactly this case will be handled in the red/green system but
//            for now we content ourselves with providing a no-op HashStable
//            implementation for CGUs.
mod temp_stable_hash_impls {
    use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher,
                                               HashStable};
    use ModuleCodegen;

    impl<HCX> HashStable<HCX> for ModuleCodegen {
        fn hash_stable<W: StableHasherResult>(&self,
                                              _: &mut HCX,
                                              _: &mut StableHasher<W>) {
            // do nothing
        }
    }
}

fn fetch_wasm_section(tcx: TyCtxt, id: DefId) -> (String, Vec<u8>) {
    use rustc::mir::interpret::GlobalId;
    use rustc::middle::const_val::ConstVal;

    info!("loading wasm section {:?}", id);

    let section = tcx.get_attrs(id)
        .iter()
        .find(|a| a.check_name("wasm_custom_section"))
        .expect("missing #[wasm_custom_section] attribute")
        .value_str()
        .expect("malformed #[wasm_custom_section] attribute");

    let instance = ty::Instance::mono(tcx, id);
    let cid = GlobalId {
        instance,
        promoted: None
    };
    let param_env = ty::ParamEnv::reveal_all();
    let val = tcx.const_eval(param_env.and(cid)).unwrap();

    let const_val = match val.val {
        ConstVal::Value(val) => val,
        ConstVal::Unevaluated(..) => bug!("should be evaluated"),
    };

    let alloc = tcx.const_value_to_allocation((const_val, val.ty));
    (section.to_string(), alloc.bytes.clone())
}