//! Codegen the completed AST to the LLVM IR.
//!
//! Some functions here, such as `codegen_block` and `codegen_expr`, return a value --
//! the result of the codegen to LLVM -- while others, such as `codegen_fn`
//! and `mono_item`, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//!
//! Hopefully useful general knowledge about codegen:
//!
//! * There's no way to find out the `Ty` type of a `Value`. Doing so
//!   would be "trying to get the eggs out of an omelette" (credit:
//!   pcwalton). You can, instead, find out its `llvm::Type` by calling `val_ty`,
//!   but one `llvm::Type` corresponds to many `Ty`s; for instance, `tup(int, int,
//!   int)` and `rec(x=int, y=int, z=int)` will have the same `llvm::Type`.

use crate::back::write::{
    compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
    submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm, ComputedLtoType, OngoingCodegen,
};
use crate::common::{IntPredicate, RealPredicate, TypeKind};
use crate::meth;
use crate::mir;
use crate::mir::operand::OperandValue;
use crate::mir::place::PlaceRef;
use crate::traits::*;
use crate::{CachedModuleCodegen, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};

use rustc_attr as attr;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::print_time_passes_entry;
use rustc_data_structures::sync::{par_iter, Lock, ParallelIterator};
use rustc_hir as hir;
use rustc_hir::def_id::{LocalDefId, LOCAL_CRATE};
use rustc_hir::lang_items::LangItem;
use rustc_index::vec::Idx;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::middle::cstore::EncodedMetadata;
use rustc_middle::middle::cstore::{self, LinkagePreference};
use rustc_middle::middle::lang_items;
use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
use rustc_middle::ty::layout::{self, HasTyCtxt, TyAndLayout};
use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{self, EntryFnType};
use rustc_session::utils::NativeLibKind;
use rustc_session::Session;
use rustc_span::Span;
use rustc_symbol_mangling::test as symbol_names_test;
use rustc_target::abi::{Abi, Align, LayoutOf, Scalar, VariantIdx};

use std::cmp;
use std::ops::{Deref, DerefMut};
use std::time::{Duration, Instant};

pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) -> IntPredicate {
    match op {
        hir::BinOpKind::Eq => IntPredicate::IntEQ,
        hir::BinOpKind::Ne => IntPredicate::IntNE,
        hir::BinOpKind::Lt => {
            if signed {
                IntPredicate::IntSLT
            } else {
                IntPredicate::IntULT
            }
        }
        hir::BinOpKind::Le => {
            if signed {
                IntPredicate::IntSLE
            } else {
                IntPredicate::IntULE
            }
        }
        hir::BinOpKind::Gt => {
            if signed {
                IntPredicate::IntSGT
            } else {
                IntPredicate::IntUGT
            }
        }
        hir::BinOpKind::Ge => {
            if signed {
                IntPredicate::IntSGE
            } else {
                IntPredicate::IntUGE
            }
        }
        op => bug!(
            "bin_op_to_icmp_predicate: expected comparison operator, \
             found {:?}",
            op
        ),
    }
}

pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
    match op {
        hir::BinOpKind::Eq => RealPredicate::RealOEQ,
        hir::BinOpKind::Ne => RealPredicate::RealUNE,
        hir::BinOpKind::Lt => RealPredicate::RealOLT,
        hir::BinOpKind::Le => RealPredicate::RealOLE,
        hir::BinOpKind::Gt => RealPredicate::RealOGT,
        hir::BinOpKind::Ge => RealPredicate::RealOGE,
        op => {
            bug!(
                "bin_op_to_fcmp_predicate: expected comparison operator, \
                 found {:?}",
                op
            );
        }
    }
}

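// Note on the mapping above (editorial comment, not part of the original
// file): `Ne` lowers to the *unordered* predicate `RealUNE` (true if either
// operand is NaN or the operands differ), while the other comparisons use
// *ordered* predicates (false if either operand is NaN). This matches Rust's
// IEEE 754 semantics: `f64::NAN != f64::NAN` is `true`, but
// `f64::NAN < 1.0` is `false`.
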
pub fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    lhs: Bx::Value,
    rhs: Bx::Value,
    t: Ty<'tcx>,
    ret_ty: Bx::Type,
    op: hir::BinOpKind,
) -> Bx::Value {
    let signed = match t.kind {
        ty::Float(_) => {
            let cmp = bin_op_to_fcmp_predicate(op);
            let cmp = bx.fcmp(cmp, lhs, rhs);
            return bx.sext(cmp, ret_ty);
        }
        ty::Uint(_) => false,
        ty::Int(_) => true,
        _ => bug!("compare_simd_types: invalid SIMD type"),
    };

    let cmp = bin_op_to_icmp_predicate(op, signed);
    let cmp = bx.icmp(cmp, lhs, rhs);
    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
    // to get the correctly sized type. This will compile to a single instruction
    // once the IR is converted to assembly if the SIMD instruction is supported
    // by the target architecture.
    bx.sext(cmp, ret_ty)
}

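// Illustrative sketch (hypothetical names, not part of the original file):
// comparing two `<4 x i32>` lanes with `Lt` emits an `icmp slt` producing a
// `<4 x i1>` mask, which the `sext` above widens back to `<4 x i32>`
// (all-ones lanes for true, all-zero lanes for false):
//
//     let mask = compare_simd_types(bx, lhs, rhs, i32_elem_ty, i32x4_llty,
//                                   hir::BinOpKind::Lt);
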
/// Retrieves the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit odd. It is intended for use in an upcast,
/// where the new vtable for an object will be derived from the old one.
pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
    cx: &Cx,
    source: Ty<'tcx>,
    target: Ty<'tcx>,
    old_info: Option<Cx::Value>,
) -> Cx::Value {
    let (source, target) =
        cx.tcx().struct_lockstep_tails_erasing_lifetimes(source, target, cx.param_env());
    match (&source.kind, &target.kind) {
        (&ty::Array(_, len), &ty::Slice(_)) => {
            cx.const_usize(len.eval_usize(cx.tcx(), ty::ParamEnv::reveal_all()))
        }
        (&ty::Dynamic(..), &ty::Dynamic(..)) => {
            // For now, upcasts are limited to changes in marker
            // traits, and hence never actually require a change
            // to the vtable.
            old_info.expect("unsized_info: missing old info for trait upcast")
        }
        (_, &ty::Dynamic(ref data, ..)) => {
            let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)).field(cx, FAT_PTR_EXTRA);
            cx.const_ptrcast(
                meth::get_vtable(cx, source, data.principal()),
                cx.backend_type(vtable_ptr),
            )
        }
        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
    }
}

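// Illustrative sketch (not part of the original file): unsizing `[u8; 4]` to
// `[u8]` hits the `Array -> Slice` arm above, so the "lost" information is
// just the array length, materialized as a constant:
//
//     // unsized_info(cx, /* [u8; 4] */ source, /* [u8] */ target, None)
//     //     == cx.const_usize(4)
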
/// Coerces `src` to `dst_ty`. `src_ty` must be a thin pointer.
pub fn unsize_thin_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: Bx::Value,
    src_ty: Ty<'tcx>,
    dst_ty: Ty<'tcx>,
) -> (Bx::Value, Bx::Value) {
    debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
    match (&src_ty.kind, &dst_ty.kind) {
        (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
        | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
            assert!(bx.cx().type_is_sized(a));
            let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
        }
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b);

            let src_layout = bx.cx().layout_of(src_ty);
            let dst_layout = bx.cx().layout_of(dst_ty);
            let mut result = None;
            for i in 0..src_layout.fields.count() {
                let src_f = src_layout.field(bx.cx(), i);
                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
                if src_f.is_zst() {
                    continue;
                }
                assert_eq!(src_layout.size, src_f.size);

                let dst_f = dst_layout.field(bx.cx(), i);
                assert_ne!(src_f.ty, dst_f.ty);
                assert_eq!(result, None);
                result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty));
            }
            let (lldata, llextra) = result.unwrap();
            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
            // FIXME(eddyb) move these out of this `match` arm, so they're always
            // applied, uniformly, no matter the source/destination types.
            (
                bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)),
                bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true)),
            )
        }
        _ => bug!("unsize_thin_ptr: called on bad types"),
    }
}

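// Illustrative sketch (not part of the original file): coercing `&[u8; 4]`
// to `&[u8]` takes the `Ref`/`Ref` arm above and returns the two halves of
// the resulting fat pointer separately:
//
//     let (lldata, llextra) = unsize_thin_ptr(bx, src, src_ty, dst_ty);
//     // lldata:  `src` cast to a pointer to the slice's backend type
//     // llextra: the length, `cx.const_usize(4)`, from `unsized_info`
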
/// Coerces `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty`, and stores the result in `dst`.
pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: PlaceRef<'tcx, Bx::Value>,
    dst: PlaceRef<'tcx, Bx::Value>,
) {
    let src_ty = src.layout.ty;
    let dst_ty = dst.layout.ty;
    match (&src_ty.kind, &dst_ty.kind) {
        (&ty::Ref(..), &ty::Ref(..) | &ty::RawPtr(..)) | (&ty::RawPtr(..), &ty::RawPtr(..)) => {
            let (base, info) = match bx.load_operand(src).val {
                OperandValue::Pair(base, info) => {
                    // A fat-ptr to fat-ptr unsize preserves the vtable,
                    // i.e., `&'a fmt::Debug + Send` => `&'a fmt::Debug`,
                    // so we need to pointercast the base to ensure
                    // the types match up.
                    // FIXME(eddyb) use `scalar_pair_element_backend_type` here,
                    // like `unsize_thin_ptr` does.
                    let thin_ptr = dst.layout.field(bx.cx(), FAT_PTR_ADDR);
                    (bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info)
                }
                OperandValue::Immediate(base) => unsize_thin_ptr(bx, base, src_ty, dst_ty),
                OperandValue::Ref(..) => bug!(),
            };
            OperandValue::Pair(base, info).store(bx, dst);
        }

        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b);

            for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
                let src_f = src.project_field(bx, i);
                let dst_f = dst.project_field(bx, i);

                if dst_f.layout.is_zst() {
                    continue;
                }

                if src_f.layout.ty == dst_f.layout.ty {
                    memcpy_ty(
                        bx,
                        dst_f.llval,
                        dst_f.align,
                        src_f.llval,
                        src_f.align,
                        src_f.layout,
                        MemFlags::empty(),
                    );
                } else {
                    coerce_unsized_into(bx, src_f, dst_f);
                }
            }
        }
        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty),
    }
}

pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    op: hir::BinOpKind,
    lhs: Bx::Value,
    rhs: Bx::Value,
) -> Bx::Value {
    cast_shift_rhs(bx, op, lhs, rhs)
}

fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    op: hir::BinOpKind,
    lhs: Bx::Value,
    rhs: Bx::Value,
) -> Bx::Value {
    // Shifts may have an integer of any size on the RHS.
    if op.is_shift() {
        let mut rhs_llty = bx.cx().val_ty(rhs);
        let mut lhs_llty = bx.cx().val_ty(lhs);
        if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
            rhs_llty = bx.cx().element_type(rhs_llty)
        }
        if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
            lhs_llty = bx.cx().element_type(lhs_llty)
        }
        let rhs_sz = bx.cx().int_width(rhs_llty);
        let lhs_sz = bx.cx().int_width(lhs_llty);
        if lhs_sz < rhs_sz {
            bx.trunc(rhs, lhs_llty)
        } else if lhs_sz > rhs_sz {
            // FIXME (#1877): If in the future shifting by negative
            // values is no longer undefined, then this is wrong.
            bx.zext(rhs, lhs_llty)
        } else {
            rhs
        }
    } else {
        rhs
    }
}

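// Illustrative sketch (not part of the original file): LLVM's shift
// instructions require both operands to have the same width, which is what
// the function above guarantees. For `x: u64 >> s: u8`, `lhs_sz` is 64 and
// `rhs_sz` is 8, so the RHS is zero-extended before the shift:
//
//     // rhs = bx.zext(rhs, /* i64 */ lhs_llty)
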
/// Returns `true` if this session's target will use SEH-based unwinding.
///
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
    sess.target.target.options.is_like_msvc
}

pub fn from_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    val: Bx::Value,
) -> Bx::Value {
    if bx.cx().val_ty(val) == bx.cx().type_i1() { bx.zext(val, bx.cx().type_i8()) } else { val }
}

pub fn to_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    val: Bx::Value,
    layout: layout::TyAndLayout<'_>,
) -> Bx::Value {
    if let Abi::Scalar(ref scalar) = layout.abi {
        return to_immediate_scalar(bx, val, scalar);
    }
    val
}

pub fn to_immediate_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    val: Bx::Value,
    scalar: &Scalar,
) -> Bx::Value {
    if scalar.is_bool() {
        return bx.trunc(val, bx.cx().type_i1());
    }
    val
}

pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    dst: Bx::Value,
    dst_align: Align,
    src: Bx::Value,
    src_align: Align,
    layout: TyAndLayout<'tcx>,
    flags: MemFlags,
) {
    let size = layout.size.bytes();
    if size == 0 {
        return;
    }

    bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}

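// Note on the helpers above (editorial comment, not part of the original
// file): Rust `bool`s are stored in memory as `i8` (0 or 1) but manipulated
// as `i1` in LLVM IR. `from_immediate` zero-extends `i1` to `i8` on the way
// into memory, and `to_immediate_scalar` truncates `i8` back to `i1` on the
// way out.
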
pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    instance: Instance<'tcx>,
) {
    // This is an `info!` to allow collecting monomorphization statistics
    // and to allow finding the last function before LLVM aborts in
    // release builds.
    info!("codegen_instance({})", instance);

    mir::codegen_mir::<Bx>(cx, instance);
}

/// Creates the `main` function, which will initialize the Rust runtime and call
/// the user's main function.
pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
) -> Option<Bx::Function> {
    let (main_def_id, span) = match cx.tcx().entry_fn(LOCAL_CRATE) {
        Some((def_id, _)) => (def_id, cx.tcx().def_span(def_id)),
        None => return None,
    };

    let instance = Instance::mono(cx.tcx(), main_def_id.to_def_id());

    if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
        // We want to create the wrapper in the same codegen unit as Rust's main
        // function.
        return None;
    }

    let main_llfn = cx.get_fn_addr(instance);

    return cx.tcx().entry_fn(LOCAL_CRATE).map(|(_, et)| {
        let use_start_lang_item = EntryFnType::Start != et;
        create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, use_start_lang_item)
    });

    fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
        cx: &'a Bx::CodegenCx,
        sp: Span,
        rust_main: Bx::Value,
        rust_main_def_id: LocalDefId,
        use_start_lang_item: bool,
    ) -> Bx::Function {
        // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
        // depending on whether the target needs `argc` and `argv` to be passed in.
        let llfty = if cx.sess().target.target.options.main_needs_argc_argv {
            cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
        } else {
            cx.type_func(&[], cx.type_int())
        };

        let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
        // Given that `main()` has no arguments, its return type cannot have
        // late-bound regions, since late-bound regions must appear in the
        // argument listing.
        let main_ret_ty = cx.tcx().erase_regions(&main_ret_ty.no_bound_vars().unwrap());

        if cx.get_declared_value("main").is_some() {
            // FIXME: We should be smart and show a better diagnostic here.
            cx.sess()
                .struct_span_err(sp, "entry symbol `main` declared multiple times")
                .help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
                .emit();
            cx.sess().abort_if_errors();
            bug!();
        }
        let llfn = cx.declare_cfn("main", llfty);

        // `main` should respect the same config for frame pointer elimination as the rest of the code.
        cx.set_frame_pointer_elimination(llfn);
        cx.apply_target_cpu_attr(llfn);

        let mut bx = Bx::new_block(&cx, llfn, "top");

        bx.insert_reference_to_gdb_debug_scripts_section_global();

        let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);

        let (start_fn, args) = if use_start_lang_item {
            let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
            let start_fn = cx.get_fn_addr(
                ty::Instance::resolve(
                    cx.tcx(),
                    ty::ParamEnv::reveal_all(),
                    start_def_id,
                    cx.tcx().intern_substs(&[main_ret_ty.into()]),
                )
                .unwrap()
                .unwrap(),
            );
            (
                start_fn,
                vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), arg_argc, arg_argv],
            )
        } else {
            debug!("using user-defined start fn");
            (rust_main, vec![arg_argc, arg_argv])
        };

        let result = bx.call(start_fn, &args, None);
        let cast = bx.intcast(result, cx.type_int(), true);
        bx.ret(cast);

        llfn
    }
}

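// Illustrative sketch (not part of the original file, details approximate):
// for an ordinary `fn main()` on a target where `main_needs_argc_argv` is
// true, the wrapper built above behaves roughly like:
//
//     #[no_mangle]
//     extern "C" fn main(argc: c_int, argv: *const *const u8) -> c_int {
//         lang_start(user_main, argc as isize, argv) as c_int
//     }
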
/// Obtains the `argc` and `argv` values to pass to the Rust start function.
fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    bx: &mut Bx,
) -> (Bx::Value, Bx::Value) {
    if cx.sess().target.target.options.main_needs_argc_argv {
        // The params of the native `main()` are used as args for the Rust start function.
        let param_argc = bx.get_param(0);
        let param_argv = bx.get_param(1);
        let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
        let arg_argv = param_argv;
        (arg_argc, arg_argv)
    } else {
        // The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
        let arg_argc = bx.const_int(cx.type_int(), 0);
        let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
        (arg_argc, arg_argv)
    }
}

pub const CODEGEN_WORKER_ID: usize = usize::MAX;

pub fn codegen_crate<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'tcx>,
    metadata: EncodedMetadata,
    need_metadata_module: bool,
) -> OngoingCodegen<B> {
    // Skip crate items and just output metadata in -Z no-codegen mode.
    if tcx.sess.opts.debugging_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
        let ongoing_codegen = start_async_codegen(backend, tcx, metadata, 1);

        ongoing_codegen.codegen_finished(tcx);

        finalize_tcx(tcx);

        ongoing_codegen.check_for_errors(tcx.sess);

        return ongoing_codegen;
    }

    let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);

    // Run the monomorphization collector and partition the collected items into
    // codegen units.
    let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1;

    // Force all codegen_unit queries so they are already either red or green
    // when compile_codegen_unit accesses them. We are not able to re-execute
    // the codegen_unit query from just the DepNode, so an unknown color would
    // lead to having to re-execute compile_codegen_unit, possibly
    // unnecessarily.
    if tcx.dep_graph.is_fully_enabled() {
        for cgu in codegen_units {
            tcx.ensure().codegen_unit(cgu.name());
        }
    }

    let ongoing_codegen = start_async_codegen(backend.clone(), tcx, metadata, codegen_units.len());
    let ongoing_codegen = AbortCodegenOnDrop::<B>(Some(ongoing_codegen));

    // Codegen an allocator shim, if necessary.
    //
    // If the crate doesn't have an `allocator_kind` set then there's definitely
    // no shim to generate. Otherwise we also check our dependency graph for all
    // our output crate types. If anything there looks like it's a `Dynamic`
    // linkage, then it's already got an allocator shim and we'll be using that
    // one instead. If nothing exists then it's our job to generate the
    // allocator!
    let any_dynamic_crate = tcx.dependency_formats(LOCAL_CRATE).iter().any(|(_, list)| {
        use rustc_middle::middle::dependency_format::Linkage;
        list.iter().any(|&linkage| linkage == Linkage::Dynamic)
    });
    let allocator_module = if any_dynamic_crate {
        None
    } else if let Some(kind) = tcx.allocator_kind() {
        let llmod_id =
            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
        let mut modules = backend.new_metadata(tcx, &llmod_id);
        tcx.sess
            .time("write_allocator_module", || backend.codegen_allocator(tcx, &mut modules, kind));

        Some(ModuleCodegen { name: llmod_id, module_llvm: modules, kind: ModuleKind::Allocator })
    } else {
        None
    };

    if let Some(allocator_module) = allocator_module {
        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
    }

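    // Summary of the allocator decision above (illustrative, not part of the
    // original file):
    //
    //     any dynamic dependency?   allocator_kind set?   shim generated?
    //     yes                       --                    no (the dylib has one)
    //     no                        yes                   yes
    //     no                        no                    no
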
    if need_metadata_module {
        // Codegen the encoded metadata.
        let metadata_cgu_name =
            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
        let mut metadata_llvm_module = backend.new_metadata(tcx, &metadata_cgu_name);
        tcx.sess.time("write_compressed_metadata", || {
            backend.write_compressed_metadata(
                tcx,
                &ongoing_codegen.metadata,
                &mut metadata_llvm_module,
            );
        });

        let metadata_module = ModuleCodegen {
            name: metadata_cgu_name,
            module_llvm: metadata_llvm_module,
            kind: ModuleKind::Metadata,
        };
        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module);
    }

    // We sort the codegen units by size. This way we can schedule work for LLVM
    // a bit more efficiently.
    let codegen_units = {
        let mut codegen_units = codegen_units.iter().collect::<Vec<_>>();
        codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
        codegen_units
    };

    let total_codegen_time = Lock::new(Duration::new(0, 0));

    // The non-parallel compiler can only translate codegen units to LLVM IR
    // on a single thread, leading to a staircase effect where the N LLVM
    // threads have to wait on the single codegen thread to generate work
    // for them. The parallel compiler does not have this restriction, so
    // we can pre-load the LLVM queue in parallel before handing off
    // coordination to the OngoingCodegen scheduler.
    //
    // This likely is a temporary measure. Once we don't have to support the
    // non-parallel compiler anymore, we can compile CGUs end-to-end in
    // parallel and get rid of the complicated scheduling logic.
    let pre_compile_cgus = |cgu_reuse: &[CguReuse]| {
        if cfg!(parallel_compiler) {
            tcx.sess.time("compile_first_CGU_batch", || {
                // Try to find one CGU to compile per thread.
                let cgus: Vec<_> = cgu_reuse
                    .iter()
                    .enumerate()
                    .filter(|&(_, reuse)| reuse == &CguReuse::No)
                    .take(tcx.sess.threads())
                    .collect();

                // Compile the found CGUs in parallel.
                par_iter(cgus)
                    .map(|(i, _)| {
                        let start_time = Instant::now();
                        let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
                        let mut time = total_codegen_time.lock();
                        *time += start_time.elapsed();
                        (i, module)
                    })
                    .collect()
            })
        } else {
            FxHashMap::default()
        }
    };

    let mut cgu_reuse = Vec::new();
    let mut pre_compiled_cgus: Option<FxHashMap<usize, _>> = None;

    for (i, cgu) in codegen_units.iter().enumerate() {
        ongoing_codegen.wait_for_signal_to_codegen_item();
        ongoing_codegen.check_for_errors(tcx.sess);

        // Do some setup work in the first iteration.
        if pre_compiled_cgus.is_none() {
            // Calculate the CGU reuse.
            cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
                codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect()
            });
            // Pre-compile some CGUs.
            pre_compiled_cgus = Some(pre_compile_cgus(&cgu_reuse));
        }

        let cgu_reuse = cgu_reuse[i];
        tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);

        match cgu_reuse {
            CguReuse::No => {
                let (module, cost) =
                    if let Some(cgu) = pre_compiled_cgus.as_mut().unwrap().remove(&i) {
                        cgu
                    } else {
                        let start_time = Instant::now();
                        let module = backend.compile_codegen_unit(tcx, cgu.name());
                        let mut time = total_codegen_time.lock();
                        *time += start_time.elapsed();
                        module
                    };
                submit_codegened_module_to_llvm(
                    &backend,
                    &ongoing_codegen.coordinator_send,
                    module,
                    cost,
                );
                false
            }
            CguReuse::PreLto => {
                submit_pre_lto_module_to_llvm(
                    &backend,
                    tcx,
                    &ongoing_codegen.coordinator_send,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.work_product(tcx),
                    },
                );
                true
            }
            CguReuse::PostLto => {
                submit_post_lto_module_to_llvm(
                    &backend,
                    &ongoing_codegen.coordinator_send,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.work_product(tcx),
                    },
                );
                true
            }
        };
    }

    ongoing_codegen.codegen_finished(tcx);

    // Since the main thread is sometimes blocked during codegen, we keep track
    // of -Ztime-passes output manually.
    print_time_passes_entry(
        tcx.sess.time_passes(),
        "codegen_to_LLVM_IR",
        total_codegen_time.into_inner(),
    );

    ::rustc_incremental::assert_module_sources::assert_module_sources(tcx);

    symbol_names_test::report_symbol_names(tcx);

    ongoing_codegen.check_for_errors(tcx.sess);

    finalize_tcx(tcx);

    ongoing_codegen.into_inner()
}

/// A curious wrapper structure whose only purpose is to call `codegen_aborted`
/// when it's dropped abnormally.
///
/// In the process of working on rust-lang/rust#55238 a mysterious segfault was
/// stumbled upon. The segfault was never reproduced locally, but it was
/// suspected to be related to the fact that codegen worker threads were
/// sticking around by the time the main thread was exiting, causing issues.
///
/// This structure is an attempt to fix that issue where the `codegen_aborted`
/// message will block until all workers have finished. This should ensure that
/// even if the main codegen thread panics we'll wait for pending work to
/// complete before returning from the main thread, hopefully avoiding
/// segfaults.
///
/// If you see this comment in the code, then it means that this workaround
/// worked! We may yet one day track down the mysterious cause of that
/// segfault...
struct AbortCodegenOnDrop<B: ExtraBackendMethods>(Option<OngoingCodegen<B>>);

impl<B: ExtraBackendMethods> AbortCodegenOnDrop<B> {
    fn into_inner(mut self) -> OngoingCodegen<B> {
        self.0.take().unwrap()
    }
}

impl<B: ExtraBackendMethods> Deref for AbortCodegenOnDrop<B> {
    type Target = OngoingCodegen<B>;

    fn deref(&self) -> &OngoingCodegen<B> {
        self.0.as_ref().unwrap()
    }
}

impl<B: ExtraBackendMethods> DerefMut for AbortCodegenOnDrop<B> {
    fn deref_mut(&mut self) -> &mut OngoingCodegen<B> {
        self.0.as_mut().unwrap()
    }
}

impl<B: ExtraBackendMethods> Drop for AbortCodegenOnDrop<B> {
    fn drop(&mut self) {
        if let Some(codegen) = self.0.take() {
            codegen.codegen_aborted();
        }
    }
}

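// Illustrative usage of the guard above (not part of the original file): the
// `Option` is `take`n both in `into_inner` (the happy path) and in `drop`,
// so `codegen_aborted` fires exactly when the wrapper is dropped without
// having been consumed:
//
//     let ongoing_codegen = AbortCodegenOnDrop::<B>(Some(ongoing_codegen));
//     // ... a panic or early return here aborts codegen cleanly ...
//     let ongoing_codegen = ongoing_codegen.into_inner(); // disarms the guard
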
fn finalize_tcx(tcx: TyCtxt<'_>) {
    tcx.sess.time("assert_dep_graph", || ::rustc_incremental::assert_dep_graph(tcx));
    tcx.sess.time("serialize_dep_graph", || ::rustc_incremental::save_dep_graph(tcx));

    // We assume that no queries are run past here. If there are new queries
    // after this point, they'll show up as "<unknown>" in self-profiling data.
    {
        let _prof_timer = tcx.prof.generic_activity("self_profile_alloc_query_strings");
        tcx.alloc_self_profile_query_strings();
    }
}

impl CrateInfo {
    pub fn new(tcx: TyCtxt<'_>) -> CrateInfo {
        let mut info = CrateInfo {
            panic_runtime: None,
            compiler_builtins: None,
            profiler_runtime: None,
            is_no_builtins: Default::default(),
            native_libraries: Default::default(),
            used_libraries: tcx.native_libraries(LOCAL_CRATE),
            link_args: tcx.link_args(LOCAL_CRATE),
            crate_name: Default::default(),
            used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic),
            used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic),
            used_crate_source: Default::default(),
            lang_item_to_crate: Default::default(),
            missing_lang_items: Default::default(),
            dependency_formats: tcx.dependency_formats(LOCAL_CRATE),
        };
        let lang_items = tcx.lang_items();

        let crates = tcx.crates();

        let n_crates = crates.len();
        info.native_libraries.reserve(n_crates);
        info.crate_name.reserve(n_crates);
        info.used_crate_source.reserve(n_crates);
        info.missing_lang_items.reserve(n_crates);

        for &cnum in crates.iter() {
            info.native_libraries.insert(cnum, tcx.native_libraries(cnum));
            info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string());
            info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum));
            if tcx.is_panic_runtime(cnum) {
                info.panic_runtime = Some(cnum);
            }
            if tcx.is_compiler_builtins(cnum) {
                info.compiler_builtins = Some(cnum);
            }
            if tcx.is_profiler_runtime(cnum) {
                info.profiler_runtime = Some(cnum);
            }
            if tcx.is_no_builtins(cnum) {
                info.is_no_builtins.insert(cnum);
            }
            let missing = tcx.missing_lang_items(cnum);
            for &item in missing.iter() {
                if let Ok(id) = lang_items.require(item) {
                    info.lang_item_to_crate.insert(item, id.krate);
                }
            }

            // No need to look for lang items that don't actually need to exist.
            let missing =
                missing.iter().cloned().filter(|&l| lang_items::required(tcx, l)).collect();
            info.missing_lang_items.insert(cnum, missing);
        }

        info
    }
}

pub fn provide_both(providers: &mut Providers) {
    providers.backend_optimization_level = |tcx, cratenum| {
        let for_speed = match tcx.sess.opts.optimize {
            // If globally no optimisation is done, #[optimize] has no effect.
            //
            // This is done because if we ended up "upgrading" to `-O2` here, we'd populate the
            // pass manager and it is likely that some module-wide passes (such as the inliner or
            // cross-function constant propagation) would ignore the `optnone` annotation we put
            // on the functions, thus necessarily involving these functions in optimisations.
            config::OptLevel::No => return config::OptLevel::No,
            // If globally optimise-speed is already specified, just use that level.
            config::OptLevel::Less => return config::OptLevel::Less,
            config::OptLevel::Default => return config::OptLevel::Default,
            config::OptLevel::Aggressive => return config::OptLevel::Aggressive,
            // If globally optimize-for-size has been requested, use `-O2` instead for
            // functions marked #[optimize(speed)].
            config::OptLevel::Size => config::OptLevel::Default,
            config::OptLevel::SizeMin => config::OptLevel::Default,
        };

        let (defids, _) = tcx.collect_and_partition_mono_items(cratenum);
        for id in &*defids {
            let CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id);
            match optimize {
                attr::OptimizeAttr::None => continue,
                attr::OptimizeAttr::Size => continue,
                attr::OptimizeAttr::Speed => {
                    return for_speed;
                }
            }
        }
        tcx.sess.opts.optimize
    };

    providers.dllimport_foreign_items = |tcx, krate| {
        let module_map = tcx.foreign_modules(krate);
        let module_map =
            module_map.iter().map(|lib| (lib.def_id, lib)).collect::<FxHashMap<_, _>>();

        let dllimports = tcx
            .native_libraries(krate)
            .iter()
            .filter(|lib| {
                if !matches!(lib.kind, NativeLibKind::Dylib | NativeLibKind::Unspecified) {
                    return false;
                }
                let cfg = match lib.cfg {
                    Some(ref cfg) => cfg,
                    None => return true,
                };
                attr::cfg_matches(cfg, &tcx.sess.parse_sess, None)
            })
            .filter_map(|lib| lib.foreign_module)
            .map(|id| &module_map[&id])
            .flat_map(|module| module.foreign_items.iter().cloned())
            .collect();
        dllimports
    };

    providers.is_dllimport_foreign_item =
        |tcx, def_id| tcx.dllimport_foreign_items(def_id.krate).contains(&def_id);
}

fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
    if !tcx.dep_graph.is_fully_enabled() {
        return CguReuse::No;
    }

    let work_product_id = &cgu.work_product_id();
    if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
        // We don't have anything cached for this CGU. This can happen
        // if the CGU did not exist in the previous session.
        return CguReuse::No;
    }

    // Try to mark the CGU as green. If we can do so, it means that nothing
    // affecting the LLVM module has changed and we can re-use a cached version.
    // If we compile with any kind of LTO, this means we can re-use the bitcode
    // of the pre-LTO stage (possibly also the post-LTO version, but we'll only
    // know that later). If we are not doing LTO, there is only one optimized
    // version of each module, so we re-use that.
    let dep_node = cgu.codegen_dep_node(tcx);
    assert!(
        !tcx.dep_graph.dep_node_exists(&dep_node),
        "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
        cgu.name()
    );

    if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
        // We can re-use either the pre- or the post-ThinLTO state. If no LTO is
        // being performed then we can use post-LTO artifacts, otherwise we must
        // reuse pre-LTO artifacts.
        match compute_per_cgu_lto_type(
            &tcx.sess.lto(),
            &tcx.sess.opts,
            &tcx.sess.crate_types(),
            ModuleKind::Regular,
        ) {
            ComputedLtoType::No => CguReuse::PostLto,
            _ => CguReuse::PreLto,
        }
    } else {
        CguReuse::No
    }
}
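
// Summary of the decision above (illustrative, not part of the original file):
//
//     CGU dep-node state            per-CGU LTO type       result
//     ------------------            ----------------       -----------------
//     no cached work product        --                     CguReuse::No
//     could not be marked green     --                     CguReuse::No
//     marked green                  ComputedLtoType::No    CguReuse::PostLto
//     marked green                  any LTO                CguReuse::PreLto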