// compiler/rustc_codegen_ssa/src/base.rs (rustc 1.61.0)

use crate::back::metadata::create_compressed_metadata_file;
use crate::back::write::{
    compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
    submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm, ComputedLtoType, OngoingCodegen,
};
use crate::common::{IntPredicate, RealPredicate, TypeKind};
use crate::meth;
use crate::mir;
use crate::mir::operand::OperandValue;
use crate::mir::place::PlaceRef;
use crate::traits::*;
use crate::{CachedModuleCodegen, CompiledModule, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};

use rustc_attr as attr;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};

#[cfg(parallel_compiler)]
use rustc_data_structures::sync::{par_iter, ParallelIterator};
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::lang_items::LangItem;
use rustc_index::vec::Idx;
use rustc_metadata::EncodedMetadata;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::middle::exported_symbols;
use rustc_middle::middle::lang_items;
use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::cgu_reuse_tracker::CguReuse;
use rustc_session::config::{self, EntryFnType, OutputType};
use rustc_session::Session;
use rustc_span::symbol::sym;
use rustc_target::abi::{Align, VariantIdx};

use std::convert::TryFrom;
use std::ops::{Deref, DerefMut};
use std::time::{Duration, Instant};

use itertools::Itertools;

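/// Maps an integer comparison operator from HIR to the corresponding
/// `IntPredicate`, choosing the signed or unsigned variant as requested
/// (e.g. `<` becomes `IntSLT` when `signed` is true, `IntULT` otherwise).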
pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) -> IntPredicate {
    match op {
        hir::BinOpKind::Eq => IntPredicate::IntEQ,
        hir::BinOpKind::Ne => IntPredicate::IntNE,
        hir::BinOpKind::Lt => {
            if signed {
                IntPredicate::IntSLT
            } else {
                IntPredicate::IntULT
            }
        }
        hir::BinOpKind::Le => {
            if signed {
                IntPredicate::IntSLE
            } else {
                IntPredicate::IntULE
            }
        }
        hir::BinOpKind::Gt => {
            if signed {
                IntPredicate::IntSGT
            } else {
                IntPredicate::IntUGT
            }
        }
        hir::BinOpKind::Ge => {
            if signed {
                IntPredicate::IntSGE
            } else {
                IntPredicate::IntUGE
            }
        }
        op => bug!(
            "bin_op_to_icmp_predicate: expected comparison operator, \
             found {:?}",
            op
        ),
    }
}

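/// Maps a float comparison operator from HIR to the corresponding
/// `RealPredicate`. Note the asymmetry: `==` maps to the *ordered* `RealOEQ`
/// (false if either operand is NaN), while `!=` maps to the *unordered*
/// `RealUNE` (true if either operand is NaN), matching Rust's float
/// comparison semantics.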
pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
    match op {
        hir::BinOpKind::Eq => RealPredicate::RealOEQ,
        hir::BinOpKind::Ne => RealPredicate::RealUNE,
        hir::BinOpKind::Lt => RealPredicate::RealOLT,
        hir::BinOpKind::Le => RealPredicate::RealOLE,
        hir::BinOpKind::Gt => RealPredicate::RealOGT,
        hir::BinOpKind::Ge => RealPredicate::RealOGE,
        op => {
            bug!(
                "bin_op_to_fcmp_predicate: expected comparison operator, \
                 found {:?}",
                op
            );
        }
    }
}

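/// Compares two SIMD vectors element-wise, producing a mask in `ret_ty` whose
/// lanes are all-ones for `true` and all-zeros for `false`.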
pub fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    lhs: Bx::Value,
    rhs: Bx::Value,
    t: Ty<'tcx>,
    ret_ty: Bx::Type,
    op: hir::BinOpKind,
) -> Bx::Value {
    let signed = match t.kind() {
        ty::Float(_) => {
            let cmp = bin_op_to_fcmp_predicate(op);
            let cmp = bx.fcmp(cmp, lhs, rhs);
            return bx.sext(cmp, ret_ty);
        }
        ty::Uint(_) => false,
        ty::Int(_) => true,
        _ => bug!("compare_simd_types: invalid SIMD type"),
    };

    let cmp = bin_op_to_icmp_predicate(op, signed);
    let cmp = bx.icmp(cmp, lhs, rhs);
    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
    // to get the correctly sized type. This will compile to a single instruction
    // once the IR is converted to assembly if the SIMD instruction is supported
    // by the target architecture.
    bx.sext(cmp, ret_ty)
}

/// Retrieves the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit odd. It is intended for use in an upcast,
/// where the new vtable for an object will be derived from the old one.
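///
/// For example, unsizing `[T; N]` to `[T]` yields the array length as a
/// `usize` constant, and unsizing a sized `T` to `dyn Trait` yields a pointer
/// to the trait's vtable.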
pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    source: Ty<'tcx>,
    target: Ty<'tcx>,
    old_info: Option<Bx::Value>,
) -> Bx::Value {
    let cx = bx.cx();
    let (source, target) =
        cx.tcx().struct_lockstep_tails_erasing_lifetimes(source, target, bx.param_env());
    match (source.kind(), target.kind()) {
        (&ty::Array(_, len), &ty::Slice(_)) => {
            cx.const_usize(len.eval_usize(cx.tcx(), ty::ParamEnv::reveal_all()))
        }
        (&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
            let old_info =
                old_info.expect("unsized_info: missing old info for trait upcasting coercion");
            if data_a.principal_def_id() == data_b.principal_def_id() {
                return old_info;
            }

            // Trait upcasting coercion.

            let vptr_entry_idx =
                cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target));

            if let Some(entry_idx) = vptr_entry_idx {
                let ptr_ty = cx.type_i8p();
                let ptr_align = cx.tcx().data_layout.pointer_align.abi;
                let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
                let gep = bx.inbounds_gep(
                    ptr_ty,
                    llvtable,
                    &[bx.const_usize(u64::try_from(entry_idx).unwrap())],
                );
                let new_vptr = bx.load(ptr_ty, gep, ptr_align);
                bx.nonnull_metadata(new_vptr);
                // Vtable loads are invariant.
                bx.set_invariant_load(new_vptr);
                new_vptr
            } else {
                old_info
            }
        }
        (_, &ty::Dynamic(ref data, ..)) => {
            let vtable_ptr_ty = cx.scalar_pair_element_backend_type(
                cx.layout_of(cx.tcx().mk_mut_ptr(target)),
                1,
                true,
            );
            cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
        }
        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
    }
}

/// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
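///
/// Returns the resulting (data pointer, extra info) pair for the fat pointer.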
pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: Bx::Value,
    src_ty: Ty<'tcx>,
    dst_ty: Ty<'tcx>,
    old_info: Option<Bx::Value>,
) -> (Bx::Value, Bx::Value) {
    debug!("unsize_ptr: {:?} => {:?}", src_ty, dst_ty);
    match (src_ty.kind(), dst_ty.kind()) {
        (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
        | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
            assert_eq!(bx.cx().type_is_sized(a), old_info.is_none());
            let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
            (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info))
        }
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b);
            let src_layout = bx.cx().layout_of(src_ty);
            let dst_layout = bx.cx().layout_of(dst_ty);
            if src_ty == dst_ty {
                return (src, old_info.unwrap());
            }
            let mut result = None;
            for i in 0..src_layout.fields.count() {
                let src_f = src_layout.field(bx.cx(), i);
                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
                if src_f.is_zst() {
                    continue;
                }
                assert_eq!(src_layout.size, src_f.size);

                let dst_f = dst_layout.field(bx.cx(), i);
                assert_ne!(src_f.ty, dst_f.ty);
                assert_eq!(result, None);
                result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info));
            }
            let (lldata, llextra) = result.unwrap();
            let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true);
            let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true);
            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
            (bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty))
        }
        _ => bug!("unsize_ptr: called on bad types"),
    }
}

/// Coerces `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty`, and stores the result in `dst`.
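///
/// This handles unsizing coercions whose source lives in memory, e.g.
/// `&[T; N]` -> `&[T]` or a custom `CoerceUnsized` smart pointer, recursing
/// field-by-field in the ADT case.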
pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: PlaceRef<'tcx, Bx::Value>,
    dst: PlaceRef<'tcx, Bx::Value>,
) {
    let src_ty = src.layout.ty;
    let dst_ty = dst.layout.ty;
    match (src_ty.kind(), dst_ty.kind()) {
        (&ty::Ref(..), &ty::Ref(..) | &ty::RawPtr(..)) | (&ty::RawPtr(..), &ty::RawPtr(..)) => {
            let (base, info) = match bx.load_operand(src).val {
                OperandValue::Pair(base, info) => unsize_ptr(bx, base, src_ty, dst_ty, Some(info)),
                OperandValue::Immediate(base) => unsize_ptr(bx, base, src_ty, dst_ty, None),
                OperandValue::Ref(..) => bug!(),
            };
            OperandValue::Pair(base, info).store(bx, dst);
        }

        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b);

            for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
                let src_f = src.project_field(bx, i);
                let dst_f = dst.project_field(bx, i);

                if dst_f.layout.is_zst() {
                    continue;
                }

                if src_f.layout.ty == dst_f.layout.ty {
                    memcpy_ty(
                        bx,
                        dst_f.llval,
                        dst_f.align,
                        src_f.llval,
                        src_f.align,
                        src_f.layout,
                        MemFlags::empty(),
                    );
                } else {
                    coerce_unsized_into(bx, src_f, dst_f);
                }
            }
        }
        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty),
    }
}

pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    op: hir::BinOpKind,
    lhs: Bx::Value,
    rhs: Bx::Value,
) -> Bx::Value {
    cast_shift_rhs(bx, op, lhs, rhs)
}

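/// Widens or truncates `rhs` to match the width of `lhs`. Rust allows the two
/// operands of a shift to have different integer types (e.g. `u64 >> u8`),
/// but backend shift instructions require both operands to have the same
/// width.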
fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    op: hir::BinOpKind,
    lhs: Bx::Value,
    rhs: Bx::Value,
) -> Bx::Value {
    // Shifts may have an integer RHS of any size.
    if op.is_shift() {
        let mut rhs_llty = bx.cx().val_ty(rhs);
        let mut lhs_llty = bx.cx().val_ty(lhs);
        if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
            rhs_llty = bx.cx().element_type(rhs_llty)
        }
        if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
            lhs_llty = bx.cx().element_type(lhs_llty)
        }
        let rhs_sz = bx.cx().int_width(rhs_llty);
        let lhs_sz = bx.cx().int_width(lhs_llty);
        if lhs_sz < rhs_sz {
            bx.trunc(rhs, lhs_llty)
        } else if lhs_sz > rhs_sz {
            // FIXME (#1877): If in the future shifting by negative
            // values is no longer undefined then this is wrong.
            bx.zext(rhs, lhs_llty)
        } else {
            rhs
        }
    } else {
        rhs
    }
}

/// Returns `true` if this session's target will use SEH-based unwinding.
///
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
    sess.target.is_like_msvc
}

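/// Copies `layout.size` bytes from `src` to `dst` with the given alignments,
/// doing nothing for zero-sized types.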
pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    dst: Bx::Value,
    dst_align: Align,
    src: Bx::Value,
    src_align: Align,
    layout: TyAndLayout<'tcx>,
    flags: MemFlags,
) {
    let size = layout.size.bytes();
    if size == 0 {
        return;
    }

    bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}

pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    instance: Instance<'tcx>,
) {
    // This is an `info!` to allow collecting monomorphization statistics
    // and to allow finding the last function before LLVM aborts from
    // release builds.
    info!("codegen_instance({})", instance);

    mir::codegen_mir::<Bx>(cx, instance);
}

/// Creates the `main` function which will initialize the Rust runtime and call
/// the user's `main` function.
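///
/// Conceptually, on targets where the C `main` receives `argc`/`argv`, the
/// generated wrapper behaves like the following sketch (illustrative only,
/// not literal output; `lang_start` is the `start` lang item):
///
/// ```ignore (illustrative)
/// extern "C" fn main(argc: c_int, argv: *const *const u8) -> c_int {
///     lang_start(user_main, argc as isize, argv) as c_int
/// }
/// ```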
pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
) -> Option<Bx::Function> {
    let (main_def_id, entry_type) = cx.tcx().entry_fn(())?;
    let main_is_local = main_def_id.is_local();
    let instance = Instance::mono(cx.tcx(), main_def_id);

    if main_is_local {
        // We want to create the wrapper in the same codegen unit as Rust's main
        // function.
        if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
            return None;
        }
    } else if !cx.codegen_unit().is_primary() {
        // We want to create the wrapper only when the codegen unit is the primary one.
        return None;
    }

    let main_llfn = cx.get_fn_addr(instance);

    let use_start_lang_item = EntryFnType::Start != entry_type;
    let entry_fn = create_entry_fn::<Bx>(cx, main_llfn, main_def_id, use_start_lang_item);
    return Some(entry_fn);

    fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
        cx: &'a Bx::CodegenCx,
        rust_main: Bx::Value,
        rust_main_def_id: DefId,
        use_start_lang_item: bool,
    ) -> Bx::Function {
        // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
        // depending on whether the target needs `argc` and `argv` to be passed in.
        let llfty = if cx.sess().target.main_needs_argc_argv {
            cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
        } else {
            cx.type_func(&[], cx.type_int())
        };

        let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
        // Given that `main()` has no arguments, its return type cannot have
        // late-bound regions, since late-bound regions must appear in the
        // argument listing.
        let main_ret_ty = cx.tcx().normalize_erasing_regions(
            ty::ParamEnv::reveal_all(),
            main_ret_ty.no_bound_vars().unwrap(),
        );

        let Some(llfn) = cx.declare_c_main(llfty) else {
            // FIXME: We should be smart and show a better diagnostic here.
            let span = cx.tcx().def_span(rust_main_def_id);
            cx.sess()
                .struct_span_err(span, "entry symbol `main` declared multiple times")
                .help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
                .emit();
            cx.sess().abort_if_errors();
            bug!();
        };

        // `main` should respect the same config for frame pointer elimination as the rest of the code.
        cx.set_frame_pointer_type(llfn);
        cx.apply_target_cpu_attr(llfn);

        let llbb = Bx::append_block(&cx, llfn, "top");
        let mut bx = Bx::build(&cx, llbb);

        bx.insert_reference_to_gdb_debug_scripts_section_global();

        let isize_ty = cx.type_isize();
        let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
        let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);

        let (start_fn, start_ty, args) = if use_start_lang_item {
            let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
            let start_fn = cx.get_fn_addr(
                ty::Instance::resolve(
                    cx.tcx(),
                    ty::ParamEnv::reveal_all(),
                    start_def_id,
                    cx.tcx().intern_substs(&[main_ret_ty.into()]),
                )
                .unwrap()
                .unwrap(),
            );
            let start_ty = cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty], isize_ty);
            (start_fn, start_ty, vec![rust_main, arg_argc, arg_argv])
        } else {
            debug!("using user-defined start fn");
            let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
            (rust_main, start_ty, vec![arg_argc, arg_argv])
        };

        let result = bx.call(start_ty, start_fn, &args, None);
        let cast = bx.intcast(result, cx.type_int(), true);
        bx.ret(cast);

        llfn
    }
}

/// Obtains the `argc` and `argv` values to pass to the Rust start function.
fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    bx: &mut Bx,
) -> (Bx::Value, Bx::Value) {
    if cx.sess().target.main_needs_argc_argv {
        // Params from the native `main()` are used as args for the Rust start function.
        let param_argc = bx.get_param(0);
        let param_argv = bx.get_param(1);
        let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
        let arg_argv = param_argv;
        (arg_argc, arg_argv)
    } else {
        // The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
        let arg_argc = bx.const_int(cx.type_int(), 0);
        let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
        (arg_argc, arg_argv)
    }
}

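/// Drives codegen for the whole crate: spawns the async codegen coordinator,
/// emits the metadata and allocator modules when needed, and then submits each
/// codegen unit to the backend, either freshly compiled or reused from the
/// previous incremental session.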
pub fn codegen_crate<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'_>,
    target_cpu: String,
    metadata: EncodedMetadata,
    need_metadata_module: bool,
) -> OngoingCodegen<B> {
    // Skip crate items and just output metadata in -Z no-codegen mode.
    if tcx.sess.opts.debugging_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
        let ongoing_codegen = start_async_codegen(backend, tcx, target_cpu, metadata, None, 1);

        ongoing_codegen.codegen_finished(tcx);

        ongoing_codegen.check_for_errors(tcx.sess);

        return ongoing_codegen;
    }

    let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);

    // Run the monomorphization collector and partition the collected items into
    // codegen units.
    let codegen_units = tcx.collect_and_partition_mono_items(()).1;

    // Force all codegen_unit queries so they are already either red or green
    // when compile_codegen_unit accesses them. We are not able to re-execute
    // the codegen_unit query from just the DepNode, so an unknown color would
    // lead to having to re-execute compile_codegen_unit, possibly
    // unnecessarily.
    if tcx.dep_graph.is_fully_enabled() {
        for cgu in codegen_units {
            tcx.ensure().codegen_unit(cgu.name());
        }
    }

    let metadata_module = if need_metadata_module {
        // Emit compressed metadata object.
        let metadata_cgu_name =
            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
        tcx.sess.time("write_compressed_metadata", || {
            let file_name =
                tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
            let data = create_compressed_metadata_file(
                tcx.sess,
                &metadata,
                &exported_symbols::metadata_symbol_name(tcx),
            );
            if let Err(err) = std::fs::write(&file_name, data) {
                tcx.sess.fatal(&format!("error writing metadata object file: {}", err));
            }
            Some(CompiledModule {
                name: metadata_cgu_name,
                kind: ModuleKind::Metadata,
                object: Some(file_name),
                dwarf_object: None,
                bytecode: None,
            })
        })
    } else {
        None
    };

    let ongoing_codegen = start_async_codegen(
        backend.clone(),
        tcx,
        target_cpu,
        metadata,
        metadata_module,
        codegen_units.len(),
    );
559 let ongoing_codegen = AbortCodegenOnDrop::<B>(Some(ongoing_codegen));
560
561 // Codegen an allocator shim, if necessary.
562 //
563 // If the crate doesn't have an `allocator_kind` set then there's definitely
564 // no shim to generate. Otherwise we also check our dependency graph for all
565 // our output crate types. If anything there looks like its a `Dynamic`
566 // linkage, then it's already got an allocator shim and we'll be using that
567 // one instead. If nothing exists then it's our job to generate the
568 // allocator!
17df50a5 569 let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
ba9703b0 570 use rustc_middle::middle::dependency_format::Linkage;
dfeec247
XL
571 list.iter().any(|&linkage| linkage == Linkage::Dynamic)
572 });
a1dfa0c6
XL
573 let allocator_module = if any_dynamic_crate {
574 None
136023e0 575 } else if let Some(kind) = tcx.allocator_kind(()) {
dfeec247
XL
576 let llmod_id =
577 cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
c295e0f8 578 let mut module_llvm = backend.new_metadata(tcx, &llmod_id);
29967ef6 579 tcx.sess.time("write_allocator_module", || {
c295e0f8
XL
580 backend.codegen_allocator(
581 tcx,
582 &mut module_llvm,
583 &llmod_id,
584 kind,
585 tcx.lang_items().oom().is_some(),
586 )
29967ef6 587 });
a1dfa0c6 588
c295e0f8 589 Some(ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator })
a1dfa0c6
XL
590 } else {
591 None
592 };
593
594 if let Some(allocator_module) = allocator_module {
595 ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
596 }
597
5869c6ff
XL
    // For better throughput during parallel processing by LLVM, we used to sort
    // CGUs largest to smallest. This would lead to better thread utilization
    // by, for example, preventing a large CGU from being processed last and
    // having only one LLVM thread working while the rest remained idle.
    //
    // However, this strategy would lead to high memory usage, as it meant the
    // LLVM-IR for all of the largest CGUs would be resident in memory at once.
    //
    // Instead, we can compromise by ordering CGUs such that the largest and
    // smallest are first, second largest and smallest are next, etc. If there
    // are large size variations, this can reduce memory usage significantly.
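    //
    // For example, size estimates [1, 3, 4, 6, 7, 9] (sorted ascending) are
    // reordered to [9, 1, 7, 3, 6, 4], pairing each large CGU with a small one.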
    let codegen_units: Vec<_> = {
        let mut sorted_cgus = codegen_units.iter().collect::<Vec<_>>();
        sorted_cgus.sort_by_cached_key(|cgu| cgu.size_estimate());

        let (first_half, second_half) = sorted_cgus.split_at(sorted_cgus.len() / 2);
        second_half.iter().rev().interleave(first_half).copied().collect()
    };

    // The non-parallel compiler can only translate codegen units to LLVM IR
    // on a single thread, leading to a staircase effect where the N LLVM
    // threads have to wait on the single codegen thread to generate work
    // for them. The parallel compiler does not have this restriction, so
    // we can pre-load the LLVM queue in parallel before handing off
    // coordination to the OngoingCodegen scheduler.
    //
    // This likely is a temporary measure. Once we don't have to support the
    // non-parallel compiler anymore, we can compile CGUs end-to-end in
    // parallel and get rid of the complicated scheduling logic.
    #[cfg(parallel_compiler)]
    let pre_compile_cgus = |cgu_reuse: &[CguReuse]| {
        tcx.sess.time("compile_first_CGU_batch", || {
            // Try to find one CGU to compile per thread.
            let cgus: Vec<_> = cgu_reuse
                .iter()
                .enumerate()
                .filter(|&(_, reuse)| reuse == &CguReuse::No)
                .take(tcx.sess.threads())
                .collect();

            // Compile the found CGUs in parallel.
            let start_time = Instant::now();

            let pre_compiled_cgus = par_iter(cgus)
                .map(|(i, _)| {
                    let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
                    (i, module)
                })
                .collect();

            (pre_compiled_cgus, start_time.elapsed())
        })
    };

    #[cfg(not(parallel_compiler))]
    let pre_compile_cgus = |_: &[CguReuse]| (FxHashMap::default(), Duration::new(0, 0));

    let mut cgu_reuse = Vec::new();
    let mut pre_compiled_cgus: Option<FxHashMap<usize, _>> = None;
    let mut total_codegen_time = Duration::new(0, 0);
    let start_rss = tcx.sess.time_passes().then(|| get_resident_set_size());

    for (i, cgu) in codegen_units.iter().enumerate() {
        ongoing_codegen.wait_for_signal_to_codegen_item();
        ongoing_codegen.check_for_errors(tcx.sess);

        // Do some setup work in the first iteration.
        if pre_compiled_cgus.is_none() {
            // Calculate the CGU reuse.
            cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
                codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect()
            });
            // Pre-compile some CGUs.
            let (compiled_cgus, codegen_time) = pre_compile_cgus(&cgu_reuse);
            pre_compiled_cgus = Some(compiled_cgus);
            total_codegen_time += codegen_time;
        }

        let cgu_reuse = cgu_reuse[i];
        tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);

        match cgu_reuse {
            CguReuse::No => {
                let (module, cost) =
                    if let Some(cgu) = pre_compiled_cgus.as_mut().unwrap().remove(&i) {
                        cgu
                    } else {
                        let start_time = Instant::now();
                        let module = backend.compile_codegen_unit(tcx, cgu.name());
                        total_codegen_time += start_time.elapsed();
                        module
                    };
                // This will unwind if there are errors, which triggers our `AbortCodegenOnDrop`
                // guard. Unfortunately, just skipping the `submit_codegened_module_to_llvm` makes
                // compilation hang on post-monomorphization errors.
                tcx.sess.abort_if_errors();

                submit_codegened_module_to_llvm(
                    &backend,
                    &ongoing_codegen.coordinator_send,
                    module,
                    cost,
                );
                false
            }
            CguReuse::PreLto => {
                submit_pre_lto_module_to_llvm(
                    &backend,
                    tcx,
                    &ongoing_codegen.coordinator_send,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.work_product(tcx),
                    },
                );
                true
            }
            CguReuse::PostLto => {
                submit_post_lto_module_to_llvm(
                    &backend,
                    &ongoing_codegen.coordinator_send,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.work_product(tcx),
                    },
                );
                true
            }
        };
    }

    ongoing_codegen.codegen_finished(tcx);

    // Since the main thread is sometimes blocked during codegen, we keep track
    // of -Ztime-passes output manually.
    if tcx.sess.time_passes() {
        let end_rss = get_resident_set_size();

        print_time_passes_entry(
            "codegen_to_LLVM_IR",
            total_codegen_time,
            start_rss.unwrap(),
            end_rss,
        );
    }

    ongoing_codegen.check_for_errors(tcx.sess);

    ongoing_codegen.into_inner()
}

/// A curious wrapper structure whose only purpose is to call `codegen_aborted`
/// when it's dropped abnormally.
///
/// In the process of working on rust-lang/rust#55238 a mysterious segfault was
/// stumbled upon. The segfault was never reproduced locally, but it was
/// suspected to be related to the fact that codegen worker threads were
/// sticking around by the time the main thread was exiting, causing issues.
///
/// This structure is an attempt to fix that issue where the `codegen_aborted`
/// message will block until all workers have finished. This should ensure that
/// even if the main codegen thread panics we'll wait for pending work to
/// complete before returning from the main thread, hopefully avoiding
/// segfaults.
///
/// If you see this comment in the code, then it means that this workaround
/// worked! We may yet one day track down the mysterious cause of that
/// segfault...
struct AbortCodegenOnDrop<B: ExtraBackendMethods>(Option<OngoingCodegen<B>>);

impl<B: ExtraBackendMethods> AbortCodegenOnDrop<B> {
    fn into_inner(mut self) -> OngoingCodegen<B> {
        self.0.take().unwrap()
    }
}

impl<B: ExtraBackendMethods> Deref for AbortCodegenOnDrop<B> {
    type Target = OngoingCodegen<B>;

    fn deref(&self) -> &OngoingCodegen<B> {
        self.0.as_ref().unwrap()
    }
}

impl<B: ExtraBackendMethods> DerefMut for AbortCodegenOnDrop<B> {
    fn deref_mut(&mut self) -> &mut OngoingCodegen<B> {
        self.0.as_mut().unwrap()
    }
}

impl<B: ExtraBackendMethods> Drop for AbortCodegenOnDrop<B> {
    fn drop(&mut self) {
        if let Some(codegen) = self.0.take() {
            codegen.codegen_aborted();
        }
    }
}

impl CrateInfo {
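    /// Gathers the crate-wide information (exported symbols, used libraries,
    /// dependency formats, missing lang items, etc.) that the linker
    /// invocation will need once codegen has finished.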
    pub fn new(tcx: TyCtxt<'_>, target_cpu: String) -> CrateInfo {
        let exported_symbols = tcx
            .sess
            .crate_types()
            .iter()
            .map(|&c| (c, crate::back::linker::exported_symbols(tcx, c)))
            .collect();
        let local_crate_name = tcx.crate_name(LOCAL_CRATE);
        let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
        let subsystem = tcx.sess.first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
        let windows_subsystem = subsystem.map(|subsystem| {
            if subsystem != sym::windows && subsystem != sym::console {
                tcx.sess.fatal(&format!(
                    "invalid windows subsystem `{}`, only \
                     `windows` and `console` are allowed",
                    subsystem
                ));
            }
            subsystem.to_string()
        });

        // This list is used when generating the command line to pass through to
        // the system linker. The linker expects undefined symbols on the left of
        // the command line to be defined in libraries on the right, not the other
        // way around. For more info, see some comments in the add_used_library
        // function below.
        //
        // In order to get this left-to-right dependency ordering, we use the
        // reverse postorder of all crates, putting the leaves at the right-most
        // positions.
        let used_crates = tcx
            .postorder_cnums(())
            .iter()
            .rev()
            .copied()
            .filter(|&cnum| !tcx.dep_kind(cnum).macros_only())
            .collect();

        let mut info = CrateInfo {
            target_cpu,
            exported_symbols,
            local_crate_name,
            compiler_builtins: None,
            profiler_runtime: None,
            is_no_builtins: Default::default(),
            native_libraries: Default::default(),
            used_libraries: tcx.native_libraries(LOCAL_CRATE).iter().map(Into::into).collect(),
            crate_name: Default::default(),
            used_crates,
            used_crate_source: Default::default(),
            lang_item_to_crate: Default::default(),
            missing_lang_items: Default::default(),
            dependency_formats: tcx.dependency_formats(()).clone(),
            windows_subsystem,
        };
        let lang_items = tcx.lang_items();

        let crates = tcx.crates(());

        let n_crates = crates.len();
        info.native_libraries.reserve(n_crates);
        info.crate_name.reserve(n_crates);
        info.used_crate_source.reserve(n_crates);
        info.missing_lang_items.reserve(n_crates);

        for &cnum in crates.iter() {
            info.native_libraries
                .insert(cnum, tcx.native_libraries(cnum).iter().map(Into::into).collect());
            info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string());
            info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum).clone());
            if tcx.is_compiler_builtins(cnum) {
                info.compiler_builtins = Some(cnum);
            }
            if tcx.is_profiler_runtime(cnum) {
                info.profiler_runtime = Some(cnum);
            }
            if tcx.is_no_builtins(cnum) {
                info.is_no_builtins.insert(cnum);
            }
            let missing = tcx.missing_lang_items(cnum);
            for &item in missing.iter() {
                if let Ok(id) = lang_items.require(item) {
                    info.lang_item_to_crate.insert(item, id.krate);
                }
            }

            // No need to look for lang items that don't actually need to exist.
            let missing =
                missing.iter().cloned().filter(|&l| lang_items::required(tcx, l)).collect();
            info.missing_lang_items.insert(cnum, missing);
        }

        info
    }
}

pub fn provide(providers: &mut Providers) {
    providers.backend_optimization_level = |tcx, cratenum| {
        let for_speed = match tcx.sess.opts.optimize {
            // If no optimization is done globally, #[optimize] has no effect.
            //
            // This is done because if we ended up "upgrading" to `-O2` here, we'd populate the
            // pass manager and it is likely that some module-wide passes (such as the inliner or
            // cross-function constant propagation) would ignore the `optnone` annotation we put
            // on the functions, thus necessarily involving these functions in optimizations.
            config::OptLevel::No => return config::OptLevel::No,
            // If optimize-for-speed is already specified globally, just use that level.
            config::OptLevel::Less => return config::OptLevel::Less,
            config::OptLevel::Default => return config::OptLevel::Default,
            config::OptLevel::Aggressive => return config::OptLevel::Aggressive,
            // If optimize-for-size has been requested globally, use -O2 instead when any
            // function is annotated with #[optimize(speed)].
            config::OptLevel::Size => config::OptLevel::Default,
            config::OptLevel::SizeMin => config::OptLevel::Default,
        };

        let (defids, _) = tcx.collect_and_partition_mono_items(cratenum);
        for id in &*defids {
            let CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id);
            match optimize {
                attr::OptimizeAttr::None => continue,
                attr::OptimizeAttr::Size => continue,
                attr::OptimizeAttr::Speed => {
                    return for_speed;
                }
            }
        }
        tcx.sess.opts.optimize
    };
}

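/// Determines whether a codegen unit can be reused from the previous
/// incremental compilation session and, if so, whether its pre-LTO or
/// post-LTO artifact is the one to reuse.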
fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
    if !tcx.dep_graph.is_fully_enabled() {
        return CguReuse::No;
    }

    let work_product_id = &cgu.work_product_id();
    if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
        // We don't have anything cached for this CGU. This can happen
        // if the CGU did not exist in the previous session.
        return CguReuse::No;
    }

    // Try to mark the CGU as green. If we can do so, it means that nothing
    // affecting the LLVM module has changed and we can re-use a cached version.
    // If we compile with any kind of LTO, this means we can re-use the bitcode
    // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
    // know that later). If we are not doing LTO, there is only one optimized
    // version of each module, so we re-use that.
    let dep_node = cgu.codegen_dep_node(tcx);
    assert!(
        !tcx.dep_graph.dep_node_exists(&dep_node),
        "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
        cgu.name()
    );

    if tcx.try_mark_green(&dep_node) {
        // We can re-use either the pre- or the post-thinlto state. If no LTO is
        // being performed then we can use post-LTO artifacts, otherwise we must
        // reuse pre-LTO artifacts.
        match compute_per_cgu_lto_type(
            &tcx.sess.lto(),
            &tcx.sess.opts,
            &tcx.sess.crate_types(),
            ModuleKind::Regular,
        ) {
            ComputedLtoType::No => CguReuse::PostLto,
            _ => CguReuse::PreLto,
        }
    } else {
        CguReuse::No
    }
}