-//! Codegen the completed AST to the LLVM IR.
-//!
-//! Some functions here, such as `codegen_block` and `codegen_expr`, return a value --
-//! the result of the codegen to LLVM -- while others, such as `codegen_fn`
-//! and `mono_item`, are called only for the side effect of adding a
-//! particular definition to the LLVM IR output we're producing.
-//!
-//! Hopefully useful general knowledge about codegen:
-//!
-//! * There's no way to find out the `Ty` type of a `Value`. Doing so
-//! would be "trying to get the eggs out of an omelette" (credit:
-//! pcwalton). You can, instead, find out its `llvm::Type` by calling `val_ty`,
-//! but one `llvm::Type` corresponds to many `Ty`s; for instance, `tup(int, int,
-//! int)` and `rec(x=int, y=int, z=int)` will have the same `llvm::Type`.
-
+use crate::back::link::are_upstream_rust_objects_already_included;
+use crate::back::metadata::create_compressed_metadata_file;
use crate::back::write::{
compute_per_cgu_lto_type, start_async_codegen, submit_codegened_module_to_llvm,
submit_post_lto_module_to_llvm, submit_pre_lto_module_to_llvm, ComputedLtoType, OngoingCodegen,
};
use crate::common::{IntPredicate, RealPredicate, TypeKind};
+use crate::errors;
use crate::meth;
use crate::mir;
use crate::mir::operand::OperandValue;
use crate::mir::place::PlaceRef;
use crate::traits::*;
-use crate::{CachedModuleCodegen, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};
+use crate::{CachedModuleCodegen, CompiledModule, CrateInfo, MemFlags, ModuleCodegen, ModuleKind};
use rustc_attr as attr;
-use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::profiling::print_time_passes_entry;
-use rustc_data_structures::sync::{par_iter, Lock, ParallelIterator};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry};
+use rustc_data_structures::sync::par_iter;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::sync::ParallelIterator;
use rustc_hir as hir;
-use rustc_hir::def_id::{LocalDefId, LOCAL_CRATE};
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_hir::lang_items::LangItem;
use rustc_index::vec::Idx;
+use rustc_metadata::EncodedMetadata;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
-use rustc_middle::middle::cstore::EncodedMetadata;
-use rustc_middle::middle::cstore::{self, LinkagePreference};
+use rustc_middle::middle::exported_symbols;
+use rustc_middle::middle::exported_symbols::SymbolExportKind;
use rustc_middle::middle::lang_items;
use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem};
-use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
-use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
+use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::cgu_reuse_tracker::CguReuse;
-use rustc_session::config::{self, EntryFnType};
-use rustc_session::utils::NativeLibKind;
+use rustc_session::config::{self, CrateType, EntryFnType, OutputType};
use rustc_session::Session;
-use rustc_span::Span;
-use rustc_symbol_mangling::test as symbol_names_test;
-use rustc_target::abi::{Align, LayoutOf, VariantIdx};
+use rustc_span::symbol::sym;
+use rustc_span::Symbol;
+use rustc_span::{DebuggerVisualizerFile, DebuggerVisualizerType};
+use rustc_target::abi::{Align, Size, VariantIdx};
-use std::cmp;
-use std::ops::{Deref, DerefMut};
+use std::collections::BTreeSet;
use std::time::{Duration, Instant};
+
+use itertools::Itertools;
+
pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) -> IntPredicate {
match op {
hir::BinOpKind::Eq => IntPredicate::IntEQ,
///
/// The `old_info` argument is a bit odd. It is intended for use in an upcast,
/// where the new vtable for an object will be derived from the old one.
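+///
+/// For example, coercing `&(dyn Debug + Send)` to `&dyn Debug` passes the old
+/// vtable pointer as `old_info`; the principal trait is unchanged, so the old
+/// vtable can be reused as-is. A true supertrait upcast instead loads the new
+/// vtable pointer out of an upcasting slot of the old vtable.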
-pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
- cx: &Cx,
+pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
source: Ty<'tcx>,
target: Ty<'tcx>,
- old_info: Option<Cx::Value>,
-) -> Cx::Value {
+ old_info: Option<Bx::Value>,
+) -> Bx::Value {
+ let cx = bx.cx();
let (source, target) =
- cx.tcx().struct_lockstep_tails_erasing_lifetimes(source, target, cx.param_env());
+ cx.tcx().struct_lockstep_tails_erasing_lifetimes(source, target, bx.param_env());
match (source.kind(), target.kind()) {
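+        // Unsizing `[T; N]` -> `[T]`: the extra word is just the constant length `N`.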
(&ty::Array(_, len), &ty::Slice(_)) => {
cx.const_usize(len.eval_usize(cx.tcx(), ty::ParamEnv::reveal_all()))
}
- (&ty::Dynamic(..), &ty::Dynamic(..)) => {
- // For now, upcasts are limited to changes in marker
- // traits, and hence never actually require an actual
- // change to the vtable.
- old_info.expect("unsized_info: missing old info for trait upcast")
+ (
+ &ty::Dynamic(ref data_a, _, src_dyn_kind),
+ &ty::Dynamic(ref data_b, _, target_dyn_kind),
+ ) if src_dyn_kind == target_dyn_kind => {
+ let old_info =
+ old_info.expect("unsized_info: missing old info for trait upcasting coercion");
+ if data_a.principal_def_id() == data_b.principal_def_id() {
+                // A NOP cast that doesn't actually change anything should be
+                // allowed even with invalid vtables.
+ return old_info;
+ }
+
+            // Trait upcasting coercion: load the new vtable pointer out of the
+            // old vtable's upcasting slot.
+
+ let vptr_entry_idx =
+ cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target));
+
+ if let Some(entry_idx) = vptr_entry_idx {
+ let ptr_ty = cx.type_i8p();
+ let ptr_align = cx.tcx().data_layout.pointer_align.abi;
+ let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
+ let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
+ let gep = bx.inbounds_gep(
+ ptr_ty,
+ llvtable,
+ &[bx.const_usize(u64::try_from(entry_idx).unwrap())],
+ );
+ let new_vptr = bx.load(ptr_ty, gep, ptr_align);
+ bx.nonnull_metadata(new_vptr);
+ // VTable loads are invariant.
+ bx.set_invariant_load(new_vptr);
+ bx.pointercast(new_vptr, vtable_ptr_ty)
+ } else {
+ old_info
+ }
}
- (_, &ty::Dynamic(ref data, ..)) => {
- let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)).field(cx, FAT_PTR_EXTRA);
- cx.const_ptrcast(
- meth::get_vtable(cx, source, data.principal()),
- cx.backend_type(vtable_ptr),
- )
+ (_, &ty::Dynamic(ref data, _, target_dyn_kind)) => {
+ let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
+ cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
}
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
}
}
-/// Coerces `src` to `dst_ty`. `src_ty` must be a thin pointer.
-pub fn unsize_thin_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+/// Returns the vtable pointer type of a `dyn` or `dyn*` type.
+fn vtable_ptr_ty<'tcx, Cx: CodegenMethods<'tcx>>(
+ cx: &Cx,
+ target: Ty<'tcx>,
+ kind: ty::DynKind,
+) -> <Cx as BackendTypes>::Type {
+ cx.scalar_pair_element_backend_type(
+ cx.layout_of(match kind {
+ // vtable is the second field of `*mut dyn Trait`
+ ty::Dyn => cx.tcx().mk_mut_ptr(target),
+ // vtable is the second field of `dyn* Trait`
+ ty::DynStar => target,
+ }),
+ 1,
+ true,
+ )
+}
+
+/// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
+pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
src: Bx::Value,
src_ty: Ty<'tcx>,
dst_ty: Ty<'tcx>,
+ old_info: Option<Bx::Value>,
) -> (Bx::Value, Bx::Value) {
- debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
+ debug!("unsize_ptr: {:?} => {:?}", src_ty, dst_ty);
match (src_ty.kind(), dst_ty.kind()) {
(&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
| (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
- assert!(bx.cx().type_is_sized(a));
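+            // A thin-to-fat coercion has no old info, while a fat-to-fat
+            // coercion (e.g. a trait upcast) must supply it.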
+ assert_eq!(bx.cx().type_is_sized(a), old_info.is_none());
let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
- (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
+ (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
-
let src_layout = bx.cx().layout_of(src_ty);
let dst_layout = bx.cx().layout_of(dst_ty);
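+            // Identical types need no coercion; reuse `src` and the
+            // (mandatory) old info.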
+ if src_ty == dst_ty {
+ return (src, old_info.unwrap());
+ }
let mut result = None;
for i in 0..src_layout.fields.count() {
let src_f = src_layout.field(bx.cx(), i);
- assert_eq!(src_layout.fields.offset(i).bytes(), 0);
- assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
if src_f.is_zst() {
continue;
}
+
+ assert_eq!(src_layout.fields.offset(i).bytes(), 0);
+ assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
assert_eq!(src_layout.size, src_f.size);
let dst_f = dst_layout.field(bx.cx(), i);
assert_ne!(src_f.ty, dst_f.ty);
assert_eq!(result, None);
- result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty));
+ result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info));
}
let (lldata, llextra) = result.unwrap();
+ let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true);
+ let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true);
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
- // FIXME(eddyb) move these out of this `match` arm, so they're always
- // applied, uniformly, no matter the source/destination types.
- (
- bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)),
- bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true)),
- )
+ (bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty))
}
- _ => bug!("unsize_thin_ptr: called on bad types"),
+ _ => bug!("unsize_ptr: called on bad types"),
}
}
+/// Coerces `src` to `dst_ty` which is guaranteed to be a `dyn*` type.
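+///
+/// A `dyn*` value is represented as a `(data, vtable)` scalar pair, so this
+/// returns the pointer-sized data word together with the vtable produced by
+/// `unsized_info`.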
+pub fn cast_to_dyn_star<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
+ bx: &mut Bx,
+ src: Bx::Value,
+ src_ty_and_layout: TyAndLayout<'tcx>,
+ dst_ty: Ty<'tcx>,
+ old_info: Option<Bx::Value>,
+) -> (Bx::Value, Bx::Value) {
+ debug!("cast_to_dyn_star: {:?} => {:?}", src_ty_and_layout.ty, dst_ty);
+ assert!(
+ matches!(dst_ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
+ "destination type must be a dyn*"
+ );
+ // FIXME(dyn-star): this is probably not the best way to check if this is
+ // a pointer, and really we should ensure that the value is a suitable
+ // pointer earlier in the compilation process.
+ let src = match src_ty_and_layout.pointee_info_at(bx.cx(), Size::ZERO) {
+ Some(_) => bx.ptrtoint(src, bx.cx().type_isize()),
+ None => bx.bitcast(src, bx.type_isize()),
+ };
+ (src, unsized_info(bx, src_ty_and_layout.ty, dst_ty, old_info))
+}
+
/// Coerces `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty`, and stores the result in `dst`.
pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
match (src_ty.kind(), dst_ty.kind()) {
(&ty::Ref(..), &ty::Ref(..) | &ty::RawPtr(..)) | (&ty::RawPtr(..), &ty::RawPtr(..)) => {
let (base, info) = match bx.load_operand(src).val {
- OperandValue::Pair(base, info) => {
- // fat-ptr to fat-ptr unsize preserves the vtable
- // i.e., &'a fmt::Debug+Send => &'a fmt::Debug
- // So we need to pointercast the base to ensure
- // the types match up.
- // FIXME(eddyb) use `scalar_pair_element_backend_type` here,
- // like `unsize_thin_ptr` does.
- let thin_ptr = dst.layout.field(bx.cx(), FAT_PTR_ADDR);
- (bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info)
- }
- OperandValue::Immediate(base) => unsize_thin_ptr(bx, base, src_ty, dst_ty),
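+                // A fat-ptr to fat-ptr unsize (e.g. `&(dyn Debug + Send)` =>
+                // `&dyn Debug`) preserves the vtable word by threading it
+                // through as `old_info`.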
+ OperandValue::Pair(base, info) => unsize_ptr(bx, base, src_ty, dst_ty, Some(info)),
+ OperandValue::Immediate(base) => unsize_ptr(bx, base, src_ty, dst_ty, None),
OperandValue::Ref(..) => bug!(),
};
OperandValue::Pair(base, info).store(bx, dst);
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
- for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
+ for i in 0..def_a.variant(VariantIdx::new(0)).fields.len() {
let src_f = src.project_field(bx, i);
let dst_f = dst.project_field(bx, i);
pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
- op: hir::BinOpKind,
- lhs: Bx::Value,
- rhs: Bx::Value,
-) -> Bx::Value {
- cast_shift_rhs(bx, op, lhs, rhs)
-}
-
-fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
- bx: &mut Bx,
- op: hir::BinOpKind,
lhs: Bx::Value,
rhs: Bx::Value,
) -> Bx::Value {
// Shifts may have any size int on the rhs
- if op.is_shift() {
- let mut rhs_llty = bx.cx().val_ty(rhs);
- let mut lhs_llty = bx.cx().val_ty(lhs);
- if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
- rhs_llty = bx.cx().element_type(rhs_llty)
- }
- if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
- lhs_llty = bx.cx().element_type(lhs_llty)
- }
- let rhs_sz = bx.cx().int_width(rhs_llty);
- let lhs_sz = bx.cx().int_width(lhs_llty);
- if lhs_sz < rhs_sz {
- bx.trunc(rhs, lhs_llty)
- } else if lhs_sz > rhs_sz {
- // FIXME (#1877: If in the future shifting by negative
- // values is no longer undefined then this is wrong.
- bx.zext(rhs, lhs_llty)
- } else {
- rhs
- }
+ let mut rhs_llty = bx.cx().val_ty(rhs);
+ let mut lhs_llty = bx.cx().val_ty(lhs);
+ if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
+ rhs_llty = bx.cx().element_type(rhs_llty)
+ }
+ if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
+ lhs_llty = bx.cx().element_type(lhs_llty)
+ }
+ let rhs_sz = bx.cx().int_width(rhs_llty);
+ let lhs_sz = bx.cx().int_width(lhs_llty);
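+    // LLVM requires both shift operands to have the same type, so truncate or
+    // zero-extend the RHS to the width of the LHS.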
+ if lhs_sz < rhs_sz {
+ bx.trunc(rhs, lhs_llty)
+ } else if lhs_sz > rhs_sz {
+        // FIXME(#1877): If in the future shifting by negative values is no
+        // longer undefined, then this is wrong.
+ bx.zext(rhs, lhs_llty)
} else {
rhs
}
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
- sess.target.target.options.is_like_msvc
+ sess.target.is_like_msvc
}
pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
) -> Option<Bx::Function> {
- let (main_def_id, span) = match cx.tcx().entry_fn(LOCAL_CRATE) {
- Some((def_id, _)) => (def_id, cx.tcx().def_span(def_id)),
- None => return None,
- };
+ let (main_def_id, entry_type) = cx.tcx().entry_fn(())?;
+ let main_is_local = main_def_id.is_local();
+ let instance = Instance::mono(cx.tcx(), main_def_id);
- let instance = Instance::mono(cx.tcx(), main_def_id.to_def_id());
-
- if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
+ if main_is_local {
// We want to create the wrapper in the same codegen unit as Rust's main
// function.
+ if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
+ return None;
+ }
+ } else if !cx.codegen_unit().is_primary() {
+        // We only want to create the wrapper when this codegen unit is the primary one.
return None;
}
let main_llfn = cx.get_fn_addr(instance);
- return cx.tcx().entry_fn(LOCAL_CRATE).map(|(_, et)| {
- let use_start_lang_item = EntryFnType::Start != et;
- create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, use_start_lang_item)
- });
+ let entry_fn = create_entry_fn::<Bx>(cx, main_llfn, main_def_id, entry_type);
+ return Some(entry_fn);
fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
- sp: Span,
rust_main: Bx::Value,
- rust_main_def_id: LocalDefId,
- use_start_lang_item: bool,
+ rust_main_def_id: DefId,
+ entry_type: EntryFnType,
) -> Bx::Function {
// The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
// depending on whether the target needs `argc` and `argv` to be passed in.
- let llfty = if cx.sess().target.target.options.main_needs_argc_argv {
+ let llfty = if cx.sess().target.main_needs_argc_argv {
cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
} else {
cx.type_func(&[], cx.type_int())
// late-bound regions, since late-bound
// regions must appear in the argument
// listing.
- let main_ret_ty = cx.tcx().erase_regions(&main_ret_ty.no_bound_vars().unwrap());
-
- let llfn = match cx.declare_c_main(llfty) {
- Some(llfn) => llfn,
- None => {
- // FIXME: We should be smart and show a better diagnostic here.
- cx.sess()
- .struct_span_err(sp, "entry symbol `main` declared multiple times")
- .help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
- .emit();
- cx.sess().abort_if_errors();
- bug!();
- }
+ let main_ret_ty = cx.tcx().normalize_erasing_regions(
+ ty::ParamEnv::reveal_all(),
+ main_ret_ty.no_bound_vars().unwrap(),
+ );
+
+ let Some(llfn) = cx.declare_c_main(llfty) else {
+ // FIXME: We should be smart and show a better diagnostic here.
+ let span = cx.tcx().def_span(rust_main_def_id);
+ cx.sess().emit_err(errors::MultipleMainFunctions { span });
+ cx.sess().abort_if_errors();
+ bug!();
};
    // `main` should respect the same config for frame pointer elimination as the rest of the code.
- cx.set_frame_pointer_elimination(llfn);
+ cx.set_frame_pointer_type(llfn);
cx.apply_target_cpu_attr(llfn);
- let mut bx = Bx::new_block(&cx, llfn, "top");
+ let llbb = Bx::append_block(&cx, llfn, "top");
+ let mut bx = Bx::build(&cx, llbb);
bx.insert_reference_to_gdb_debug_scripts_section_global();
+ let isize_ty = cx.type_isize();
+ let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);
- let (start_fn, args) = if use_start_lang_item {
+ let (start_fn, start_ty, args) = if let EntryFnType::Main { sigpipe } = entry_type {
let start_def_id = cx.tcx().require_lang_item(LangItem::Start, None);
let start_fn = cx.get_fn_addr(
ty::Instance::resolve(
.unwrap()
.unwrap(),
);
- (
- start_fn,
- vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), arg_argc, arg_argv],
- )
+
+ let i8_ty = cx.type_i8();
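+        // Forward the `sigpipe` value to the `start` lang item, which lets the
+        // runtime decide how SIGPIPE should be handled before `main` runs.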
+ let arg_sigpipe = bx.const_u8(sigpipe);
+
+ let start_ty =
+ cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty, i8_ty], isize_ty);
+ (start_fn, start_ty, vec![rust_main, arg_argc, arg_argv, arg_sigpipe])
} else {
debug!("using user-defined start fn");
- (rust_main, vec![arg_argc, arg_argv])
+ let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
+ (rust_main, start_ty, vec![arg_argc, arg_argv])
};
- let result = bx.call(start_fn, &args, None);
+ let result = bx.call(start_ty, None, start_fn, &args, None);
let cast = bx.intcast(result, cx.type_int(), true);
bx.ret(cast);
cx: &'a Bx::CodegenCx,
bx: &mut Bx,
) -> (Bx::Value, Bx::Value) {
- if cx.sess().target.target.options.main_needs_argc_argv {
+ if cx.sess().target.main_needs_argc_argv {
// Params from native `main()` used as args for rust start function
let param_argc = bx.get_param(0);
let param_argv = bx.get_param(1);
}
}
-pub const CODEGEN_WORKER_ID: usize = usize::MAX;
+/// Returns all of the debugger visualizers specified for the current crate,
+/// together with those of all transitively used upstream crates, that match
+/// the given `visualizer_type`.
+pub fn collect_debugger_visualizers_transitive(
+ tcx: TyCtxt<'_>,
+ visualizer_type: DebuggerVisualizerType,
+) -> BTreeSet<DebuggerVisualizerFile> {
+ tcx.debugger_visualizers(LOCAL_CRATE)
+ .iter()
+ .chain(
+ tcx.crates(())
+ .iter()
+ .filter(|&cnum| {
+ let used_crate_source = tcx.used_crate_source(*cnum);
+ used_crate_source.rlib.is_some() || used_crate_source.rmeta.is_some()
+ })
+ .flat_map(|&cnum| tcx.debugger_visualizers(cnum)),
+ )
+ .filter(|visualizer| visualizer.visualizer_type == visualizer_type)
+ .cloned()
+ .collect::<BTreeSet<_>>()
+}
pub fn codegen_crate<B: ExtraBackendMethods>(
backend: B,
- tcx: TyCtxt<'tcx>,
+ tcx: TyCtxt<'_>,
+ target_cpu: String,
metadata: EncodedMetadata,
need_metadata_module: bool,
) -> OngoingCodegen<B> {
// Skip crate items and just output metadata in -Z no-codegen mode.
- if tcx.sess.opts.debugging_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
- let ongoing_codegen = start_async_codegen(backend, tcx, metadata, 1);
+ if tcx.sess.opts.unstable_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
+ let ongoing_codegen = start_async_codegen(backend, tcx, target_cpu, metadata, None, 1);
ongoing_codegen.codegen_finished(tcx);
- finalize_tcx(tcx);
-
ongoing_codegen.check_for_errors(tcx.sess);
return ongoing_codegen;
// Run the monomorphization collector and partition the collected items into
// codegen units.
- let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1;
+ let codegen_units = tcx.collect_and_partition_mono_items(()).1;
// Force all codegen_unit queries so they are already either red or green
// when compile_codegen_unit accesses them. We are not able to re-execute
}
}
- let ongoing_codegen = start_async_codegen(backend.clone(), tcx, metadata, codegen_units.len());
- let ongoing_codegen = AbortCodegenOnDrop::<B>(Some(ongoing_codegen));
+ let metadata_module = if need_metadata_module {
+ // Emit compressed metadata object.
+ let metadata_cgu_name =
+ cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
+ tcx.sess.time("write_compressed_metadata", || {
+ let file_name =
+ tcx.output_filenames(()).temp_path(OutputType::Metadata, Some(&metadata_cgu_name));
+ let data = create_compressed_metadata_file(
+ tcx.sess,
+ &metadata,
+ &exported_symbols::metadata_symbol_name(tcx),
+ );
+ if let Err(error) = std::fs::write(&file_name, data) {
+ tcx.sess.emit_fatal(errors::MetadataObjectFileWrite { error });
+ }
+ Some(CompiledModule {
+ name: metadata_cgu_name,
+ kind: ModuleKind::Metadata,
+ object: Some(file_name),
+ dwarf_object: None,
+ bytecode: None,
+ })
+ })
+ } else {
+ None
+ };
+
+ let ongoing_codegen = start_async_codegen(
+ backend.clone(),
+ tcx,
+ target_cpu,
+ metadata,
+ metadata_module,
+ codegen_units.len(),
+ );
// Codegen an allocator shim, if necessary.
//
// linkage, then it's already got an allocator shim and we'll be using that
// one instead. If nothing exists then it's our job to generate the
// allocator!
- let any_dynamic_crate = tcx.dependency_formats(LOCAL_CRATE).iter().any(|(_, list)| {
+ let any_dynamic_crate = tcx.dependency_formats(()).iter().any(|(_, list)| {
use rustc_middle::middle::dependency_format::Linkage;
list.iter().any(|&linkage| linkage == Linkage::Dynamic)
});
let allocator_module = if any_dynamic_crate {
None
- } else if let Some(kind) = tcx.allocator_kind() {
+ } else if let Some(kind) = tcx.allocator_kind(()) {
let llmod_id =
cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
- let mut modules = backend.new_metadata(tcx, &llmod_id);
- tcx.sess
- .time("write_allocator_module", || backend.codegen_allocator(tcx, &mut modules, kind));
+ let module_llvm = tcx.sess.time("write_allocator_module", || {
+ backend.codegen_allocator(
+ tcx,
+ &llmod_id,
+ kind,
+ // If allocator_kind is Some then alloc_error_handler_kind must
+ // also be Some.
+ tcx.alloc_error_handler_kind(()).unwrap(),
+ )
+ });
- Some(ModuleCodegen { name: llmod_id, module_llvm: modules, kind: ModuleKind::Allocator })
+ Some(ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator })
} else {
None
};
ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
}
- if need_metadata_module {
- // Codegen the encoded metadata.
- let metadata_cgu_name =
- cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
- let mut metadata_llvm_module = backend.new_metadata(tcx, &metadata_cgu_name);
- tcx.sess.time("write_compressed_metadata", || {
- backend.write_compressed_metadata(
- tcx,
- &ongoing_codegen.metadata,
- &mut metadata_llvm_module,
- );
- });
-
- let metadata_module = ModuleCodegen {
- name: metadata_cgu_name,
- module_llvm: metadata_llvm_module,
- kind: ModuleKind::Metadata,
- };
- ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module);
- }
-
- // We sort the codegen units by size. This way we can schedule work for LLVM
- // a bit more efficiently.
- let codegen_units = {
- let mut codegen_units = codegen_units.iter().collect::<Vec<_>>();
- codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
- codegen_units
+ // For better throughput during parallel processing by LLVM, we used to sort
+ // CGUs largest to smallest. This would lead to better thread utilization
+ // by, for example, preventing a large CGU from being processed last and
+ // having only one LLVM thread working while the rest remained idle.
+ //
+ // However, this strategy would lead to high memory usage, as it meant the
+ // LLVM-IR for all of the largest CGUs would be resident in memory at once.
+ //
+ // Instead, we can compromise by ordering CGUs such that the largest and
+ // smallest are first, second largest and smallest are next, etc. If there
+ // are large size variations, this can reduce memory usage significantly.
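+    //
+    // For example, CGUs with size estimates [1, 2, 3, 4, 5] are scheduled in
+    // the order [5, 1, 4, 2, 3].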
+ let codegen_units: Vec<_> = {
+ let mut sorted_cgus = codegen_units.iter().collect::<Vec<_>>();
+ sorted_cgus.sort_by_cached_key(|cgu| cgu.size_estimate());
+
+ let (first_half, second_half) = sorted_cgus.split_at(sorted_cgus.len() / 2);
+ second_half.iter().rev().interleave(first_half).copied().collect()
};
- let total_codegen_time = Lock::new(Duration::new(0, 0));
+ // Calculate the CGU reuse
+ let cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
+ codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect::<Vec<_>>()
+ });
+
+ let mut total_codegen_time = Duration::new(0, 0);
+ let start_rss = tcx.sess.opts.unstable_opts.time_passes.then(|| get_resident_set_size());
// The non-parallel compiler can only translate codegen units to LLVM IR
// on a single thread, leading to a staircase effect where the N LLVM
// This likely is a temporary measure. Once we don't have to support the
// non-parallel compiler anymore, we can compile CGUs end-to-end in
// parallel and get rid of the complicated scheduling logic.
- let pre_compile_cgus = |cgu_reuse: &[CguReuse]| {
- if cfg!(parallel_compiler) {
- tcx.sess.time("compile_first_CGU_batch", || {
- // Try to find one CGU to compile per thread.
- let cgus: Vec<_> = cgu_reuse
- .iter()
- .enumerate()
- .filter(|&(_, reuse)| reuse == &CguReuse::No)
- .take(tcx.sess.threads())
- .collect();
-
- // Compile the found CGUs in parallel.
- par_iter(cgus)
- .map(|(i, _)| {
- let start_time = Instant::now();
- let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
- let mut time = total_codegen_time.lock();
- *time += start_time.elapsed();
- (i, module)
- })
- .collect()
- })
- } else {
- FxHashMap::default()
- }
+ let mut pre_compiled_cgus = if cfg!(parallel_compiler) {
+ tcx.sess.time("compile_first_CGU_batch", || {
+ // Try to find one CGU to compile per thread.
+ let cgus: Vec<_> = cgu_reuse
+ .iter()
+ .enumerate()
+ .filter(|&(_, reuse)| reuse == &CguReuse::No)
+ .take(tcx.sess.threads())
+ .collect();
+
+ // Compile the found CGUs in parallel.
+ let start_time = Instant::now();
+
+ let pre_compiled_cgus = par_iter(cgus)
+ .map(|(i, _)| {
+ let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
+ (i, module)
+ })
+ .collect();
+
+ total_codegen_time += start_time.elapsed();
+
+ pre_compiled_cgus
+ })
+ } else {
+ FxHashMap::default()
};
- let mut cgu_reuse = Vec::new();
- let mut pre_compiled_cgus: Option<FxHashMap<usize, _>> = None;
-
for (i, cgu) in codegen_units.iter().enumerate() {
ongoing_codegen.wait_for_signal_to_codegen_item();
ongoing_codegen.check_for_errors(tcx.sess);
- // Do some setup work in the first iteration
- if pre_compiled_cgus.is_none() {
- // Calculate the CGU reuse
- cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
- codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect()
- });
- // Pre compile some CGUs
- pre_compiled_cgus = Some(pre_compile_cgus(&cgu_reuse));
- }
-
let cgu_reuse = cgu_reuse[i];
- tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
+ tcx.sess.cgu_reuse_tracker.set_actual_reuse(cgu.name().as_str(), cgu_reuse);
match cgu_reuse {
CguReuse::No => {
- let (module, cost) =
- if let Some(cgu) = pre_compiled_cgus.as_mut().unwrap().remove(&i) {
- cgu
- } else {
- let start_time = Instant::now();
- let module = backend.compile_codegen_unit(tcx, cgu.name());
- let mut time = total_codegen_time.lock();
- *time += start_time.elapsed();
- module
- };
+ let (module, cost) = if let Some(cgu) = pre_compiled_cgus.remove(&i) {
+ cgu
+ } else {
+ let start_time = Instant::now();
+ let module = backend.compile_codegen_unit(tcx, cgu.name());
+ total_codegen_time += start_time.elapsed();
+ module
+ };
+                // This will unwind if there are errors. Unfortunately, just
+                // skipping the `submit_codegened_module_to_llvm` call makes
+                // compilation hang on post-monomorphization errors.
+ tcx.sess.abort_if_errors();
+
submit_codegened_module_to_llvm(
&backend,
- &ongoing_codegen.coordinator_send,
+ &ongoing_codegen.coordinator.sender,
module,
cost,
);
submit_pre_lto_module_to_llvm(
&backend,
tcx,
- &ongoing_codegen.coordinator_send,
+ &ongoing_codegen.coordinator.sender,
CachedModuleCodegen {
name: cgu.name().to_string(),
- source: cgu.work_product(tcx),
+ source: cgu.previous_work_product(tcx),
},
);
true
CguReuse::PostLto => {
submit_post_lto_module_to_llvm(
&backend,
- &ongoing_codegen.coordinator_send,
+ &ongoing_codegen.coordinator.sender,
CachedModuleCodegen {
name: cgu.name().to_string(),
- source: cgu.work_product(tcx),
+ source: cgu.previous_work_product(tcx),
},
);
true
    // Since the main thread is sometimes blocked during codegen, we keep track
    // of the -Ztime-passes output manually.
- print_time_passes_entry(
- tcx.sess.time_passes(),
- "codegen_to_LLVM_IR",
- total_codegen_time.into_inner(),
- );
-
- ::rustc_incremental::assert_module_sources::assert_module_sources(tcx);
-
- symbol_names_test::report_symbol_names(tcx);
-
- ongoing_codegen.check_for_errors(tcx.sess);
-
- finalize_tcx(tcx);
-
- ongoing_codegen.into_inner()
-}
-
-/// A curious wrapper structure whose only purpose is to call `codegen_aborted`
-/// when it's dropped abnormally.
-///
-/// In the process of working on rust-lang/rust#55238 a mysterious segfault was
-/// stumbled upon. The segfault was never reproduced locally, but it was
-/// suspected to be related to the fact that codegen worker threads were
-/// sticking around by the time the main thread was exiting, causing issues.
-///
-/// This structure is an attempt to fix that issue where the `codegen_aborted`
-/// message will block until all workers have finished. This should ensure that
-/// even if the main codegen thread panics we'll wait for pending work to
-/// complete before returning from the main thread, hopefully avoiding
-/// segfaults.
-///
-/// If you see this comment in the code, then it means that this workaround
-/// worked! We may yet one day track down the mysterious cause of that
-/// segfault...
-struct AbortCodegenOnDrop<B: ExtraBackendMethods>(Option<OngoingCodegen<B>>);
-
-impl<B: ExtraBackendMethods> AbortCodegenOnDrop<B> {
- fn into_inner(mut self) -> OngoingCodegen<B> {
- self.0.take().unwrap()
+ if tcx.sess.opts.unstable_opts.time_passes {
+ let end_rss = get_resident_set_size();
+
+ print_time_passes_entry(
+ "codegen_to_LLVM_IR",
+ total_codegen_time,
+ start_rss.unwrap(),
+ end_rss,
+ );
}
-}
-
-impl<B: ExtraBackendMethods> Deref for AbortCodegenOnDrop<B> {
- type Target = OngoingCodegen<B>;
-
- fn deref(&self) -> &OngoingCodegen<B> {
- self.0.as_ref().unwrap()
- }
-}
-impl<B: ExtraBackendMethods> DerefMut for AbortCodegenOnDrop<B> {
- fn deref_mut(&mut self) -> &mut OngoingCodegen<B> {
- self.0.as_mut().unwrap()
- }
-}
-
-impl<B: ExtraBackendMethods> Drop for AbortCodegenOnDrop<B> {
- fn drop(&mut self) {
- if let Some(codegen) = self.0.take() {
- codegen.codegen_aborted();
- }
- }
+ ongoing_codegen.check_for_errors(tcx.sess);
+ ongoing_codegen
}
-fn finalize_tcx(tcx: TyCtxt<'_>) {
- tcx.sess.time("assert_dep_graph", || ::rustc_incremental::assert_dep_graph(tcx));
- tcx.sess.time("serialize_dep_graph", || ::rustc_incremental::save_dep_graph(tcx));
+impl CrateInfo {
+ pub fn new(tcx: TyCtxt<'_>, target_cpu: String) -> CrateInfo {
+ let exported_symbols = tcx
+ .sess
+ .crate_types()
+ .iter()
+ .map(|&c| (c, crate::back::linker::exported_symbols(tcx, c)))
+ .collect();
+ let linked_symbols = tcx
+ .sess
+ .crate_types()
+ .iter()
+ .map(|&c| (c, crate::back::linker::linked_symbols(tcx, c)))
+ .collect();
+ let local_crate_name = tcx.crate_name(LOCAL_CRATE);
+ let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID);
+ let subsystem = tcx.sess.first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem);
+ let windows_subsystem = subsystem.map(|subsystem| {
+ if subsystem != sym::windows && subsystem != sym::console {
+ tcx.sess.emit_fatal(errors::InvalidWindowsSubsystem { subsystem });
+ }
+ subsystem.to_string()
+ });
- // We assume that no queries are run past here. If there are new queries
- // after this point, they'll show up as "<unknown>" in self-profiling data.
- {
- let _prof_timer = tcx.prof.generic_activity("self_profile_alloc_query_strings");
- tcx.alloc_self_profile_query_strings();
- }
-}
+        // This list is used when generating the command line to pass through to
+        // the system linker. The linker expects undefined symbols on the left of
+        // the command line to be defined in libraries on the right, not the
+        // other way around. For more info, see some comments in the
+        // `add_used_library` function below.
+ //
+ // In order to get this left-to-right dependency ordering, we use the reverse
+ // postorder of all crates putting the leaves at the right-most positions.
+ let mut compiler_builtins = None;
+ let mut used_crates: Vec<_> = tcx
+ .postorder_cnums(())
+ .iter()
+ .rev()
+ .copied()
+ .filter(|&cnum| {
+ let link = !tcx.dep_kind(cnum).macros_only();
+ if link && tcx.is_compiler_builtins(cnum) {
+ compiler_builtins = Some(cnum);
+ return false;
+ }
+ link
+ })
+ .collect();
+        // `compiler_builtins` is always placed last to ensure that it's linked correctly.
+ used_crates.extend(compiler_builtins);
-impl CrateInfo {
- pub fn new(tcx: TyCtxt<'_>) -> CrateInfo {
let mut info = CrateInfo {
- panic_runtime: None,
- compiler_builtins: None,
+ target_cpu,
+ exported_symbols,
+ linked_symbols,
+ local_crate_name,
+ compiler_builtins,
profiler_runtime: None,
is_no_builtins: Default::default(),
native_libraries: Default::default(),
- used_libraries: tcx.native_libraries(LOCAL_CRATE),
- link_args: tcx.link_args(LOCAL_CRATE),
+ used_libraries: tcx.native_libraries(LOCAL_CRATE).iter().map(Into::into).collect(),
crate_name: Default::default(),
- used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic),
- used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic),
+ used_crates,
used_crate_source: Default::default(),
- lang_item_to_crate: Default::default(),
- missing_lang_items: Default::default(),
- dependency_formats: tcx.dependency_formats(LOCAL_CRATE),
+ dependency_formats: tcx.dependency_formats(()).clone(),
+ windows_subsystem,
+ natvis_debugger_visualizers: Default::default(),
};
- let lang_items = tcx.lang_items();
-
- let crates = tcx.crates();
+ let crates = tcx.crates(());
let n_crates = crates.len();
info.native_libraries.reserve(n_crates);
info.crate_name.reserve(n_crates);
info.used_crate_source.reserve(n_crates);
- info.missing_lang_items.reserve(n_crates);
for &cnum in crates.iter() {
- info.native_libraries.insert(cnum, tcx.native_libraries(cnum));
- info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string());
- info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum));
- if tcx.is_panic_runtime(cnum) {
- info.panic_runtime = Some(cnum);
- }
- if tcx.is_compiler_builtins(cnum) {
- info.compiler_builtins = Some(cnum);
- }
+ info.native_libraries
+ .insert(cnum, tcx.native_libraries(cnum).iter().map(Into::into).collect());
+ info.crate_name.insert(cnum, tcx.crate_name(cnum));
+
+ let used_crate_source = tcx.used_crate_source(cnum);
+ info.used_crate_source.insert(cnum, used_crate_source.clone());
if tcx.is_profiler_runtime(cnum) {
info.profiler_runtime = Some(cnum);
}
if tcx.is_no_builtins(cnum) {
info.is_no_builtins.insert(cnum);
}
- let missing = tcx.missing_lang_items(cnum);
- for &item in missing.iter() {
- if let Ok(id) = lang_items.require(item) {
- info.lang_item_to_crate.insert(item, id.krate);
- }
+ }
+
+ // Handle circular dependencies in the standard library.
+ // See comment before `add_linked_symbol_object` function for the details.
+ // If global LTO is enabled then almost everything (*) is glued into a single object file,
+ // so this logic is not necessary and can cause issues on some targets (due to weak lang
+ // item symbols being "privatized" to that object file), so we disable it.
+ // (*) Native libs, and `#[compiler_builtins]` and `#[no_builtins]` crates are not glued,
+ // and we assume that they cannot define weak lang items. This is not currently enforced
+ // by the compiler, but that's ok because all this stuff is unstable anyway.
+ let target = &tcx.sess.target;
+ if !are_upstream_rust_objects_already_included(tcx.sess) {
+ let missing_weak_lang_items: FxHashSet<Symbol> = info
+ .used_crates
+ .iter()
+ .flat_map(|&cnum| tcx.missing_lang_items(cnum))
+ .filter(|l| l.is_weak())
+ .filter_map(|&l| {
+ let name = l.link_name()?;
+ lang_items::required(tcx, l).then_some(name)
+ })
+ .collect();
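+            // On 32-bit Windows, C symbols are decorated with a leading
+            // underscore.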
+ let prefix = if target.is_like_windows && target.arch == "x86" { "_" } else { "" };
+ info.linked_symbols
+ .iter_mut()
+ .filter(|(crate_type, _)| {
+ !matches!(crate_type, CrateType::Rlib | CrateType::Staticlib)
+ })
+ .for_each(|(_, linked_symbols)| {
+ linked_symbols.extend(
+ missing_weak_lang_items
+ .iter()
+ .map(|item| (format!("{prefix}{item}"), SymbolExportKind::Text)),
+ )
+ });
+ }
+
+ let embed_visualizers = tcx.sess.crate_types().iter().any(|&crate_type| match crate_type {
+ CrateType::Executable | CrateType::Dylib | CrateType::Cdylib => {
+ // These are crate types for which we invoke the linker and can embed
+ // NatVis visualizers.
+ true
}
+ CrateType::ProcMacro => {
+ // We could embed NatVis for proc macro crates too (to improve the debugging
+ // experience for them) but it does not seem like a good default, since
+ // this is a rare use case and we don't want to slow down the common case.
+ false
+ }
+ CrateType::Staticlib | CrateType::Rlib => {
+ // We don't invoke the linker for these, so we don't need to collect the NatVis for them.
+ false
+ }
+ });
- // No need to look for lang items that don't actually need to exist.
- let missing =
- missing.iter().cloned().filter(|&l| lang_items::required(tcx, l)).collect();
- info.missing_lang_items.insert(cnum, missing);
+ if target.is_like_msvc && embed_visualizers {
+ info.natvis_debugger_visualizers =
+ collect_debugger_visualizers_transitive(tcx, DebuggerVisualizerType::Natvis);
}
info
}
}
-pub fn provide_both(providers: &mut Providers) {
+pub fn provide(providers: &mut Providers) {
providers.backend_optimization_level = |tcx, cratenum| {
let for_speed = match tcx.sess.opts.optimize {
// If globally no optimisation is done, #[optimize] has no effect.
};
let (defids, _) = tcx.collect_and_partition_mono_items(cratenum);
- for id in &*defids {
+
+ let any_for_speed = defids.items().any(|id| {
let CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id);
match optimize {
- attr::OptimizeAttr::None => continue,
- attr::OptimizeAttr::Size => continue,
- attr::OptimizeAttr::Speed => {
- return for_speed;
- }
+ attr::OptimizeAttr::None | attr::OptimizeAttr::Size => false,
+ attr::OptimizeAttr::Speed => true,
}
- }
- tcx.sess.opts.optimize
- };
+ });
- providers.dllimport_foreign_items = |tcx, krate| {
- let module_map = tcx.foreign_modules(krate);
- let module_map =
- module_map.iter().map(|lib| (lib.def_id, lib)).collect::<FxHashMap<_, _>>();
+ if any_for_speed {
+ return for_speed;
+ }
- let dllimports = tcx
- .native_libraries(krate)
- .iter()
- .filter(|lib| {
- if !matches!(lib.kind, NativeLibKind::Dylib | NativeLibKind::Unspecified) {
- return false;
- }
- let cfg = match lib.cfg {
- Some(ref cfg) => cfg,
- None => return true,
- };
- attr::cfg_matches(cfg, &tcx.sess.parse_sess, None)
- })
- .filter_map(|lib| lib.foreign_module)
- .map(|id| &module_map[&id])
- .flat_map(|module| module.foreign_items.iter().cloned())
- .collect();
- dllimports
+ tcx.sess.opts.optimize
};
-
- providers.is_dllimport_foreign_item =
- |tcx, def_id| tcx.dllimport_foreign_items(def_id.krate).contains(&def_id);
}
fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse {
cgu.name()
);
- if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
+ if tcx.try_mark_green(&dep_node) {
// We can re-use either the pre- or the post-thinlto state. If no LTO is
// being performed then we can use post-LTO artifacts, otherwise we must
// reuse pre-LTO artifacts