use std::any::Any;
use std::fs;
use std::io;
+use std::marker::PhantomData;
use std::mem;
use std::path::{Path, PathBuf};
use std::str;
pub emit_ir: bool,
pub emit_asm: bool,
pub emit_obj: EmitObj,
+ pub emit_thin_lto: bool,
pub bc_cmdline: String,
// Miscellaneous flags. These are mostly copied from command-line
SwitchWithOptPath::Disabled
),
pgo_use: if_regular!(sess.opts.cg.profile_use.clone(), None),
- pgo_sample_use: if_regular!(sess.opts.debugging_opts.profile_sample_use.clone(), None),
- debug_info_for_profiling: sess.opts.debugging_opts.debug_info_for_profiling,
+ pgo_sample_use: if_regular!(sess.opts.unstable_opts.profile_sample_use.clone(), None),
+ debug_info_for_profiling: sess.opts.unstable_opts.debug_info_for_profiling,
instrument_coverage: if_regular!(sess.instrument_coverage(), false),
instrument_gcov: if_regular!(
// compiler_builtins overrides the codegen-units settings,
// which is incompatible with -Zprofile which requires that
// only a single codegen unit is used per crate.
- sess.opts.debugging_opts.profile && !is_compiler_builtins,
+ sess.opts.unstable_opts.profile && !is_compiler_builtins,
false
),
- sanitizer: if_regular!(sess.opts.debugging_opts.sanitizer, SanitizerSet::empty()),
+ sanitizer: if_regular!(sess.opts.unstable_opts.sanitizer, SanitizerSet::empty()),
sanitizer_recover: if_regular!(
- sess.opts.debugging_opts.sanitizer_recover,
+ sess.opts.unstable_opts.sanitizer_recover,
SanitizerSet::empty()
),
sanitizer_memory_track_origins: if_regular!(
- sess.opts.debugging_opts.sanitizer_memory_track_origins,
+ sess.opts.unstable_opts.sanitizer_memory_track_origins,
0
),
false
),
emit_obj,
+ emit_thin_lto: sess.opts.unstable_opts.emit_thin_lto,
bc_cmdline: sess.target.bitcode_llvm_cmdline.to_string(),
verify_llvm_ir: sess.verify_llvm_ir(),
// O2 and O3) since it can be useful for reducing code size.
merge_functions: match sess
.opts
- .debugging_opts
+ .unstable_opts
.merge_functions
.unwrap_or(sess.target.merge_functions)
{
},
inline_threshold: sess.opts.cg.inline_threshold,
- new_llvm_pass_manager: sess.opts.debugging_opts.new_llvm_pass_manager,
+ new_llvm_pass_manager: sess.opts.unstable_opts.new_llvm_pass_manager,
emit_lifetime_markers: sess.emit_lifetime_markers(),
- llvm_plugins: if_regular!(sess.opts.debugging_opts.llvm_plugins.clone(), vec![]),
+ llvm_plugins: if_regular!(sess.opts.unstable_opts.llvm_plugins.clone(), vec![]),
}
}
metadata_module,
crate_info,
- coordinator_send,
codegen_worker_receive,
shared_emitter_main,
- future: coordinator_thread,
+ coordinator: Coordinator {
+ sender: coordinator_send,
+ future: Some(coordinator_thread),
+ phantom: PhantomData,
+ },
output_filenames: tcx.output_filenames(()).clone(),
}
}
let _timer = sess.timer("copy_all_cgu_workproducts_to_incr_comp_cache_dir");
for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) {
- if let Some(path) = &module.object {
- if let Some((id, product)) =
- copy_cgu_workproduct_to_incr_comp_cache_dir(sess, &module.name, path)
- {
- work_products.insert(id, product);
- }
+ let mut files = Vec::new();
+ if let Some(object_file_path) = &module.object {
+ files.push(("o", object_file_path.as_path()));
+ }
+ if let Some(dwarf_object_file_path) = &module.dwarf_object {
+ files.push(("dwo", dwarf_object_file_path.as_path()));
+ }
+
+ if let Some((id, product)) =
+ copy_cgu_workproduct_to_incr_comp_cache_dir(sess, &module.name, files.as_slice())
+ {
+ work_products.insert(id, product);
}
}
assert!(module_config.emit_obj != EmitObj::None);
let incr_comp_session_dir = cgcx.incr_comp_session_dir.as_ref().unwrap();
- let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, Some(&module.name));
- let source_file = in_incr_comp_dir(&incr_comp_session_dir, &module.source.saved_file);
- debug!(
- "copying pre-existing module `{}` from {:?} to {}",
- module.name,
- source_file,
- obj_out.display()
+
+ let load_from_incr_comp_dir = |output_path: PathBuf, saved_path: &str| {
+ let source_file = in_incr_comp_dir(&incr_comp_session_dir, saved_path);
+ debug!(
+ "copying pre-existing module `{}` from {:?} to {}",
+ module.name,
+ source_file,
+ output_path.display()
+ );
+ match link_or_copy(&source_file, &output_path) {
+ Ok(_) => Some(output_path),
+ Err(err) => {
+ let diag_handler = cgcx.create_diag_handler();
+ diag_handler.err(&format!(
+ "unable to copy {} to {}: {}",
+ source_file.display(),
+ output_path.display(),
+ err
+ ));
+ None
+ }
+ }
+ };
+
+ let object = load_from_incr_comp_dir(
+ cgcx.output_filenames.temp_path(OutputType::Object, Some(&module.name)),
+ &module.source.saved_files.get("o").expect("no saved object file in work product"),
);
- if let Err(err) = link_or_copy(&source_file, &obj_out) {
- let diag_handler = cgcx.create_diag_handler();
- diag_handler.err(&format!(
- "unable to copy {} to {}: {}",
- source_file.display(),
- obj_out.display(),
- err
- ));
- }
+ let dwarf_object =
+ module.source.saved_files.get("dwo").as_ref().and_then(|saved_dwarf_object_file| {
+ let dwarf_obj_out = cgcx
+ .output_filenames
+ .split_dwarf_path(cgcx.split_debuginfo, cgcx.split_dwarf_kind, Some(&module.name))
+ .expect(
+ "saved dwarf object in work product but `split_dwarf_path` returned `None`",
+ );
+ load_from_incr_comp_dir(dwarf_obj_out, &saved_dwarf_object_file)
+ });
WorkItemResult::Compiled(CompiledModule {
name: module.name,
kind: ModuleKind::Regular,
- object: Some(obj_out),
- dwarf_object: None,
+ object,
+ dwarf_object,
bytecode: None,
})
}
) -> Result<WorkItemResult<B>, FatalError> {
let diag_handler = cgcx.create_diag_handler();
- if !cgcx.opts.debugging_opts.combine_cgu
+ if !cgcx.opts.unstable_opts.combine_cgu
|| module.kind == ModuleKind::Metadata
|| module.kind == ModuleKind::Allocator
{
each_linked_rlib_for_lto.push((cnum, path.to_path_buf()));
}));
- let ol = if tcx.sess.opts.debugging_opts.no_codegen
- || !tcx.sess.opts.output_types.should_codegen()
- {
- // If we know that we won’t be doing codegen, create target machines without optimisation.
- config::OptLevel::No
- } else {
- tcx.backend_optimization_level(())
- };
+ let ol =
+ if tcx.sess.opts.unstable_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
+ // If we know that we won’t be doing codegen, create target machines without optimisation.
+ config::OptLevel::No
+ } else {
+ tcx.backend_optimization_level(())
+ };
let backend_features = tcx.global_backend_features(());
let cgcx = CodegenContext::<B> {
backend: backend.clone(),
lto: sess.lto(),
fewer_names: sess.fewer_names(),
save_temps: sess.opts.cg.save_temps,
- time_trace: sess.opts.debugging_opts.llvm_time_trace,
+ time_trace: sess.opts.unstable_opts.llvm_time_trace,
opts: Arc::new(sess.opts.clone()),
prof: sess.prof.clone(),
exported_symbols,
target_arch: tcx.sess.target.arch.to_string(),
debuginfo: tcx.sess.opts.debuginfo,
split_debuginfo: tcx.sess.split_debuginfo(),
- split_dwarf_kind: tcx.sess.opts.debugging_opts.split_dwarf_kind,
+ split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind,
};
// This is the "main loop" of parallel work happening for parallel codegen.
// work to be done.
while !codegen_done
|| running > 0
+ || main_thread_worker_state == MainThreadWorkerState::LLVMing
|| (!codegen_aborted
&& !(work_items.is_empty()
&& needs_fat_lto.is_empty()
.binary_search_by_key(&cost, |&(_, cost)| cost)
.unwrap_or_else(|e| e);
work_items.insert(insertion_index, (work, cost));
- if !cgcx.opts.debugging_opts.no_parallel_llvm {
+ if !cgcx.opts.unstable_opts.no_parallel_llvm {
helper.request_token();
}
}
};
work_items.insert(insertion_index, (llvm_work_item, cost));
- if !cgcx.opts.debugging_opts.no_parallel_llvm {
+ if !cgcx.opts.unstable_opts.no_parallel_llvm {
helper.request_token();
}
- assert!(!codegen_aborted);
assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
main_thread_worker_state = MainThreadWorkerState::Idle;
}
Message::CodegenComplete => {
codegen_done = true;
- assert!(!codegen_aborted);
assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
main_thread_worker_state = MainThreadWorkerState::Idle;
}
// then conditions above will ensure no more work is spawned but
// we'll keep executing this loop until `running` hits 0.
Message::CodegenAborted => {
- assert!(!codegen_aborted);
codegen_done = true;
codegen_aborted = true;
- assert_eq!(main_thread_worker_state, MainThreadWorkerState::Codegenning);
}
Message::Done { result: Ok(compiled_module), worker_id } => {
free_worker(worker_id);
Message::Done { result: Err(None), worker_id: _ } => {
bug!("worker thread panicked");
}
- Message::Done { result: Err(Some(WorkerFatalError)), worker_id: _ } => {
- return Err(());
+ Message::Done { result: Err(Some(WorkerFatalError)), worker_id } => {
+ // Similar to CodegenAborted, wait for remaining work to finish.
+ free_worker(worker_id);
+ codegen_done = true;
+ codegen_aborted = true;
}
Message::CodegenItem => bug!("the coordinator should not receive codegen requests"),
}
}
+ if codegen_aborted {
+ return Err(());
+ }
+
let needs_link = mem::take(&mut needs_link);
if !needs_link.is_empty() {
assert!(compiled_modules.is_empty());
}
}
+/// Handle to the codegen coordinator thread: the channel used to send it
+/// messages plus the join handle for collecting its final result.
+pub struct Coordinator<B: ExtraBackendMethods> {
+ pub sender: Sender<Box<dyn Any + Send>>,
+ /// `Some` until `join` (or `Drop`) consumes the handle; wrapped in an
+ /// `Option` so `join(mut self)` can move it out despite the `Drop` impl.
+ future: Option<thread::JoinHandle<Result<CompiledModules, ()>>>,
+ // Only used for the Message type.
+ phantom: PhantomData<B>,
+}
+
+impl<B: ExtraBackendMethods> Coordinator<B> {
+ /// Blocks until the coordinator thread finishes and returns its result.
+ /// `take()`s the handle out of `self.future` so that the subsequent `Drop`
+ /// sees `None` and does not send `CodegenAborted` / join a second time.
+ fn join(mut self) -> std::thread::Result<Result<CompiledModules, ()>> {
+ self.future.take().unwrap().join()
+ }
+}
+
+/// Dropping an un-joined `Coordinator` performs an orderly abort: tell the
+/// coordinator thread to stop spawning work, then wait for it to exit.
+impl<B: ExtraBackendMethods> Drop for Coordinator<B> {
+ fn drop(&mut self) {
+ if let Some(future) = self.future.take() {
+ // If we haven't joined yet, signal to the coordinator that it should spawn no more
+ // work, and wait for worker threads to finish.
+ // Both the send and the join results are deliberately discarded via
+ // `drop(...)`: during teardown there is nowhere to report failure.
+ drop(self.sender.send(Box::new(Message::CodegenAborted::<B>)));
+ drop(future.join());
+ }
+ }
+}
+
pub struct OngoingCodegen<B: ExtraBackendMethods> {
pub backend: B,
pub metadata: EncodedMetadata,
pub metadata_module: Option<CompiledModule>,
pub crate_info: CrateInfo,
- pub coordinator_send: Sender<Box<dyn Any + Send>>,
pub codegen_worker_receive: Receiver<Message<B>>,
pub shared_emitter_main: SharedEmitterMain,
- pub future: thread::JoinHandle<Result<CompiledModules, ()>>,
pub output_filenames: Arc<OutputFilenames>,
+ /// Replaces the former separate `coordinator_send` / `future` fields:
+ /// bundling them in `Coordinator` means dropping an `OngoingCodegen`
+ /// (e.g. on abort) signals the coordinator thread and joins it.
+ pub coordinator: Coordinator<B>,
}
impl<B: ExtraBackendMethods> OngoingCodegen<B> {
let _timer = sess.timer("finish_ongoing_codegen");
self.shared_emitter_main.check(sess, true);
- let future = self.future;
- let compiled_modules = sess.time("join_worker_thread", || match future.join() {
+ let compiled_modules = sess.time("join_worker_thread", || match self.coordinator.join() {
Ok(Ok(compiled_modules)) => compiled_modules,
Ok(Err(())) => {
sess.abort_if_errors();
// These are generally cheap and won't throw off scheduling.
let cost = 0;
- submit_codegened_module_to_llvm(&self.backend, &self.coordinator_send, module, cost);
+ submit_codegened_module_to_llvm(&self.backend, &self.coordinator.sender, module, cost);
}
pub fn codegen_finished(&self, tcx: TyCtxt<'_>) {
self.wait_for_signal_to_codegen_item();
self.check_for_errors(tcx.sess);
- drop(self.coordinator_send.send(Box::new(Message::CodegenComplete::<B>)));
- }
-
- /// Consumes this context indicating that codegen was entirely aborted, and
- /// we need to exit as quickly as possible.
- ///
- /// This method blocks the current thread until all worker threads have
- /// finished, and all worker threads should have exited or be real close to
- /// exiting at this point.
- pub fn codegen_aborted(self) {
- // Signal to the coordinator it should spawn no more work and start
- // shutdown.
- drop(self.coordinator_send.send(Box::new(Message::CodegenAborted::<B>)));
- drop(self.future.join());
+ drop(self.coordinator.sender.send(Box::new(Message::CodegenComplete::<B>)));
}
pub fn check_for_errors(&self, sess: &Session) {