use rustc_fs_util::{link_or_copy, path_to_c_string};
use rustc_middle::bug;
use rustc_middle::ty::TyCtxt;
-use rustc_session::config::{self, Lto, OutputType, Passes, SwitchWithOptPath};
+use rustc_session::config::{self, Lto, OutputType, Passes, SplitDwarfKind, SwitchWithOptPath};
use rustc_session::Session;
use rustc_span::symbol::sym;
use rustc_span::InnerSpan;
}
}
-pub fn write_output_file(
+pub fn write_output_file<'ll>(
handler: &rustc_errors::Handler,
target: &'ll llvm::TargetMachine,
pm: &llvm::PassManager<'ll>,
pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut llvm::TargetMachine {
let split_dwarf_file = if tcx.sess.target_can_use_split_dwarf() {
- tcx.output_filenames(()).split_dwarf_path(tcx.sess.split_debuginfo(), Some(mod_name))
+ tcx.output_filenames(()).split_dwarf_path(
+ tcx.sess.split_debuginfo(),
+ tcx.sess.opts.debugging_opts.split_dwarf_kind,
+ Some(mod_name),
+ )
} else {
None
};
let use_init_array =
!sess.opts.debugging_opts.use_ctors_section.unwrap_or(sess.target.use_ctors_section);
+ let path_mapping = sess.source_map().path_mapping().clone();
+
Arc::new(move |config: TargetMachineFactoryConfig| {
- let split_dwarf_file = config.split_dwarf_file.unwrap_or_default();
+ let split_dwarf_file =
+ path_mapping.map_prefix(config.split_dwarf_file.unwrap_or_default()).0;
let split_dwarf_file = CString::new(split_dwarf_file.to_str().unwrap()).unwrap();
let tm = unsafe {
pub struct DiagnosticHandlers<'a> {
data: *mut (&'a CodegenContext<LlvmCodegenBackend>, &'a Handler),
llcx: &'a llvm::Context,
+ old_handler: Option<&'a llvm::DiagnosticHandler>,
}
impl<'a> DiagnosticHandlers<'a> {
handler: &'a Handler,
llcx: &'a llvm::Context,
) -> Self {
+ let remark_passes_all: bool;
+ let remark_passes: Vec<CString>;
+ match &cgcx.remark {
+ Passes::All => {
+ remark_passes_all = true;
+ remark_passes = Vec::new();
+ }
+ Passes::Some(passes) => {
+ remark_passes_all = false;
+ remark_passes =
+ passes.iter().map(|name| CString::new(name.as_str()).unwrap()).collect();
+ }
+ };
+ let remark_passes: Vec<*const c_char> =
+ remark_passes.iter().map(|name: &CString| name.as_ptr()).collect();
let data = Box::into_raw(Box::new((cgcx, handler)));
unsafe {
+ let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx);
+ llvm::LLVMRustContextConfigureDiagnosticHandler(
+ llcx,
+ diagnostic_handler,
+ data.cast(),
+ remark_passes_all,
+ remark_passes.as_ptr(),
+ remark_passes.len(),
+ );
llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data.cast());
- llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data.cast());
+ DiagnosticHandlers { data, llcx, old_handler }
}
- DiagnosticHandlers { data, llcx }
}
}
use std::ptr::null_mut;
unsafe {
llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut());
- llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, null_mut());
+ llvm::LLVMRustContextSetDiagnosticHandler(self.llcx, self.old_handler);
drop(Box::from_raw(self.data));
}
}
if enabled {
diag_handler.note_without_error(&format!(
- "optimization {} for {} at {}:{}:{}: {}",
- opt.kind.describe(),
- opt.pass_name,
- opt.filename,
- opt.line,
- opt.column,
- opt.message
+ "{}:{}:{}: {}: {}",
+ opt.filename, opt.line, opt.column, opt.pass_name, opt.message,
));
}
}
.map(|path_buf| CString::new(path_buf.to_string_lossy().as_bytes()).unwrap())
}
-pub(crate) fn should_use_new_llvm_pass_manager(
- _cgcx: &CodegenContext<LlvmCodegenBackend>,
- config: &ModuleConfig,
-) -> bool {
- // The new pass manager is causing significant performance issues such as #91128, and is
- // therefore disabled in stable versions of rustc by default.
- config
- .new_llvm_pass_manager
- .unwrap_or(false)
-}
-
pub(crate) unsafe fn optimize_with_new_llvm_pass_manager(
cgcx: &CodegenContext<LlvmCodegenBackend>,
diag_handler: &Handler,
let extra_passes = config.passes.join(",");
+ let llvm_plugins = config.llvm_plugins.join(",");
+
// FIXME: NewPM doesn't provide a facility to pass custom InlineParams.
// We would have to add upstream support for this first, before we can support
// config.inline_threshold and our more aggressive default thresholds.
selfprofile_after_pass_callback,
extra_passes.as_ptr().cast(),
extra_passes.len(),
+ llvm_plugins.as_ptr().cast(),
+ llvm_plugins.len(),
);
result.into_result().map_err(|()| llvm_err(diag_handler, "failed to run LLVM passes"))
}
module: &ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
) -> Result<(), FatalError> {
- let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &module.name[..]);
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name);
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
}
if let Some(opt_level) = config.opt_level {
- if should_use_new_llvm_pass_manager(cgcx, config) {
+ if llvm_util::should_use_new_llvm_pass_manager(
+ &config.new_llvm_pass_manager,
+ &cgcx.target_arch,
+ ) {
let opt_stage = match cgcx.lto {
Lto::Fat => llvm::OptStage::PreLinkFatLTO,
Lto::Thin | Lto::ThinLocal => llvm::OptStage::PreLinkThinLTO,
{
let _timer = cgcx.prof.extra_verbose_generic_activity(
"LLVM_module_optimize_function_passes",
- &module.name[..],
+ &*module.name,
);
llvm::LLVMRustRunFunctionPassManager(fpm, llmod);
}
{
let _timer = cgcx.prof.extra_verbose_generic_activity(
"LLVM_module_optimize_module_passes",
- &module.name[..],
+ &*module.name,
);
llvm::LLVMRunPassManager(mpm, llmod);
}
module: ModuleCodegen<ModuleLlvm>,
config: &ModuleConfig,
) -> Result<CompiledModule, FatalError> {
- let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &module.name[..]);
+ let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name);
{
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
if config.bitcode_needed() {
let _timer = cgcx
.prof
- .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &module.name[..]);
+ .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &*module.name);
let thin = ThinBuffer::new(llmod);
let data = thin.data();
}
if config.emit_bc || config.emit_obj == EmitObj::Bitcode {
- let _timer = cgcx.prof.generic_activity_with_arg(
- "LLVM_module_codegen_emit_bitcode",
- &module.name[..],
- );
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_emit_bitcode", &*module.name);
if let Err(e) = fs::write(&bc_out, data) {
let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
diag_handler.err(&msg);
}
if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
- let _timer = cgcx.prof.generic_activity_with_arg(
- "LLVM_module_codegen_embed_bitcode",
- &module.name[..],
- );
+ let _timer = cgcx
+ .prof
+ .generic_activity_with_arg("LLVM_module_codegen_embed_bitcode", &*module.name);
embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data);
}
}
if config.emit_ir {
- let _timer = cgcx
- .prof
- .generic_activity_with_arg("LLVM_module_codegen_emit_ir", &module.name[..]);
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_ir", &*module.name);
let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
let out_c = path_to_c_string(&out);
}
if config.emit_asm {
- let _timer = cgcx
- .prof
- .generic_activity_with_arg("LLVM_module_codegen_emit_asm", &module.name[..]);
+ let _timer =
+ cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module.name);
let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
// We can't use the same module for asm and object code output,
EmitObj::ObjectCode(_) => {
let _timer = cgcx
.prof
- .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &module.name[..]);
+ .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &*module.name);
let dwo_out = cgcx.output_filenames.temp_path_dwo(module_name);
- let dwo_out = match cgcx.split_debuginfo {
- // Don't change how DWARF is emitted in single mode (or when disabled).
- SplitDebuginfo::Off | SplitDebuginfo::Packed => None,
- // Emit (a subset of the) DWARF into a separate file in split mode.
- SplitDebuginfo::Unpacked => {
- if cgcx.target_can_use_split_dwarf {
- Some(dwo_out.as_path())
- } else {
- None
- }
- }
+ let dwo_out = match (cgcx.split_debuginfo, cgcx.split_dwarf_kind) {
+ // Don't change how DWARF is emitted when disabled.
+ (SplitDebuginfo::Off, _) => None,
+ // Don't provide a DWARF object path if split debuginfo is enabled but this is
+ // a platform that doesn't support Split DWARF.
+ _ if !cgcx.target_can_use_split_dwarf => None,
+ // Don't provide a DWARF object path in single mode, sections will be written
+ // into the object as normal but ignored by linker.
+ (_, SplitDwarfKind::Single) => None,
+ // Emit (a subset of the) DWARF into a separate dwarf object file in split
+ // mode.
+ (_, SplitDwarfKind::Split) => Some(dwo_out.as_path()),
};
with_codegen(tm, llmod, config.no_builtins, |cpm| {
Ok(module.into_compiled_module(
config.emit_obj != EmitObj::None,
- cgcx.target_can_use_split_dwarf && cgcx.split_debuginfo == SplitDebuginfo::Unpacked,
+ cgcx.target_can_use_split_dwarf
+ && cgcx.split_debuginfo != SplitDebuginfo::Off
+ && cgcx.split_dwarf_kind == SplitDwarfKind::Split,
config.emit_bc,
&cgcx.output_filenames,
))
}
/// Builds a module-level assembly snippet that emits `data` into a custom
/// section `section_name` carrying the given `section_flags`.
///
/// The payload is written with a `.ascii` directive so the assembler
/// reproduces the bytes verbatim; bytes outside printable ASCII are encoded
/// as escape sequences to keep the inline assembly valid UTF-8.
fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: &[u8]) -> Vec<u8> {
    let mut asm = format!(".section {},\"{}\"\n", section_name, section_flags).into_bytes();
    asm.extend_from_slice(b".ascii \"");
    asm.reserve(data.len());
    for &byte in data {
        match byte {
            // Backslash and double-quote must be escaped inside `.ascii`.
            b'\\' | b'"' => {
                asm.push(b'\\');
                asm.push(byte);
            }
            // Printable ASCII passes through unchanged.
            0x20..=0x7f => asm.push(byte),
            // Everything else (control chars and >= 0x80): three-digit octal
            // escape. Octal is fixed-width, so — unlike hex escapes — it
            // cannot swallow characters that follow it.
            _ => {
                asm.push(b'\\');
                asm.push(b'0' + ((byte >> 6) & 0x7));
                asm.push(b'0' + ((byte >> 3) & 0x7));
                asm.push(b'0' + (byte & 0x7));
            }
        }
    }
    asm.extend_from_slice(b"\"\n");
    asm
}
+
/// Embed the bitcode of an LLVM module in the LLVM module itself.
///
/// This is done primarily for iOS where it appears to be standard to compile C
cmdline: &str,
bitcode: &[u8],
) {
- let llconst = common::bytes_in_context(llcx, bitcode);
- let llglobal = llvm::LLVMAddGlobal(
- llmod,
- common::val_ty(llconst),
- "rustc.embedded.module\0".as_ptr().cast(),
- );
- llvm::LLVMSetInitializer(llglobal, llconst);
-
- let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
- || cgcx.opts.target_triple.triple().contains("-darwin")
- || cgcx.opts.target_triple.triple().contains("-tvos");
-
- let section = if is_apple { "__LLVM,__bitcode\0" } else { ".llvmbc\0" };
- llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
- llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
- llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
-
- let llconst = common::bytes_in_context(llcx, cmdline.as_bytes());
- let llglobal = llvm::LLVMAddGlobal(
- llmod,
- common::val_ty(llconst),
- "rustc.embedded.cmdline\0".as_ptr().cast(),
- );
- llvm::LLVMSetInitializer(llglobal, llconst);
- let section = if is_apple { "__LLVM,__cmdline\0" } else { ".llvmcmd\0" };
- llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
- llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
-
// We're adding custom sections to the output object file, but we definitely
// do not want these custom sections to make their way into the final linked
// executable. The purpose of these custom sections is for tooling
// * COFF - if we don't do anything the linker will by default copy all
// these sections to the output artifact, not what we want! To subvert
// this we want to flag the sections we inserted here as
- // `IMAGE_SCN_LNK_REMOVE`. Unfortunately though LLVM has no native way to
- // do this. Thankfully though we can do this with some inline assembly,
- // which is easy enough to add via module-level global inline asm.
+ // `IMAGE_SCN_LNK_REMOVE`.
//
// * ELF - this is very similar to COFF above. One difference is that these
// sections are removed from the output linked artifact when
// `--gc-sections` is passed, which we pass by default. If that flag isn't
// passed though then these sections will show up in the final output.
// Additionally the flag that we need to set here is `SHF_EXCLUDE`.
+ //
+ // Unfortunately, LLVM provides no way to set custom section flags. For ELF
+ // and COFF we emit the sections using module level inline assembly for that
+ // reason (see issue #90326 for historical background).
+ let is_apple = cgcx.opts.target_triple.triple().contains("-ios")
+ || cgcx.opts.target_triple.triple().contains("-darwin")
+ || cgcx.opts.target_triple.triple().contains("-tvos");
if is_apple
|| cgcx.opts.target_triple.triple().starts_with("wasm")
|| cgcx.opts.target_triple.triple().starts_with("asmjs")
{
- // nothing to do here
- } else if cgcx.is_pe_coff {
- let asm = "
- .section .llvmbc,\"n\"
- .section .llvmcmd,\"n\"
- ";
- llvm::LLVMRustAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+ // We don't need custom section flags, create LLVM globals.
+ let llconst = common::bytes_in_context(llcx, bitcode);
+ let llglobal = llvm::LLVMAddGlobal(
+ llmod,
+ common::val_ty(llconst),
+ "rustc.embedded.module\0".as_ptr().cast(),
+ );
+ llvm::LLVMSetInitializer(llglobal, llconst);
+
+ let section = if is_apple { "__LLVM,__bitcode\0" } else { ".llvmbc\0" };
+ llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
+ llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
+ llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
+
+ let llconst = common::bytes_in_context(llcx, cmdline.as_bytes());
+ let llglobal = llvm::LLVMAddGlobal(
+ llmod,
+ common::val_ty(llconst),
+ "rustc.embedded.cmdline\0".as_ptr().cast(),
+ );
+ llvm::LLVMSetInitializer(llglobal, llconst);
+ let section = if is_apple { "__LLVM,__cmdline\0" } else { ".llvmcmd\0" };
+ llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
+ llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
} else {
- let asm = "
- .section .llvmbc,\"e\"
- .section .llvmcmd,\"e\"
- ";
+ // We need custom section flags, so emit module-level inline assembly.
+ let section_flags = if cgcx.is_pe_coff { "n" } else { "e" };
+ let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode);
+ llvm::LLVMRustAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
+ let asm = create_section_with_flags_asm(".llvmcmd", section_flags, cmdline.as_bytes());
llvm::LLVMRustAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
}
}