1 use crate::back
::lto
::ThinBuffer
;
2 use crate::back
::profiling
::{
3 selfprofile_after_pass_callback
, selfprofile_before_pass_callback
, LlvmSelfProfiler
,
8 use crate::llvm
::{self, DiagnosticInfo, PassManager}
;
10 use crate::type_
::Type
;
11 use crate::LlvmCodegenBackend
;
12 use crate::ModuleLlvm
;
13 use rustc_codegen_ssa
::back
::link
::ensure_removed
;
14 use rustc_codegen_ssa
::back
::write
::{
15 BitcodeSection
, CodegenContext
, EmitObj
, ModuleConfig
, TargetMachineFactoryConfig
,
16 TargetMachineFactoryFn
,
18 use rustc_codegen_ssa
::traits
::*;
19 use rustc_codegen_ssa
::{CompiledModule, ModuleCodegen}
;
20 use rustc_data_structures
::profiling
::SelfProfilerRef
;
21 use rustc_data_structures
::small_c_str
::SmallCStr
;
22 use rustc_errors
::{FatalError, Handler, Level}
;
23 use rustc_fs_util
::{link_or_copy, path_to_c_string}
;
24 use rustc_middle
::ty
::TyCtxt
;
25 use rustc_session
::config
::{self, Lto, OutputType, Passes, SplitDwarfKind, SwitchWithOptPath}
;
26 use rustc_session
::Session
;
27 use rustc_span
::symbol
::sym
;
28 use rustc_span
::InnerSpan
;
29 use rustc_target
::spec
::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo}
;
31 use libc
::{c_char, c_int, c_uint, c_void, size_t}
;
32 use std
::ffi
::CString
;
34 use std
::io
::{self, Write}
;
35 use std
::path
::{Path, PathBuf}
;
40 pub fn llvm_err(handler
: &rustc_errors
::Handler
, msg
: &str) -> FatalError
{
41 match llvm
::last_error() {
42 Some(err
) => handler
.fatal(&format
!("{}: {}", msg
, err
)),
43 None
=> handler
.fatal(msg
),
47 pub fn write_output_file
<'ll
>(
48 handler
: &rustc_errors
::Handler
,
49 target
: &'ll llvm
::TargetMachine
,
50 pm
: &llvm
::PassManager
<'ll
>,
53 dwo_output
: Option
<&Path
>,
54 file_type
: llvm
::FileType
,
55 self_profiler_ref
: &SelfProfilerRef
,
56 ) -> Result
<(), FatalError
> {
57 debug
!("write_output_file output={:?} dwo_output={:?}", output
, dwo_output
);
59 let output_c
= path_to_c_string(output
);
61 let dwo_output_ptr
= if let Some(dwo_output
) = dwo_output
{
62 dwo_output_c
= path_to_c_string(dwo_output
);
67 let result
= llvm
::LLVMRustWriteOutputFile(
76 // Record artifact sizes for self-profiling
77 if result
== llvm
::LLVMRustResult
::Success
{
78 let artifact_kind
= match file_type
{
79 llvm
::FileType
::ObjectFile
=> "object_file",
80 llvm
::FileType
::AssemblyFile
=> "assembly_file",
82 record_artifact_size(self_profiler_ref
, artifact_kind
, output
);
83 if let Some(dwo_file
) = dwo_output
{
84 record_artifact_size(self_profiler_ref
, "dwo_file", dwo_file
);
88 result
.into_result().map_err(|()| {
89 let msg
= format
!("could not write output to {}", output
.display());
90 llvm_err(handler
, &msg
)
95 pub fn create_informational_target_machine(sess
: &Session
) -> &'
static mut llvm
::TargetMachine
{
96 let config
= TargetMachineFactoryConfig { split_dwarf_file: None }
;
97 // Can't use query system here quite yet because this function is invoked before the query
98 // system/tcx is set up.
99 let features
= llvm_util
::global_llvm_features(sess
, false);
100 target_machine_factory(sess
, config
::OptLevel
::No
, &features
)(config
)
101 .unwrap_or_else(|err
| llvm_err(sess
.diagnostic(), &err
).raise())
104 pub fn create_target_machine(tcx
: TyCtxt
<'_
>, mod_name
: &str) -> &'
static mut llvm
::TargetMachine
{
105 let split_dwarf_file
= if tcx
.sess
.target_can_use_split_dwarf() {
106 tcx
.output_filenames(()).split_dwarf_path(
107 tcx
.sess
.split_debuginfo(),
108 tcx
.sess
.opts
.unstable_opts
.split_dwarf_kind
,
114 let config
= TargetMachineFactoryConfig { split_dwarf_file }
;
115 target_machine_factory(
117 tcx
.backend_optimization_level(()),
118 tcx
.global_backend_features(()),
120 .unwrap_or_else(|err
| llvm_err(tcx
.sess
.diagnostic(), &err
).raise())
123 pub fn to_llvm_opt_settings(
124 cfg
: config
::OptLevel
,
125 ) -> (llvm
::CodeGenOptLevel
, llvm
::CodeGenOptSize
) {
126 use self::config
::OptLevel
::*;
128 No
=> (llvm
::CodeGenOptLevel
::None
, llvm
::CodeGenOptSizeNone
),
129 Less
=> (llvm
::CodeGenOptLevel
::Less
, llvm
::CodeGenOptSizeNone
),
130 Default
=> (llvm
::CodeGenOptLevel
::Default
, llvm
::CodeGenOptSizeNone
),
131 Aggressive
=> (llvm
::CodeGenOptLevel
::Aggressive
, llvm
::CodeGenOptSizeNone
),
132 Size
=> (llvm
::CodeGenOptLevel
::Default
, llvm
::CodeGenOptSizeDefault
),
133 SizeMin
=> (llvm
::CodeGenOptLevel
::Default
, llvm
::CodeGenOptSizeAggressive
),
137 fn to_pass_builder_opt_level(cfg
: config
::OptLevel
) -> llvm
::PassBuilderOptLevel
{
138 use config
::OptLevel
::*;
140 No
=> llvm
::PassBuilderOptLevel
::O0
,
141 Less
=> llvm
::PassBuilderOptLevel
::O1
,
142 Default
=> llvm
::PassBuilderOptLevel
::O2
,
143 Aggressive
=> llvm
::PassBuilderOptLevel
::O3
,
144 Size
=> llvm
::PassBuilderOptLevel
::Os
,
145 SizeMin
=> llvm
::PassBuilderOptLevel
::Oz
,
149 fn to_llvm_relocation_model(relocation_model
: RelocModel
) -> llvm
::RelocModel
{
150 match relocation_model
{
151 RelocModel
::Static
=> llvm
::RelocModel
::Static
,
152 // LLVM doesn't have a PIE relocation model, it represents PIE as PIC with an extra attribute.
153 RelocModel
::Pic
| RelocModel
::Pie
=> llvm
::RelocModel
::PIC
,
154 RelocModel
::DynamicNoPic
=> llvm
::RelocModel
::DynamicNoPic
,
155 RelocModel
::Ropi
=> llvm
::RelocModel
::ROPI
,
156 RelocModel
::Rwpi
=> llvm
::RelocModel
::RWPI
,
157 RelocModel
::RopiRwpi
=> llvm
::RelocModel
::ROPI_RWPI
,
161 pub(crate) fn to_llvm_code_model(code_model
: Option
<CodeModel
>) -> llvm
::CodeModel
{
163 Some(CodeModel
::Tiny
) => llvm
::CodeModel
::Tiny
,
164 Some(CodeModel
::Small
) => llvm
::CodeModel
::Small
,
165 Some(CodeModel
::Kernel
) => llvm
::CodeModel
::Kernel
,
166 Some(CodeModel
::Medium
) => llvm
::CodeModel
::Medium
,
167 Some(CodeModel
::Large
) => llvm
::CodeModel
::Large
,
168 None
=> llvm
::CodeModel
::None
,
172 pub fn target_machine_factory(
174 optlvl
: config
::OptLevel
,
175 target_features
: &[String
],
176 ) -> TargetMachineFactoryFn
<LlvmCodegenBackend
> {
177 let reloc_model
= to_llvm_relocation_model(sess
.relocation_model());
179 let (opt_level
, _
) = to_llvm_opt_settings(optlvl
);
180 let use_softfp
= sess
.opts
.cg
.soft_float
;
182 let ffunction_sections
=
183 sess
.opts
.unstable_opts
.function_sections
.unwrap_or(sess
.target
.function_sections
);
184 let fdata_sections
= ffunction_sections
;
185 let funique_section_names
= !sess
.opts
.unstable_opts
.no_unique_section_names
;
187 let code_model
= to_llvm_code_model(sess
.code_model());
189 let mut singlethread
= sess
.target
.singlethread
;
191 // On the wasm target once the `atomics` feature is enabled that means that
192 // we're no longer single-threaded, or otherwise we don't want LLVM to
193 // lower atomic operations to single-threaded operations.
194 if singlethread
&& sess
.target
.is_like_wasm
&& sess
.target_features
.contains(&sym
::atomics
) {
195 singlethread
= false;
198 let triple
= SmallCStr
::new(&sess
.target
.llvm_target
);
199 let cpu
= SmallCStr
::new(llvm_util
::target_cpu(sess
));
200 let features
= CString
::new(target_features
.join(",")).unwrap();
201 let abi
= SmallCStr
::new(&sess
.target
.llvm_abiname
);
202 let trap_unreachable
=
203 sess
.opts
.unstable_opts
.trap_unreachable
.unwrap_or(sess
.target
.trap_unreachable
);
204 let emit_stack_size_section
= sess
.opts
.unstable_opts
.emit_stack_sizes
;
206 let asm_comments
= sess
.asm_comments();
207 let relax_elf_relocations
=
208 sess
.opts
.unstable_opts
.relax_elf_relocations
.unwrap_or(sess
.target
.relax_elf_relocations
);
211 !sess
.opts
.unstable_opts
.use_ctors_section
.unwrap_or(sess
.target
.use_ctors_section
);
213 let path_mapping
= sess
.source_map().path_mapping().clone();
215 Arc
::new(move |config
: TargetMachineFactoryConfig
| {
216 let split_dwarf_file
=
217 path_mapping
.map_prefix(config
.split_dwarf_file
.unwrap_or_default()).0;
218 let split_dwarf_file
= CString
::new(split_dwarf_file
.to_str().unwrap()).unwrap();
221 llvm
::LLVMRustCreateTargetMachine(
232 funique_section_names
,
236 emit_stack_size_section
,
237 relax_elf_relocations
,
239 split_dwarf_file
.as_ptr(),
244 format
!("Could not create LLVM TargetMachine for triple: {}", triple
.to_str().unwrap())
249 pub(crate) fn save_temp_bitcode(
250 cgcx
: &CodegenContext
<LlvmCodegenBackend
>,
251 module
: &ModuleCodegen
<ModuleLlvm
>,
254 if !cgcx
.save_temps
{
258 let ext
= format
!("{}.bc", name
);
259 let cgu
= Some(&module
.name
[..]);
260 let path
= cgcx
.output_filenames
.temp_path_ext(&ext
, cgu
);
261 let cstr
= path_to_c_string(&path
);
262 let llmod
= module
.module_llvm
.llmod();
263 llvm
::LLVMWriteBitcodeToFile(llmod
, cstr
.as_ptr());
267 pub struct DiagnosticHandlers
<'a
> {
268 data
: *mut (&'a CodegenContext
<LlvmCodegenBackend
>, &'a Handler
),
269 llcx
: &'a llvm
::Context
,
270 old_handler
: Option
<&'a llvm
::DiagnosticHandler
>,
273 impl<'a
> DiagnosticHandlers
<'a
> {
275 cgcx
: &'a CodegenContext
<LlvmCodegenBackend
>,
276 handler
: &'a Handler
,
277 llcx
: &'a llvm
::Context
,
279 let remark_passes_all
: bool
;
280 let remark_passes
: Vec
<CString
>;
283 remark_passes_all
= true;
284 remark_passes
= Vec
::new();
286 Passes
::Some(passes
) => {
287 remark_passes_all
= false;
289 passes
.iter().map(|name
| CString
::new(name
.as_str()).unwrap()).collect();
292 let remark_passes
: Vec
<*const c_char
> =
293 remark_passes
.iter().map(|name
: &CString
| name
.as_ptr()).collect();
294 let data
= Box
::into_raw(Box
::new((cgcx
, handler
)));
296 let old_handler
= llvm
::LLVMRustContextGetDiagnosticHandler(llcx
);
297 llvm
::LLVMRustContextConfigureDiagnosticHandler(
302 remark_passes
.as_ptr(),
305 DiagnosticHandlers { data, llcx, old_handler }
310 impl<'a
> Drop
for DiagnosticHandlers
<'a
> {
313 llvm
::LLVMRustContextSetDiagnosticHandler(self.llcx
, self.old_handler
);
314 drop(Box
::from_raw(self.data
));
319 fn report_inline_asm(
320 cgcx
: &CodegenContext
<LlvmCodegenBackend
>,
322 level
: llvm
::DiagnosticLevel
,
324 source
: Option
<(String
, Vec
<InnerSpan
>)>,
326 // In LTO build we may get srcloc values from other crates which are invalid
327 // since they use a different source map. To be safe we just suppress these
329 if matches
!(cgcx
.lto
, Lto
::Fat
| Lto
::Thin
) {
332 let level
= match level
{
333 llvm
::DiagnosticLevel
::Error
=> Level
::Error { lint: false }
,
334 llvm
::DiagnosticLevel
::Warning
=> Level
::Warning(None
),
335 llvm
::DiagnosticLevel
::Note
| llvm
::DiagnosticLevel
::Remark
=> Level
::Note
,
337 cgcx
.diag_emitter
.inline_asm_error(cookie
as u32, msg
, level
, source
);
340 unsafe extern "C" fn diagnostic_handler(info
: &DiagnosticInfo
, user
: *mut c_void
) {
344 let (cgcx
, diag_handler
) = *(user
as *const (&CodegenContext
<LlvmCodegenBackend
>, &Handler
));
346 match llvm
::diagnostic
::Diagnostic
::unpack(info
) {
347 llvm
::diagnostic
::InlineAsm(inline
) => {
348 report_inline_asm(cgcx
, inline
.message
, inline
.level
, inline
.cookie
, inline
.source
);
351 llvm
::diagnostic
::Optimization(opt
) => {
352 let enabled
= match cgcx
.remark
{
354 Passes
::Some(ref v
) => v
.iter().any(|s
| *s
== opt
.pass_name
),
358 diag_handler
.note_without_error(&format
!(
360 opt
.filename
, opt
.line
, opt
.column
, opt
.pass_name
, opt
.message
,
364 llvm
::diagnostic
::PGO(diagnostic_ref
) | llvm
::diagnostic
::Linker(diagnostic_ref
) => {
365 let msg
= llvm
::build_string(|s
| {
366 llvm
::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref
, s
)
368 .expect("non-UTF8 diagnostic");
369 diag_handler
.warn(&msg
);
371 llvm
::diagnostic
::Unsupported(diagnostic_ref
) => {
372 let msg
= llvm
::build_string(|s
| {
373 llvm
::LLVMRustWriteDiagnosticInfoToString(diagnostic_ref
, s
)
375 .expect("non-UTF8 diagnostic");
376 diag_handler
.err(&msg
);
378 llvm
::diagnostic
::UnknownDiagnostic(..) => {}
382 fn get_pgo_gen_path(config
: &ModuleConfig
) -> Option
<CString
> {
383 match config
.pgo_gen
{
384 SwitchWithOptPath
::Enabled(ref opt_dir_path
) => {
385 let path
= if let Some(dir_path
) = opt_dir_path
{
386 dir_path
.join("default_%m.profraw")
388 PathBuf
::from("default_%m.profraw")
391 Some(CString
::new(format
!("{}", path
.display())).unwrap())
393 SwitchWithOptPath
::Disabled
=> None
,
397 fn get_pgo_use_path(config
: &ModuleConfig
) -> Option
<CString
> {
401 .map(|path_buf
| CString
::new(path_buf
.to_string_lossy().as_bytes()).unwrap())
404 fn get_pgo_sample_use_path(config
: &ModuleConfig
) -> Option
<CString
> {
408 .map(|path_buf
| CString
::new(path_buf
.to_string_lossy().as_bytes()).unwrap())
411 fn get_instr_profile_output_path(config
: &ModuleConfig
) -> Option
<CString
> {
412 if config
.instrument_coverage
{
413 Some(CString
::new("default_%m_%p.profraw").unwrap())
419 pub(crate) unsafe fn llvm_optimize(
420 cgcx
: &CodegenContext
<LlvmCodegenBackend
>,
421 diag_handler
: &Handler
,
422 module
: &ModuleCodegen
<ModuleLlvm
>,
423 config
: &ModuleConfig
,
424 opt_level
: config
::OptLevel
,
425 opt_stage
: llvm
::OptStage
,
426 ) -> Result
<(), FatalError
> {
428 opt_level
!= config
::OptLevel
::Size
&& opt_level
!= config
::OptLevel
::SizeMin
;
429 let using_thin_buffers
= opt_stage
== llvm
::OptStage
::PreLinkThinLTO
|| config
.bitcode_needed();
430 let pgo_gen_path
= get_pgo_gen_path(config
);
431 let pgo_use_path
= get_pgo_use_path(config
);
432 let pgo_sample_use_path
= get_pgo_sample_use_path(config
);
433 let is_lto
= opt_stage
== llvm
::OptStage
::ThinLTO
|| opt_stage
== llvm
::OptStage
::FatLTO
;
434 let instr_profile_output_path
= get_instr_profile_output_path(config
);
435 // Sanitizer instrumentation is only inserted during the pre-link optimization stage.
436 let sanitizer_options
= if !is_lto
{
437 Some(llvm
::SanitizerOptions
{
438 sanitize_address
: config
.sanitizer
.contains(SanitizerSet
::ADDRESS
),
439 sanitize_address_recover
: config
.sanitizer_recover
.contains(SanitizerSet
::ADDRESS
),
440 sanitize_memory
: config
.sanitizer
.contains(SanitizerSet
::MEMORY
),
441 sanitize_memory_recover
: config
.sanitizer_recover
.contains(SanitizerSet
::MEMORY
),
442 sanitize_memory_track_origins
: config
.sanitizer_memory_track_origins
as c_int
,
443 sanitize_thread
: config
.sanitizer
.contains(SanitizerSet
::THREAD
),
444 sanitize_hwaddress
: config
.sanitizer
.contains(SanitizerSet
::HWADDRESS
),
445 sanitize_hwaddress_recover
: config
.sanitizer_recover
.contains(SanitizerSet
::HWADDRESS
),
451 let mut llvm_profiler
= if cgcx
.prof
.llvm_recording_enabled() {
452 Some(LlvmSelfProfiler
::new(cgcx
.prof
.get_self_profiler().unwrap()))
457 let llvm_selfprofiler
=
458 llvm_profiler
.as_mut().map(|s
| s
as *mut _
as *mut c_void
).unwrap_or(std
::ptr
::null_mut());
460 let extra_passes
= if !is_lto { config.passes.join(",") }
else { "".to_string() }
;
462 let llvm_plugins
= config
.llvm_plugins
.join(",");
464 // FIXME: NewPM doesn't provide a facility to pass custom InlineParams.
465 // We would have to add upstream support for this first, before we can support
466 // config.inline_threshold and our more aggressive default thresholds.
467 let result
= llvm
::LLVMRustOptimize(
468 module
.module_llvm
.llmod(),
469 &*module
.module_llvm
.tm
,
470 to_pass_builder_opt_level(opt_level
),
472 config
.no_prepopulate_passes
,
473 config
.verify_llvm_ir
,
475 config
.merge_functions
,
477 config
.vectorize_slp
,
478 config
.vectorize_loop
,
480 config
.emit_lifetime_markers
,
481 sanitizer_options
.as_ref(),
482 pgo_gen_path
.as_ref().map_or(std
::ptr
::null(), |s
| s
.as_ptr()),
483 pgo_use_path
.as_ref().map_or(std
::ptr
::null(), |s
| s
.as_ptr()),
484 config
.instrument_coverage
,
485 instr_profile_output_path
.as_ref().map_or(std
::ptr
::null(), |s
| s
.as_ptr()),
486 config
.instrument_gcov
,
487 pgo_sample_use_path
.as_ref().map_or(std
::ptr
::null(), |s
| s
.as_ptr()),
488 config
.debug_info_for_profiling
,
490 selfprofile_before_pass_callback
,
491 selfprofile_after_pass_callback
,
492 extra_passes
.as_ptr().cast(),
494 llvm_plugins
.as_ptr().cast(),
497 result
.into_result().map_err(|()| llvm_err(diag_handler
, "failed to run LLVM passes"))
500 // Unsafe due to LLVM calls.
501 pub(crate) unsafe fn optimize(
502 cgcx
: &CodegenContext
<LlvmCodegenBackend
>,
503 diag_handler
: &Handler
,
504 module
: &ModuleCodegen
<ModuleLlvm
>,
505 config
: &ModuleConfig
,
506 ) -> Result
<(), FatalError
> {
507 let _timer
= cgcx
.prof
.generic_activity_with_arg("LLVM_module_optimize", &*module
.name
);
509 let llmod
= module
.module_llvm
.llmod();
510 let llcx
= &*module
.module_llvm
.llcx
;
511 let _handlers
= DiagnosticHandlers
::new(cgcx
, diag_handler
, llcx
);
513 let module_name
= module
.name
.clone();
514 let module_name
= Some(&module_name
[..]);
516 if config
.emit_no_opt_bc
{
517 let out
= cgcx
.output_filenames
.temp_path_ext("no-opt.bc", module_name
);
518 let out
= path_to_c_string(&out
);
519 llvm
::LLVMWriteBitcodeToFile(llmod
, out
.as_ptr());
522 if let Some(opt_level
) = config
.opt_level
{
523 let opt_stage
= match cgcx
.lto
{
524 Lto
::Fat
=> llvm
::OptStage
::PreLinkFatLTO
,
525 Lto
::Thin
| Lto
::ThinLocal
=> llvm
::OptStage
::PreLinkThinLTO
,
526 _
if cgcx
.opts
.cg
.linker_plugin_lto
.enabled() => llvm
::OptStage
::PreLinkThinLTO
,
527 _
=> llvm
::OptStage
::PreLinkNoLTO
,
529 return llvm_optimize(cgcx
, diag_handler
, module
, config
, opt_level
, opt_stage
);
535 cgcx
: &CodegenContext
<LlvmCodegenBackend
>,
536 diag_handler
: &Handler
,
537 mut modules
: Vec
<ModuleCodegen
<ModuleLlvm
>>,
538 ) -> Result
<ModuleCodegen
<ModuleLlvm
>, FatalError
> {
539 use super::lto
::{Linker, ModuleBuffer}
;
540 // Sort the modules by name to ensure deterministic behavior.
541 modules
.sort_by(|a
, b
| a
.name
.cmp(&b
.name
));
542 let (first
, elements
) =
543 modules
.split_first().expect("Bug! modules must contain at least one module.");
545 let mut linker
= Linker
::new(first
.module_llvm
.llmod());
546 for module
in elements
{
547 let _timer
= cgcx
.prof
.generic_activity_with_arg("LLVM_link_module", &*module
.name
);
548 let buffer
= ModuleBuffer
::new(module
.module_llvm
.llmod());
549 linker
.add(buffer
.data()).map_err(|()| {
550 let msg
= format
!("failed to serialize module {:?}", module
.name
);
551 llvm_err(diag_handler
, &msg
)
555 Ok(modules
.remove(0))
558 pub(crate) unsafe fn codegen(
559 cgcx
: &CodegenContext
<LlvmCodegenBackend
>,
560 diag_handler
: &Handler
,
561 module
: ModuleCodegen
<ModuleLlvm
>,
562 config
: &ModuleConfig
,
563 ) -> Result
<CompiledModule
, FatalError
> {
564 let _timer
= cgcx
.prof
.generic_activity_with_arg("LLVM_module_codegen", &*module
.name
);
566 let llmod
= module
.module_llvm
.llmod();
567 let llcx
= &*module
.module_llvm
.llcx
;
568 let tm
= &*module
.module_llvm
.tm
;
569 let module_name
= module
.name
.clone();
570 let module_name
= Some(&module_name
[..]);
571 let handlers
= DiagnosticHandlers
::new(cgcx
, diag_handler
, llcx
);
573 if cgcx
.msvc_imps_needed
{
574 create_msvc_imps(cgcx
, llcx
, llmod
);
577 // A codegen-specific pass manager is used to generate object
578 // files for an LLVM module.
580 // Apparently each of these pass managers is a one-shot kind of
581 // thing, so we create a new one for each type of output. The
582 // pass manager passed to the closure should be ensured to not
583 // escape the closure itself, and the manager should only be
585 unsafe fn with_codegen
<'ll
, F
, R
>(
586 tm
: &'ll llvm
::TargetMachine
,
587 llmod
: &'ll llvm
::Module
,
592 F
: FnOnce(&'ll
mut PassManager
<'ll
>) -> R
,
594 let cpm
= llvm
::LLVMCreatePassManager();
595 llvm
::LLVMAddAnalysisPasses(tm
, cpm
);
596 llvm
::LLVMRustAddLibraryInfo(cpm
, llmod
, no_builtins
);
600 // Two things to note:
601 // - If object files are just LLVM bitcode we write bitcode, copy it to
602 // the .o file, and delete the bitcode if it wasn't otherwise
604 // - If we don't have the integrated assembler then we need to emit
605 // asm from LLVM and use `gcc` to create the object file.
607 let bc_out
= cgcx
.output_filenames
.temp_path(OutputType
::Bitcode
, module_name
);
608 let obj_out
= cgcx
.output_filenames
.temp_path(OutputType
::Object
, module_name
);
610 if config
.bitcode_needed() {
613 .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &*module
.name
);
614 let thin
= ThinBuffer
::new(llmod
, config
.emit_thin_lto
);
615 let data
= thin
.data();
617 if let Some(bitcode_filename
) = bc_out
.file_name() {
618 cgcx
.prof
.artifact_size(
620 bitcode_filename
.to_string_lossy(),
625 if config
.emit_bc
|| config
.emit_obj
== EmitObj
::Bitcode
{
628 .generic_activity_with_arg("LLVM_module_codegen_emit_bitcode", &*module
.name
);
629 if let Err(e
) = fs
::write(&bc_out
, data
) {
630 let msg
= format
!("failed to write bytecode to {}: {}", bc_out
.display(), e
);
631 diag_handler
.err(&msg
);
635 if config
.emit_obj
== EmitObj
::ObjectCode(BitcodeSection
::Full
) {
638 .generic_activity_with_arg("LLVM_module_codegen_embed_bitcode", &*module
.name
);
639 embed_bitcode(cgcx
, llcx
, llmod
, &config
.bc_cmdline
, data
);
645 cgcx
.prof
.generic_activity_with_arg("LLVM_module_codegen_emit_ir", &*module
.name
);
646 let out
= cgcx
.output_filenames
.temp_path(OutputType
::LlvmAssembly
, module_name
);
647 let out_c
= path_to_c_string(&out
);
649 extern "C" fn demangle_callback(
650 input_ptr
: *const c_char
,
652 output_ptr
: *mut c_char
,
656 unsafe { slice::from_raw_parts(input_ptr as *const u8, input_len as usize) }
;
658 let Ok(input
) = str::from_utf8(input
) else { return 0 }
;
660 let output
= unsafe {
661 slice
::from_raw_parts_mut(output_ptr
as *mut u8, output_len
as usize)
663 let mut cursor
= io
::Cursor
::new(output
);
665 let Ok(demangled
) = rustc_demangle
::try_demangle(input
) else { return 0 }
;
667 if write
!(cursor
, "{:#}", demangled
).is_err() {
668 // Possible only if provided buffer is not big enough
672 cursor
.position() as size_t
675 let result
= llvm
::LLVMRustPrintModule(llmod
, out_c
.as_ptr(), demangle_callback
);
677 if result
== llvm
::LLVMRustResult
::Success
{
678 record_artifact_size(&cgcx
.prof
, "llvm_ir", &out
);
681 result
.into_result().map_err(|()| {
682 let msg
= format
!("failed to write LLVM IR to {}", out
.display());
683 llvm_err(diag_handler
, &msg
)
689 cgcx
.prof
.generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module
.name
);
690 let path
= cgcx
.output_filenames
.temp_path(OutputType
::Assembly
, module_name
);
692 // We can't use the same module for asm and object code output,
693 // because that triggers various errors like invalid IR or broken
694 // binaries. So we must clone the module to produce the asm output
695 // if we are also producing object code.
696 let llmod
= if let EmitObj
::ObjectCode(_
) = config
.emit_obj
{
697 llvm
::LLVMCloneModule(llmod
)
701 with_codegen(tm
, llmod
, config
.no_builtins
, |cpm
| {
709 llvm
::FileType
::AssemblyFile
,
715 match config
.emit_obj
{
716 EmitObj
::ObjectCode(_
) => {
719 .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &*module
.name
);
721 let dwo_out
= cgcx
.output_filenames
.temp_path_dwo(module_name
);
722 let dwo_out
= match (cgcx
.split_debuginfo
, cgcx
.split_dwarf_kind
) {
723 // Don't change how DWARF is emitted when disabled.
724 (SplitDebuginfo
::Off
, _
) => None
,
725 // Don't provide a DWARF object path if split debuginfo is enabled but this is
726 // a platform that doesn't support Split DWARF.
727 _
if !cgcx
.target_can_use_split_dwarf
=> None
,
728 // Don't provide a DWARF object path in single mode, sections will be written
729 // into the object as normal but ignored by linker.
730 (_
, SplitDwarfKind
::Single
) => None
,
731 // Emit (a subset of the) DWARF into a separate dwarf object file in split
733 (_
, SplitDwarfKind
::Split
) => Some(dwo_out
.as_path()),
736 with_codegen(tm
, llmod
, config
.no_builtins
, |cpm
| {
744 llvm
::FileType
::ObjectFile
,
750 EmitObj
::Bitcode
=> {
751 debug
!("copying bitcode {:?} to obj {:?}", bc_out
, obj_out
);
752 if let Err(e
) = link_or_copy(&bc_out
, &obj_out
) {
753 diag_handler
.err(&format
!("failed to copy bitcode to object file: {}", e
));
757 debug
!("removing_bitcode {:?}", bc_out
);
758 ensure_removed(diag_handler
, &bc_out
);
768 Ok(module
.into_compiled_module(
769 config
.emit_obj
!= EmitObj
::None
,
770 cgcx
.target_can_use_split_dwarf
771 && cgcx
.split_debuginfo
!= SplitDebuginfo
::Off
772 && cgcx
.split_dwarf_kind
== SplitDwarfKind
::Split
,
774 &cgcx
.output_filenames
,
/// Builds module-level inline assembly that defines `section_name` with the
/// given assembler `section_flags` and fills it with `data` via an `.ascii`
/// directive.
///
/// Bytes that would terminate or escape the `.ascii` string literal (`\` and
/// `"`) are backslash-escaped. Bytes outside the printable ASCII range
/// (below 0x20 or at/above 0x80) are emitted as octal escapes so the
/// generated assembly stays valid UTF-8.
fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: &[u8]) -> Vec<u8> {
    let mut asm = format!(".section {},\"{}\"\n", section_name, section_flags).into_bytes();
    asm.extend_from_slice(b".ascii \"");
    // Worst case each byte expands to 4 output bytes, but reserving the raw
    // length already avoids most reallocation for typical data.
    asm.reserve(data.len());
    for &byte in data {
        if byte == b'\\' || byte == b'"' {
            asm.push(b'\\');
            asm.push(byte);
        } else if byte < 0x20 || byte >= 0x80 {
            // Avoid non UTF-8 inline assembly. Use octal escape sequence, because it is fixed
            // width, while hex escapes will consume following characters.
            asm.push(b'\\');
            asm.push(b'0' + ((byte >> 6) & 0x7));
            asm.push(b'0' + ((byte >> 3) & 0x7));
            asm.push(b'0' + (byte & 0x7));
        } else {
            asm.push(byte);
        }
    }
    asm.extend_from_slice(b"\"\n");
    asm
}
801 /// Embed the bitcode of an LLVM module in the LLVM module itself.
803 /// This is done primarily for iOS where it appears to be standard to compile C
804 /// code at least with `-fembed-bitcode` which creates two sections in the
807 /// * __LLVM,__bitcode
808 /// * __LLVM,__cmdline
810 /// It appears *both* of these sections are necessary to get the linker to
811 /// recognize what's going on. A suitable cmdline value is taken from the
814 /// Furthermore debug/O1 builds don't actually embed bitcode but rather just
815 /// embed an empty section.
817 /// Basically all of this is us attempting to follow in the footsteps of clang
818 /// on iOS. See #35968 for lots more info.
819 unsafe fn embed_bitcode(
820 cgcx: &CodegenContext<LlvmCodegenBackend>,
821 llcx: &llvm::Context,
822 llmod: &llvm::Module,
826 // We're adding custom sections to the output object file, but we definitely
827 // do not want these custom sections to make their way into the final linked
828 // executable. The purpose of these custom sections is for tooling
829 // surrounding object files to work with the LLVM IR, if necessary. For
830 // example rustc's own LTO will look for LLVM IR inside of the object file
831 // in these sections by default.
833 // To handle this is a bit different depending on the object file format
834 // used by the backend, broken down into a few different categories:
836 // * Mach-O - this is for macOS. Inspecting the source code for the native
837 // linker here shows that the `.llvmbc` and `.llvmcmd` sections are
838 // automatically skipped by the linker. In that case there's nothing extra
839 // that we need to do here.
841 // * Wasm - the native LLD linker is hard-coded to skip `.llvmbc` and
842 // `.llvmcmd` sections, so there's nothing extra we need to do.
844 // * COFF - if we don't do anything the linker will by default copy all
845 // these sections to the output artifact, not what we want! To subvert
846 // this we want to flag the sections we inserted here as
847 // `IMAGE_SCN_LNK_REMOVE`.
849 // * ELF - this is very similar to COFF above. One difference is that these
850 // sections are removed from the output linked artifact when
851 // `--gc-sections` is passed, which we pass by default. If that flag isn't
852 // passed though then these sections will show up in the final output.
853 // Additionally the flag that we need to set here is `SHF_EXCLUDE`.
855 // Unfortunately, LLVM provides no way to set custom section flags. For ELF
856 // and COFF we emit the sections using module level inline assembly for that
857 // reason (see issue #90326 for historical background).
858 let is_apple = cgcx.opts.target_triple.triple().contains("-ios
")
859 || cgcx.opts.target_triple.triple().contains("-darwin
")
860 || cgcx.opts.target_triple.triple().contains("-tvos
")
861 || cgcx.opts.target_triple.triple().contains("-watchos
");
863 || cgcx.opts.target_triple.triple().starts_with("wasm
")
864 || cgcx.opts.target_triple.triple().starts_with("asmjs
")
866 // We don't need custom section flags, create LLVM globals.
867 let llconst = common::bytes_in_context(llcx, bitcode);
868 let llglobal = llvm::LLVMAddGlobal(
870 common::val_ty(llconst),
871 "rustc
.embedded
.module
\0".as_ptr().cast(),
873 llvm::LLVMSetInitializer(llglobal, llconst);
875 let section = if is_apple { "__LLVM,__bitcode\0" } else { ".llvmbc\0" };
876 llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
877 llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
878 llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
880 let llconst = common::bytes_in_context(llcx, cmdline.as_bytes());
881 let llglobal = llvm::LLVMAddGlobal(
883 common::val_ty(llconst),
884 "rustc
.embedded
.cmdline
\0".as_ptr().cast(),
886 llvm::LLVMSetInitializer(llglobal, llconst);
887 let section = if is_apple { "__LLVM,__cmdline\0" } else { ".llvmcmd\0" };
888 llvm::LLVMSetSection(llglobal, section.as_ptr().cast());
889 llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
891 // We need custom section flags, so emit module-level inline assembly.
892 let section_flags = if cgcx.is_pe_coff { "n" } else { "e" };
893 let asm = create_section_with_flags_asm(".llvmbc
", section_flags, bitcode);
894 llvm::LLVMRustAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
895 let asm = create_section_with_flags_asm(".llvmcmd
", section_flags, cmdline.as_bytes());
896 llvm::LLVMRustAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len());
900 // Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
901 // This is required to satisfy `dllimport` references to static data in .rlibs
902 // when using MSVC linker. We do this only for data, as linker can fix up
903 // code references on its own.
904 // See #26591, #27438
906 cgcx: &CodegenContext<LlvmCodegenBackend>,
907 llcx: &llvm::Context,
908 llmod: &llvm::Module,
910 if !cgcx.msvc_imps_needed {
913 // The x86 ABI seems to require that leading underscores are added to symbol
914 // names, so we need an extra underscore on x86. There's also a leading
915 // '\x01' here which disables LLVM's symbol mangling (e.g., no extra
916 // underscores added in front).
917 let prefix = if cgcx.target_arch == "x86
" { "\x01__imp__" } else { "\x01__imp_" };
920 let i8p_ty = Type::i8p_llcx(llcx);
921 let globals = base::iter_globals(llmod)
923 llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
924 && llvm::LLVMIsDeclaration(val) == 0
927 // Exclude some symbols that we know are not Rust symbols.
928 let name = llvm::get_value_name(val);
929 if ignored(name) { None } else { Some((val, name)) }
931 .map(move |(val, name)| {
932 let mut imp_name = prefix.as_bytes().to_vec();
933 imp_name.extend(name);
934 let imp_name = CString::new(imp_name).unwrap();
937 .collect::<Vec<_>>();
939 for (imp_name, val) in globals {
940 let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast());
941 llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
942 llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
946 // Use this function to exclude certain symbols from `__imp` generation.
947 fn ignored(symbol_name: &[u8]) -> bool {
948 // These are symbols generated by LLVM's profiling instrumentation
949 symbol_name.starts_with(b"__llvm_profile_
")
953 fn record_artifact_size(
954 self_profiler_ref: &SelfProfilerRef,
955 artifact_kind: &'static str,
958 // Don't stat the file if we are not going to record its size.
959 if !self_profiler_ref.enabled() {
963 if let Some(artifact_name) = path.file_name() {
964 let file_size = std::fs::metadata(path).map(|m| m.len()).unwrap_or(0);
965 self_profiler_ref.artifact_size(artifact_kind, artifact_name.to_string_lossy(), file_size);