]> git.proxmox.com Git - rustc.git/blob - compiler/rustc_codegen_llvm/src/context.rs
79ddfd884dfac69f29fca10fed4063dda0c7c226
[rustc.git] / compiler / rustc_codegen_llvm / src / context.rs
1 use crate::attributes;
2 use crate::back::write::to_llvm_code_model;
3 use crate::callee::get_fn;
4 use crate::coverageinfo;
5 use crate::debuginfo;
6 use crate::llvm;
7 use crate::llvm_util;
8 use crate::type_::Type;
9 use crate::value::Value;
10
11 use cstr::cstr;
12 use rustc_codegen_ssa::base::wants_msvc_seh;
13 use rustc_codegen_ssa::traits::*;
14 use rustc_data_structures::base_n;
15 use rustc_data_structures::fx::FxHashMap;
16 use rustc_data_structures::small_c_str::SmallCStr;
17 use rustc_hir::def_id::DefId;
18 use rustc_middle::mir::mono::CodegenUnit;
19 use rustc_middle::ty::layout::{
20 FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, LayoutError, LayoutOfHelpers,
21 TyAndLayout,
22 };
23 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
24 use rustc_middle::{bug, span_bug};
25 use rustc_session::config::{BranchProtection, CFGuard, CFProtection};
26 use rustc_session::config::{CrateType, DebugInfo, PAuthKey, PacRet};
27 use rustc_session::Session;
28 use rustc_span::source_map::Span;
29 use rustc_target::abi::{
30 call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx,
31 };
32 use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
33 use smallvec::SmallVec;
34
35 use std::cell::{Cell, RefCell};
36 use std::ffi::CStr;
37 use std::str;
38
/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
/// `llvm::Context` so that several compilation units may be optimized in parallel.
/// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`.
pub struct CodegenCx<'ll, 'tcx> {
    /// The type context for the crate being compiled.
    pub tcx: TyCtxt<'tcx>,
    /// Whether arithmetic overflow checks are enabled (from `Session::overflow_checks`).
    pub check_overflow: bool,
    /// Whether to emit `dllimport`/`dllexport` storage attributes; true for
    /// Windows-like targets (see the long discussion in `CodegenCx::new`).
    pub use_dll_storage_attrs: bool,
    /// The LLVM TLS mode derived from the session's TLS model setting.
    pub tls_model: llvm::ThreadLocalMode,

    /// The LLVM module that code for this codegen unit is emitted into.
    pub llmod: &'ll llvm::Module,
    /// The LLVM context owning `llmod` and every value created within it.
    pub llcx: &'ll llvm::Context,
    /// The codegen unit this context is generating code for.
    pub codegen_unit: &'tcx CodegenUnit<'tcx>,

    /// Cache instances of monomorphic and polymorphic items
    pub instances: RefCell<FxHashMap<Instance<'tcx>, &'ll Value>>,
    /// Cache generated vtables
    pub vtables:
        RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>,
    /// Cache of constant strings.
    pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>,

    /// Reverse-direction for const ptrs cast from globals.
    ///
    /// Key is a Value holding a `*T`,
    /// Val is a Value holding a `*[T]`.
    ///
    /// Needed because LLVM loses pointer->pointee association
    /// when we ptrcast, and we have to ptrcast during codegen
    /// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not
    /// a pointer to an LLVM array type. Similar for trait objects.
    pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>,

    /// Cache of emitted const globals (value -> global)
    pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,

    /// List of globals for static variables which need to be passed to the
    /// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete.
    /// (We have to make sure we don't invalidate any Values referring
    /// to constants.)
    pub statics_to_rauw: RefCell<Vec<(&'ll Value, &'ll Value)>>,

    /// Statics that will be placed in the llvm.used variable
    /// See <https://llvm.org/docs/LangRef.html#the-llvm-used-global-variable> for details
    pub used_statics: RefCell<Vec<&'ll Value>>,

    /// Statics that will be placed in the llvm.compiler.used variable
    /// See <https://llvm.org/docs/LangRef.html#the-llvm-compiler-used-global-variable> for details
    pub compiler_used_statics: RefCell<Vec<&'ll Value>>,

    /// Mapping of non-scalar types to llvm types and field remapping if needed.
    pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), TypeLowering<'ll>>>,

    /// Mapping of scalar types to llvm types.
    pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,

    /// Cache of pointee-info lookups, keyed by the pointer type and offset.
    pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
    /// LLVM integer type with the target's pointer width (used for `isize`/`usize`).
    pub isize_ty: &'ll Type,

    /// Coverage instrumentation state; `Some` iff `-C instrument-coverage` is enabled.
    pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'ll, 'tcx>>,
    /// Debuginfo state for this codegen unit; `Some` iff debuginfo emission is enabled.
    pub dbg_cx: Option<debuginfo::CodegenUnitDebugContext<'ll, 'tcx>>,

    /// Lazily-initialized exception-handling personality function
    /// (populated by `MiscMethods::eh_personality`).
    eh_personality: Cell<Option<&'ll Value>>,
    /// Lazily-initialized typeinfo value used by exception handling;
    /// `None` until first requested.
    eh_catch_typeinfo: Cell<Option<&'ll Value>>,
    /// Lazily-initialized `rust_try` helper (function type and value);
    /// `None` until first requested.
    pub rust_try_fn: Cell<Option<(&'ll Type, &'ll Value)>>,

    /// Cache of declared LLVM intrinsics, keyed by intrinsic name
    /// (filled by `insert_intrinsic`, queried by `get_intrinsic`).
    intrinsics: RefCell<FxHashMap<&'static str, (&'ll Type, &'ll Value)>>,

    /// A counter that is used for generating local symbol names
    local_gen_sym_counter: Cell<usize>,

    /// `codegen_static` will sometimes create a second global variable with a
    /// different type and clear the symbol name of the original global.
    /// `global_asm!` needs to be able to find this new global so that it can
    /// compute the correct mangled symbol name to insert into the asm.
    pub renamed_statics: RefCell<FxHashMap<DefId, &'ll Value>>,
}
115
/// Result of lowering a non-scalar Rust type to an LLVM type, cached in
/// `CodegenCx::type_lowering`.
pub struct TypeLowering<'ll> {
    /// Associated LLVM type
    pub lltype: &'ll Type,

    /// If padding is used the slice maps fields from source order
    /// to llvm order.
    pub field_remapping: Option<SmallVec<[u32; 4]>>,
}
124
125 fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
126 match tls_model {
127 TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic,
128 TlsModel::LocalDynamic => llvm::ThreadLocalMode::LocalDynamic,
129 TlsModel::InitialExec => llvm::ThreadLocalMode::InitialExec,
130 TlsModel::LocalExec => llvm::ThreadLocalMode::LocalExec,
131 }
132 }
133
/// Create and configure a fresh LLVM module for one codegen unit.
///
/// Besides creating the module itself, this applies every per-module setting
/// that must be in place before code is generated into it: the data layout,
/// the normalized target triple, PIC/PIE levels, the code model, and a number
/// of module flags (PLT avoidance, CFI jump tables, Control Flow Guard,
/// branch protection, CET-style control-flow protection, and virtual-function
/// elimination).
///
/// Safety: calls directly into the LLVM C API; `llcx` must be a live context
/// and the returned module is owned by that context.
pub unsafe fn create_module<'ll>(
    tcx: TyCtxt<'_>,
    llcx: &'ll llvm::Context,
    mod_name: &str,
) -> &'ll llvm::Module {
    let sess = tcx.sess;
    let mod_name = SmallCStr::new(mod_name);
    let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);

    let mut target_data_layout = sess.target.data_layout.to_string();
    let llvm_version = llvm_util::get_version();
    // Older LLVM versions do not understand some components of the data-layout
    // strings shipped with our targets; rewrite the string so the module still
    // matches what those LLVM versions expect.
    if llvm_version < (14, 0, 0) {
        if sess.target.llvm_target == "i686-pc-windows-msvc"
            || sess.target.llvm_target == "i586-pc-windows-msvc"
        {
            target_data_layout =
                "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:32-n8:16:32-a:0:32-S32"
                    .to_string();
        }
        if sess.target.arch == "wasm32" {
            target_data_layout = target_data_layout.replace("-p10:8:8-p20:8:8", "");
        }
    }
    if llvm_version < (16, 0, 0) {
        if sess.target.arch == "s390x" {
            target_data_layout = target_data_layout.replace("-v128:64", "");
        }
    }

    // Ensure the data-layout values hardcoded remain the defaults.
    if sess.target.is_builtin {
        // Ask a throwaway target machine for LLVM's idea of the default
        // layout for this target, then compare it against our own.
        let tm = crate::back::write::create_informational_target_machine(tcx.sess);
        llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
        llvm::LLVMRustDisposeTargetMachine(tm);

        let llvm_data_layout = llvm::LLVMGetDataLayoutStr(llmod);
        let llvm_data_layout = str::from_utf8(CStr::from_ptr(llvm_data_layout).to_bytes())
            .expect("got a non-UTF8 data-layout from LLVM");

        // Unfortunately LLVM target specs change over time, and right now we
        // don't have proper support to work with any more than one
        // `data_layout` than the one that is in the rust-lang/rust repo. If
        // this compiler is configured against a custom LLVM, we may have a
        // differing data layout, even though we should update our own to use
        // that one.
        //
        // As an interim hack, if CFG_LLVM_ROOT is not an empty string then we
        // disable this check entirely as we may be configured with something
        // that has a different target layout.
        //
        // Unsure if this will actually cause breakage when rustc is configured
        // as such.
        //
        // FIXME(#34960)
        let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or("");
        let custom_llvm_used = cfg_llvm_root.trim() != "";

        if !custom_llvm_used && target_data_layout != llvm_data_layout {
            bug!(
                "data-layout for target `{rustc_target}`, `{rustc_layout}`, \
                  differs from LLVM target's `{llvm_target}` default layout, `{llvm_layout}`",
                rustc_target = sess.opts.target_triple,
                rustc_layout = target_data_layout,
                llvm_target = sess.target.llvm_target,
                llvm_layout = llvm_data_layout
            );
        }
    }

    let data_layout = SmallCStr::new(&target_data_layout);
    llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());

    let llvm_target = SmallCStr::new(&sess.target.llvm_target);
    llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());

    let reloc_model = sess.relocation_model();
    if matches!(reloc_model, RelocModel::Pic | RelocModel::Pie) {
        llvm::LLVMRustSetModulePICLevel(llmod);
        // PIE is potentially more effective than PIC, but can only be used in executables.
        // If all our outputs are executables, then we can relax PIC to PIE.
        if reloc_model == RelocModel::Pie
            || sess.crate_types().iter().all(|ty| *ty == CrateType::Executable)
        {
            llvm::LLVMRustSetModulePIELevel(llmod);
        }
    }

    // Linking object files with different code models is undefined behavior
    // because the compiler would have to generate additional code (to span
    // longer jumps) if a larger code model is used with a smaller one.
    //
    // See https://reviews.llvm.org/D52322 and https://reviews.llvm.org/D52323.
    llvm::LLVMRustSetModuleCodeModel(llmod, to_llvm_code_model(sess.code_model()));

    // If skipping the PLT is enabled, we need to add some module metadata
    // to ensure intrinsic calls don't use it.
    if !sess.needs_plt() {
        let avoid_plt = "RtLibUseGOT\0".as_ptr().cast();
        llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1);
    }

    if sess.is_sanitizer_cfi_enabled() {
        // FIXME(rcvalle): Add support for non canonical jump tables.
        let canonical_jump_tables = "CFI Canonical Jump Tables\0".as_ptr().cast();
        // FIXME(rcvalle): Add it with Override behavior flag.
        llvm::LLVMRustAddModuleFlag(
            llmod,
            llvm::LLVMModFlagBehavior::Warning,
            canonical_jump_tables,
            1,
        );
    }

    // Control Flow Guard is currently only supported by the MSVC linker on Windows.
    if sess.target.is_like_msvc {
        match sess.opts.cg.control_flow_guard {
            CFGuard::Disabled => {}
            CFGuard::NoChecks => {
                // Set `cfguard=1` module flag to emit metadata only.
                llvm::LLVMRustAddModuleFlag(
                    llmod,
                    llvm::LLVMModFlagBehavior::Warning,
                    "cfguard\0".as_ptr() as *const _,
                    1,
                )
            }
            CFGuard::Checks => {
                // Set `cfguard=2` module flag to emit metadata and checks.
                llvm::LLVMRustAddModuleFlag(
                    llmod,
                    llvm::LLVMModFlagBehavior::Warning,
                    "cfguard\0".as_ptr() as *const _,
                    2,
                )
            }
        }
    }

    // Branch protection (BTI / PAC-RET) is an AArch64-only feature; reject the
    // flag on other architectures, otherwise emit the four related module flags.
    if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection {
        if sess.target.arch != "aarch64" {
            sess.err("-Zbranch-protection is only supported on aarch64");
        } else {
            llvm::LLVMRustAddModuleFlag(
                llmod,
                llvm::LLVMModFlagBehavior::Error,
                "branch-target-enforcement\0".as_ptr().cast(),
                bti.into(),
            );
            llvm::LLVMRustAddModuleFlag(
                llmod,
                llvm::LLVMModFlagBehavior::Error,
                "sign-return-address\0".as_ptr().cast(),
                pac_ret.is_some().into(),
            );
            // When PAC-RET is disabled, fall back to a default option set so
            // the remaining flags still get well-defined values.
            let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A });
            llvm::LLVMRustAddModuleFlag(
                llmod,
                llvm::LLVMModFlagBehavior::Error,
                "sign-return-address-all\0".as_ptr().cast(),
                pac_opts.leaf.into(),
            );
            llvm::LLVMRustAddModuleFlag(
                llmod,
                llvm::LLVMModFlagBehavior::Error,
                "sign-return-address-with-bkey\0".as_ptr().cast(),
                u32::from(pac_opts.key == PAuthKey::B),
            );
        }
    }

    // Pass on the control-flow protection flags to LLVM (equivalent to `-fcf-protection` in Clang).
    if let CFProtection::Branch | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
        llvm::LLVMRustAddModuleFlag(
            llmod,
            llvm::LLVMModFlagBehavior::Override,
            "cf-protection-branch\0".as_ptr().cast(),
            1,
        )
    }
    if let CFProtection::Return | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
        llvm::LLVMRustAddModuleFlag(
            llmod,
            llvm::LLVMModFlagBehavior::Override,
            "cf-protection-return\0".as_ptr().cast(),
            1,
        )
    }

    // Opt in to LLVM's virtual-function-elimination via its module flag.
    if sess.opts.unstable_opts.virtual_function_elimination {
        llvm::LLVMRustAddModuleFlag(
            llmod,
            llvm::LLVMModFlagBehavior::Error,
            "Virtual Function Elim\0".as_ptr().cast(),
            1,
        );
    }

    llmod
}
333
impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
    /// Build the per-codegen-unit context around an already-created LLVM module.
    ///
    /// Computes the session-derived settings (overflow checks, TLS model, DLL
    /// storage attributes), sets up the optional coverage and debuginfo
    /// contexts, and initializes every cache to empty.
    pub(crate) fn new(
        tcx: TyCtxt<'tcx>,
        codegen_unit: &'tcx CodegenUnit<'tcx>,
        llvm_module: &'ll crate::ModuleLlvm,
    ) -> Self {
        // An interesting part of Windows which MSVC forces our hand on (and
        // apparently MinGW didn't) is the usage of `dllimport` and `dllexport`
        // attributes in LLVM IR as well as native dependencies (in C these
        // correspond to `__declspec(dllimport)`).
        //
        // LD (BFD) in MinGW mode can often correctly guess `dllexport` but
        // relying on that can result in issues like #50176.
        // LLD won't support that and expects symbols with proper attributes.
        // Because of that we make MinGW target emit dllexport just like MSVC.
        // When it comes to dllimport we use it for constants but for functions
        // rely on the linker to do the right thing. Opposed to dllexport this
        // task is easy for them (both LD and LLD) and allows us to easily use
        // symbols from static libraries in shared libraries.
        //
        // Whenever a dynamic library is built on Windows it must have its public
        // interface specified by functions tagged with `dllexport` or otherwise
        // they're not available to be linked against. This poses a few problems
        // for the compiler, some of which are somewhat fundamental, but we use
        // the `use_dll_storage_attrs` variable below to attach the `dllexport`
        // attribute to all LLVM functions that are exported e.g., they're
        // already tagged with external linkage). This is suboptimal for a few
        // reasons:
        //
        // * If an object file will never be included in a dynamic library,
        //   there's no need to attach the dllexport attribute. Most object
        //   files in Rust are not destined to become part of a dll as binaries
        //   are statically linked by default.
        // * If the compiler is emitting both an rlib and a dylib, the same
        //   source object file is currently used but with MSVC this may be less
        //   feasible. The compiler may be able to get around this, but it may
        //   involve some invasive changes to deal with this.
        //
        // The flip side of this situation is that whenever you link to a dll and
        // you import a function from it, the import should be tagged with
        // `dllimport`. At this time, however, the compiler does not emit
        // `dllimport` for any declarations other than constants (where it is
        // required), which is again suboptimal for even more reasons!
        //
        // * Calling a function imported from another dll without using
        //   `dllimport` causes the linker/compiler to have extra overhead (one
        //   `jmp` instruction on x86) when calling the function.
        // * The same object file may be used in different circumstances, so a
        //   function may be imported from a dll if the object is linked into a
        //   dll, but it may be just linked against if linked into an rlib.
        // * The compiler has no knowledge about whether native functions should
        //   be tagged dllimport or not.
        //
        // For now the compiler takes the perf hit (I do not have any numbers to
        // this effect) by marking very little as `dllimport` and praying the
        // linker will take care of everything. Fixing this problem will likely
        // require adding a few attributes to Rust itself (feature gated at the
        // start) and then strongly recommending static linkage on Windows!
        let use_dll_storage_attrs = tcx.sess.target.is_like_windows;

        let check_overflow = tcx.sess.overflow_checks();

        let tls_model = to_llvm_tls_model(tcx.sess.tls_model());

        let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());

        // Coverage state is only needed when `-C instrument-coverage` is on.
        let coverage_cx = if tcx.sess.instrument_coverage() {
            let covctx = coverageinfo::CrateCoverageContext::new();
            Some(covctx)
        } else {
            None
        };

        // Debuginfo needs a compile-unit DI node rooted at this module.
        let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
            let dctx = debuginfo::CodegenUnitDebugContext::new(llmod);
            debuginfo::metadata::build_compile_unit_di_node(
                tcx,
                codegen_unit.name().as_str(),
                &dctx,
            );
            Some(dctx)
        } else {
            None
        };

        // `isize`/`usize` are pointer-sized on the target.
        let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits());

        CodegenCx {
            tcx,
            check_overflow,
            use_dll_storage_attrs,
            tls_model,
            llmod,
            llcx,
            codegen_unit,
            instances: Default::default(),
            vtables: Default::default(),
            const_str_cache: Default::default(),
            const_unsized: Default::default(),
            const_globals: Default::default(),
            statics_to_rauw: RefCell::new(Vec::new()),
            used_statics: RefCell::new(Vec::new()),
            compiler_used_statics: RefCell::new(Vec::new()),
            type_lowering: Default::default(),
            scalar_lltypes: Default::default(),
            pointee_infos: Default::default(),
            isize_ty,
            coverage_cx,
            dbg_cx,
            eh_personality: Cell::new(None),
            eh_catch_typeinfo: Cell::new(None),
            rust_try_fn: Cell::new(None),
            intrinsics: Default::default(),
            local_gen_sym_counter: Cell::new(0),
            renamed_statics: Default::default(),
        }
    }

    /// Accessor for the RAUW work-list of static globals (see the field
    /// documentation on `CodegenCx::statics_to_rauw`).
    pub(crate) fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
        &self.statics_to_rauw
    }

    /// The coverage-instrumentation context, or `None` when coverage is disabled.
    #[inline]
    pub fn coverage_context(&self) -> Option<&coverageinfo::CrateCoverageContext<'ll, 'tcx>> {
        self.coverage_cx.as_ref()
    }

    /// Create an appending-linkage global named `name` in the `llvm.metadata`
    /// section, initialized with `values` as an array of `i8*` — the shape
    /// LLVM expects for `llvm.used` / `llvm.compiler.used` style variables.
    pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
        let section = cstr!("llvm.metadata");
        let array = self.const_array(self.type_ptr_to(self.type_i8()), values);

        unsafe {
            let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
            llvm::LLVMSetInitializer(g, array);
            llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
            llvm::LLVMSetSection(g, section.as_ptr());
        }
    }
}
473
474 impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
475 fn vtables(
476 &self,
477 ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>
478 {
479 &self.vtables
480 }
481
482 fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
483 get_fn(self, instance)
484 }
485
486 fn get_fn_addr(&self, instance: Instance<'tcx>) -> &'ll Value {
487 get_fn(self, instance)
488 }
489
490 fn eh_personality(&self) -> &'ll Value {
491 // The exception handling personality function.
492 //
493 // If our compilation unit has the `eh_personality` lang item somewhere
494 // within it, then we just need to codegen that. Otherwise, we're
495 // building an rlib which will depend on some upstream implementation of
496 // this function, so we just codegen a generic reference to it. We don't
497 // specify any of the types for the function, we just make it a symbol
498 // that LLVM can later use.
499 //
500 // Note that MSVC is a little special here in that we don't use the
501 // `eh_personality` lang item at all. Currently LLVM has support for
502 // both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
503 // *name of the personality function* to decide what kind of unwind side
504 // tables/landing pads to emit. It looks like Dwarf is used by default,
505 // injecting a dependency on the `_Unwind_Resume` symbol for resuming
506 // an "exception", but for MSVC we want to force SEH. This means that we
507 // can't actually have the personality function be our standard
508 // `rust_eh_personality` function, but rather we wired it up to the
509 // CRT's custom personality function, which forces LLVM to consider
510 // landing pads as "landing pads for SEH".
511 if let Some(llpersonality) = self.eh_personality.get() {
512 return llpersonality;
513 }
514 let tcx = self.tcx;
515 let llfn = match tcx.lang_items().eh_personality() {
516 Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
517 ty::Instance::resolve(
518 tcx,
519 ty::ParamEnv::reveal_all(),
520 def_id,
521 tcx.intern_substs(&[]),
522 )
523 .unwrap()
524 .unwrap(),
525 ),
526 _ => {
527 let name = if wants_msvc_seh(self.sess()) {
528 "__CxxFrameHandler3"
529 } else {
530 "rust_eh_personality"
531 };
532 if let Some(llfn) = self.get_declared_value(name) {
533 llfn
534 } else {
535 let fty = self.type_variadic_func(&[], self.type_i32());
536 let llfn = self.declare_cfn(name, llvm::UnnamedAddr::Global, fty);
537 let target_cpu = attributes::target_cpu_attr(self);
538 attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[target_cpu]);
539 llfn
540 }
541 }
542 };
543 self.eh_personality.set(Some(llfn));
544 llfn
545 }
546
547 fn sess(&self) -> &Session {
548 self.tcx.sess
549 }
550
551 fn check_overflow(&self) -> bool {
552 self.check_overflow
553 }
554
555 fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
556 self.codegen_unit
557 }
558
559 fn set_frame_pointer_type(&self, llfn: &'ll Value) {
560 if let Some(attr) = attributes::frame_pointer_type_attr(self) {
561 attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[attr]);
562 }
563 }
564
565 fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
566 let mut attrs = SmallVec::<[_; 2]>::new();
567 attrs.push(attributes::target_cpu_attr(self));
568 attrs.extend(attributes::tune_cpu_attr(self));
569 attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs);
570 }
571
572 fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
573 if self.get_declared_value("main").is_none() {
574 Some(self.declare_cfn("main", llvm::UnnamedAddr::Global, fn_type))
575 } else {
576 // If the symbol already exists, it is an error: for example, the user wrote
577 // #[no_mangle] extern "C" fn main(..) {..}
578 // instead of #[start]
579 None
580 }
581 }
582 }
583
584 impl<'ll> CodegenCx<'ll, '_> {
585 pub(crate) fn get_intrinsic(&self, key: &str) -> (&'ll Type, &'ll Value) {
586 if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
587 return v;
588 }
589
590 self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key))
591 }
592
593 fn insert_intrinsic(
594 &self,
595 name: &'static str,
596 args: Option<&[&'ll llvm::Type]>,
597 ret: &'ll llvm::Type,
598 ) -> (&'ll llvm::Type, &'ll llvm::Value) {
599 let fn_ty = if let Some(args) = args {
600 self.type_func(args, ret)
601 } else {
602 self.type_variadic_func(&[], ret)
603 };
604 let f = self.declare_cfn(name, llvm::UnnamedAddr::No, fn_ty);
605 self.intrinsics.borrow_mut().insert(name, (fn_ty, f));
606 (fn_ty, f)
607 }
608
609 fn declare_intrinsic(&self, key: &str) -> Option<(&'ll Type, &'ll Value)> {
610 macro_rules! ifn {
611 ($name:expr, fn() -> $ret:expr) => (
612 if key == $name {
613 return Some(self.insert_intrinsic($name, Some(&[]), $ret));
614 }
615 );
616 ($name:expr, fn(...) -> $ret:expr) => (
617 if key == $name {
618 return Some(self.insert_intrinsic($name, None, $ret));
619 }
620 );
621 ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
622 if key == $name {
623 return Some(self.insert_intrinsic($name, Some(&[$($arg),*]), $ret));
624 }
625 );
626 }
627 macro_rules! mk_struct {
628 ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
629 }
630
631 let i8p = self.type_i8p();
632 let void = self.type_void();
633 let i1 = self.type_i1();
634 let t_i8 = self.type_i8();
635 let t_i16 = self.type_i16();
636 let t_i32 = self.type_i32();
637 let t_i64 = self.type_i64();
638 let t_i128 = self.type_i128();
639 let t_isize = self.type_isize();
640 let t_f32 = self.type_f32();
641 let t_f64 = self.type_f64();
642 let t_metadata = self.type_metadata();
643
644 ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
645 ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
646 ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
647 ifn!("llvm.wasm.trunc.unsigned.i64.f64", fn(t_f64) -> t_i64);
648 ifn!("llvm.wasm.trunc.signed.i32.f32", fn(t_f32) -> t_i32);
649 ifn!("llvm.wasm.trunc.signed.i32.f64", fn(t_f64) -> t_i32);
650 ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
651 ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);
652
653 ifn!("llvm.fptosi.sat.i8.f32", fn(t_f32) -> t_i8);
654 ifn!("llvm.fptosi.sat.i16.f32", fn(t_f32) -> t_i16);
655 ifn!("llvm.fptosi.sat.i32.f32", fn(t_f32) -> t_i32);
656 ifn!("llvm.fptosi.sat.i64.f32", fn(t_f32) -> t_i64);
657 ifn!("llvm.fptosi.sat.i128.f32", fn(t_f32) -> t_i128);
658 ifn!("llvm.fptosi.sat.i8.f64", fn(t_f64) -> t_i8);
659 ifn!("llvm.fptosi.sat.i16.f64", fn(t_f64) -> t_i16);
660 ifn!("llvm.fptosi.sat.i32.f64", fn(t_f64) -> t_i32);
661 ifn!("llvm.fptosi.sat.i64.f64", fn(t_f64) -> t_i64);
662 ifn!("llvm.fptosi.sat.i128.f64", fn(t_f64) -> t_i128);
663
664 ifn!("llvm.fptoui.sat.i8.f32", fn(t_f32) -> t_i8);
665 ifn!("llvm.fptoui.sat.i16.f32", fn(t_f32) -> t_i16);
666 ifn!("llvm.fptoui.sat.i32.f32", fn(t_f32) -> t_i32);
667 ifn!("llvm.fptoui.sat.i64.f32", fn(t_f32) -> t_i64);
668 ifn!("llvm.fptoui.sat.i128.f32", fn(t_f32) -> t_i128);
669 ifn!("llvm.fptoui.sat.i8.f64", fn(t_f64) -> t_i8);
670 ifn!("llvm.fptoui.sat.i16.f64", fn(t_f64) -> t_i16);
671 ifn!("llvm.fptoui.sat.i32.f64", fn(t_f64) -> t_i32);
672 ifn!("llvm.fptoui.sat.i64.f64", fn(t_f64) -> t_i64);
673 ifn!("llvm.fptoui.sat.i128.f64", fn(t_f64) -> t_i128);
674
675 ifn!("llvm.trap", fn() -> void);
676 ifn!("llvm.debugtrap", fn() -> void);
677 ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
678
679 ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
680 ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
681
682 ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
683 ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
684
685 ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
686 ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
687
688 ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
689 ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
690
691 ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
692 ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
693
694 ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
695 ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
696
697 ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
698 ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
699
700 ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
701 ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
702
703 ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
704 ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
705
706 ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
707 ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
708
709 ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
710 ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
711
712 ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
713 ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
714
715 ifn!("llvm.minnum.f32", fn(t_f32, t_f32) -> t_f32);
716 ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
717 ifn!("llvm.maxnum.f32", fn(t_f32, t_f32) -> t_f32);
718 ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);
719
720 ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
721 ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
722
723 ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
724 ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
725
726 ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
727 ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
728
729 ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
730 ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
731 ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
732 ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
733
734 ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
735 ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
736 ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
737 ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);
738
739 ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
740 ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
741 ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
742 ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
743 ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128);
744
745 ifn!("llvm.ctlz.i8", fn(t_i8, i1) -> t_i8);
746 ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
747 ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
748 ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
749 ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128);
750
751 ifn!("llvm.cttz.i8", fn(t_i8, i1) -> t_i8);
752 ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
753 ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
754 ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
755 ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128);
756
757 ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
758 ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
759 ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
760 ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128);
761
762 ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8);
763 ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16);
764 ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32);
765 ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64);
766 ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128);
767
768 ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
769 ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
770 ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
771 ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
772 ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
773
774 ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
775 ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
776 ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
777 ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
778 ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
779
780 ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
781 ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
782 ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
783 ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
784 ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
785
786 ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
787 ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
788 ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
789 ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
790 ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
791
792 ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
793 ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
794 ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
795 ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
796 ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
797
798 ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
799 ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
800 ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
801 ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
802 ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
803
804 ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
805 ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
806 ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
807 ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
808 ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
809
810 ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
811 ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
812 ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
813 ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
814 ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
815
816 ifn!("llvm.sadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
817 ifn!("llvm.sadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
818 ifn!("llvm.sadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
819 ifn!("llvm.sadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
820 ifn!("llvm.sadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
821
822 ifn!("llvm.uadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
823 ifn!("llvm.uadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
824 ifn!("llvm.uadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
825 ifn!("llvm.uadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
826 ifn!("llvm.uadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
827
828 ifn!("llvm.ssub.sat.i8", fn(t_i8, t_i8) -> t_i8);
829 ifn!("llvm.ssub.sat.i16", fn(t_i16, t_i16) -> t_i16);
830 ifn!("llvm.ssub.sat.i32", fn(t_i32, t_i32) -> t_i32);
831 ifn!("llvm.ssub.sat.i64", fn(t_i64, t_i64) -> t_i64);
832 ifn!("llvm.ssub.sat.i128", fn(t_i128, t_i128) -> t_i128);
833
834 ifn!("llvm.usub.sat.i8", fn(t_i8, t_i8) -> t_i8);
835 ifn!("llvm.usub.sat.i16", fn(t_i16, t_i16) -> t_i16);
836 ifn!("llvm.usub.sat.i32", fn(t_i32, t_i32) -> t_i32);
837 ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
838 ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);
839
840 ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void);
841 ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void);
842
843 ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
844 ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
845 ifn!("llvm.localescape", fn(...) -> void);
846 ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
847 ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
848
849 ifn!("llvm.assume", fn(i1) -> void);
850 ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
851
852 // This isn't an "LLVM intrinsic", but LLVM's optimization passes
853 // recognize it like one and we assume it exists in `core::slice::cmp`
854 match self.sess().target.arch.as_ref() {
855 "avr" | "msp430" => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i16),
856 _ => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i32),
857 }
858
859 // variadic intrinsics
860 ifn!("llvm.va_start", fn(i8p) -> void);
861 ifn!("llvm.va_end", fn(i8p) -> void);
862 ifn!("llvm.va_copy", fn(i8p, i8p) -> void);
863
864 if self.sess().instrument_coverage() {
865 ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
866 }
867
868 ifn!("llvm.type.test", fn(i8p, t_metadata) -> i1);
869 ifn!("llvm.type.checked.load", fn(i8p, t_i32, t_metadata) -> mk_struct! {i8p, i1});
870
871 if self.sess().opts.debuginfo != DebugInfo::None {
872 ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void);
873 ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void);
874 }
875
876 ifn!("llvm.ptrmask", fn(i8p, t_isize) -> i8p);
877
878 None
879 }
880
881 pub(crate) fn eh_catch_typeinfo(&self) -> &'ll Value {
882 if let Some(eh_catch_typeinfo) = self.eh_catch_typeinfo.get() {
883 return eh_catch_typeinfo;
884 }
885 let tcx = self.tcx;
886 assert!(self.sess().target.os == "emscripten");
887 let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
888 Some(def_id) => self.get_static(def_id),
889 _ => {
890 let ty = self
891 .type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false);
892 self.declare_global("rust_eh_catch_typeinfo", ty)
893 }
894 };
895 let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p());
896 self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
897 eh_catch_typeinfo
898 }
899 }
900
901 impl CodegenCx<'_, '_> {
902 /// Generates a new symbol name with the given prefix. This symbol name must
903 /// only be used for definitions with `internal` or `private` linkage.
904 pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
905 let idx = self.local_gen_sym_counter.get();
906 self.local_gen_sym_counter.set(idx + 1);
907 // Include a '.' character, so there can be no accidental conflicts with
908 // user defined names
909 let mut name = String::with_capacity(prefix.len() + 6);
910 name.push_str(prefix);
911 name.push('.');
912 base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
913 name
914 }
915 }
916
// Lets layout machinery query the target's data layout (pointer width,
// alignments, endianness) directly through the codegen context.
impl HasDataLayout for CodegenCx<'_, '_> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        // The layout is owned by the `TyCtxt`; just forward to it.
        &self.tcx.data_layout
    }
}
923
// Exposes the session's full target specification through the codegen context.
impl HasTargetSpec for CodegenCx<'_, '_> {
    #[inline]
    fn target_spec(&self) -> &Target {
        // The target spec lives on the session; borrow it from there.
        &self.tcx.sess.target
    }
}
930
// Gives type/layout helpers access to the type context backing this
// codegen unit.
impl<'tcx> ty::layout::HasTyCtxt<'tcx> for CodegenCx<'_, 'tcx> {
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        self.tcx
    }
}
937
938 impl<'tcx, 'll> HasParamEnv<'tcx> for CodegenCx<'ll, 'tcx> {
939 fn param_env(&self) -> ty::ParamEnv<'tcx> {
940 ty::ParamEnv::reveal_all()
941 }
942 }
943
944 impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
945 type LayoutOfResult = TyAndLayout<'tcx>;
946
947 #[inline]
948 fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! {
949 if let LayoutError::SizeOverflow(_) = err {
950 self.sess().span_fatal(span, &err.to_string())
951 } else {
952 span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
953 }
954 }
955 }
956
957 impl<'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'_, 'tcx> {
958 type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>;
959
960 #[inline]
961 fn handle_fn_abi_err(
962 &self,
963 err: FnAbiError<'tcx>,
964 span: Span,
965 fn_abi_request: FnAbiRequest<'tcx>,
966 ) -> ! {
967 if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err {
968 self.sess().span_fatal(span, &err.to_string())
969 } else {
970 match fn_abi_request {
971 FnAbiRequest::OfFnPtr { sig, extra_args } => {
972 span_bug!(
973 span,
974 "`fn_abi_of_fn_ptr({}, {:?})` failed: {}",
975 sig,
976 extra_args,
977 err
978 );
979 }
980 FnAbiRequest::OfInstance { instance, extra_args } => {
981 span_bug!(
982 span,
983 "`fn_abi_of_instance({}, {:?})` failed: {}",
984 instance,
985 extra_args,
986 err
987 );
988 }
989 }
990 }
991 }
992 }