use crate::base;
use crate::common::CodegenCx;
use crate::debuginfo;
use crate::llvm::{self, True};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use libc::c_uint;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::const_cstr;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::Node;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::interpret::{
    read_target_uint, Allocation, ErrorHandled, GlobalAlloc, Pointer,
};
use rustc_middle::mir::mono::MonoItem;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::{bug, span_bug};
use rustc_span::symbol::sym;
use rustc_target::abi::{AddressSpace, Align, HasDataLayout, LayoutOf, Primitive, Scalar, Size};
use tracing::debug;
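
/// Lowers a const-evaluated `Allocation` to a single LLVM constant struct,
/// emitting runs of plain bytes and, at each recorded relocation, a pointer
/// into the referenced global allocation.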
pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
    let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
    let dl = cx.data_layout();
    let pointer_size = dl.pointer_size.bytes() as usize;

    let mut next_offset = 0;
    for &(offset, ((), alloc_id)) in alloc.relocations().iter() {
        let offset = offset.bytes();
        assert_eq!(offset as usize as u64, offset);
        let offset = offset as usize;
        if offset > next_offset {
            // This `inspect` is okay since we have checked that it is not within a relocation, it
            // is within the bounds of the allocation, and it doesn't affect interpreter execution
            // (we inspect the result after interpreter execution). Any undef byte is replaced with
            // some arbitrary byte value.
            //
            // FIXME: relay undef bytes to codegen as undef const bytes
            let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
            llvals.push(cx.const_bytes(bytes));
        }
        let ptr_offset = read_target_uint(
            dl.endian,
            // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
            // affect interpreter execution (we inspect the result after interpreter execution),
            // and we properly interpret the relocation as a relocation pointer offset.
            alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
        )
        .expect("const_alloc_to_llvm: could not read relocation pointer")
            as u64;

        let address_space = match cx.tcx.global_alloc(alloc_id) {
            GlobalAlloc::Function(..) => cx.data_layout().instruction_address_space,
            GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) => AddressSpace::DATA,
        };

        llvals.push(cx.scalar_to_backend(
            Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
            &Scalar { value: Primitive::Pointer, valid_range: 0..=!0 },
            cx.type_i8p_ext(address_space),
        ));
        next_offset = offset + pointer_size;
    }
    if alloc.len() >= next_offset {
        let range = next_offset..alloc.len();
        // This `inspect` is okay since we have checked that it is after all relocations, it is
        // within the bounds of the allocation, and it doesn't affect interpreter execution (we
        // inspect the result after interpreter execution). Any undef byte is replaced with some
        // arbitrary byte value.
        //
        // FIXME: relay undef bytes to codegen as undef const bytes
        let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
        llvals.push(cx.const_bytes(bytes));
    }

    cx.const_struct(&llvals, true)
}
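
/// Evaluates the initializer of the static `def_id` and lowers the resulting
/// allocation to an LLVM constant value.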
pub fn codegen_static_initializer(
    cx: &CodegenCx<'ll, 'tcx>,
    def_id: DefId,
) -> Result<(&'ll Value, &'tcx Allocation), ErrorHandled> {
    let alloc = cx.tcx.eval_static_initializer(def_id)?;
    Ok((const_alloc_to_llvm(cx, alloc), alloc))
}
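
/// Sets the alignment of the global `gv`, raising it to the target's minimum
/// global alignment when one is configured.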
fn set_global_alignment(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {
    // The target may require greater alignment for globals than the type does.
    // Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
    // which can force it to be smaller. Rust doesn't support this yet.
    if let Some(min) = cx.sess().target.min_global_align {
        match Align::from_bits(min) {
            Ok(min) => align = align.max(min),
            Err(err) => {
                cx.sess().err(&format!("invalid minimum global alignment: {}", err));
            }
        }
    }
    unsafe {
        llvm::LLVMSetAlignment(gv, align.bytes() as u32);
    }
}
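
/// Declares the LLVM global for a static, honoring an explicit `#[linkage]`
/// attribute when one is present (see the comments in the body for why that
/// case needs an extra internal global).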
fn check_and_apply_linkage(
    cx: &CodegenCx<'ll, 'tcx>,
    attrs: &CodegenFnAttrs,
    ty: Ty<'tcx>,
    sym: &str,
    span_def_id: DefId,
) -> &'ll Value {
    let llty = cx.layout_of(ty).llvm_type(cx);
    if let Some(linkage) = attrs.linkage {
        debug!("get_static: sym={} linkage={:?}", sym, linkage);

        // If this is a static with a linkage specified, then we need to handle
        // it a little specially. The typesystem prevents things like &T and
        // extern "C" fn() from being non-null, so we can't just declare a
        // static and call it a day. Some linkages (like weak) will make it such
        // that the static actually has a null value.
        let llty2 = if let ty::RawPtr(ref mt) = ty.kind() {
            cx.layout_of(mt.ty).llvm_type(cx)
        } else {
            cx.sess().span_fatal(
                cx.tcx.def_span(span_def_id),
                "must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
            )
        };
        unsafe {
            // Declare a symbol `foo` with the desired linkage.
            let g1 = cx.declare_global(&sym, llty2);
            llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));

            // Declare an internal global `extern_with_linkage_foo` which
            // is initialized with the address of `foo`. If `foo` is
            // discarded during linking (for example, if `foo` has weak
            // linkage and there are no definitions), then
            // `extern_with_linkage_foo` will instead be initialized to
            // zero.
            let mut real_name = "_rust_extern_with_linkage_".to_string();
            real_name.push_str(&sym);
            let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
                cx.sess().span_fatal(
                    cx.tcx.def_span(span_def_id),
                    &format!("symbol `{}` is already defined", &sym),
                )
            });
            llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
            llvm::LLVMSetInitializer(g2, g1);
            g2
        }
    } else {
        // Generate an external declaration.
        // FIXME(nagisa): investigate whether it can be changed into define_global
        cx.declare_global(&sym, llty)
    }
}
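
/// Constant-casts the pointer value `val` to the pointer type `ty`.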
pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
    unsafe { llvm::LLVMConstPointerCast(val, ty) }
}

impl CodegenCx<'ll, 'tcx> {
    crate fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMConstBitCast(val, ty) }
    }
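
    /// Defines a fresh private global initialized with `cv` and returns it;
    /// `static_addr_of` builds on this by additionally marking the global as
    /// constant and caching it per constant value.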
    crate fn static_addr_of_mut(
        &self,
        cv: &'ll Value,
        align: Align,
        kind: Option<&str>,
    ) -> &'ll Value {
        unsafe {
            let gv = match kind {
                Some(kind) if !self.tcx.sess.fewer_names() => {
                    let name = self.generate_local_symbol_name(kind);
                    let gv = self.define_global(&name[..], self.val_ty(cv)).unwrap_or_else(|| {
                        bug!("symbol `{}` is already defined", name);
                    });
                    llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
                    gv
                }
                _ => self.define_private_global(self.val_ty(cv)),
            };
            llvm::LLVMSetInitializer(gv, cv);
            set_global_alignment(&self, gv, align);
            llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global);
            gv
        }
    }
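
    /// Returns the LLVM global for the static `def_id`, declaring it on first
    /// use and applying linkage, visibility, thread-local, and dllimport
    /// attributes as required.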
    crate fn get_static(&self, def_id: DefId) -> &'ll Value {
        let instance = Instance::mono(self.tcx, def_id);
        if let Some(&g) = self.instances.borrow().get(&instance) {
            return g;
        }

        let defined_in_current_codegen_unit =
            self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
        assert!(
            !defined_in_current_codegen_unit,
            "consts::get_static() should always hit the cache for \
             statics defined in the same CGU, but did not for `{:?}`",
            def_id
        );

        let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
        let sym = self.tcx.symbol_name(instance).name;

        debug!("get_static: sym={} instance={:?}", sym, instance);

        let g = if let Some(local_def_id) = def_id.as_local() {
            let id = self.tcx.hir().local_def_id_to_hir_id(local_def_id);
            let llty = self.layout_of(ty).llvm_type(self);
            // FIXME: refactor this to work without accessing the HIR
            let (g, attrs) = match self.tcx.hir().get(id) {
                Node::Item(&hir::Item { attrs, kind: hir::ItemKind::Static(..), .. }) => {
                    if let Some(g) = self.get_declared_value(sym) {
                        if self.val_ty(g) != self.type_ptr_to(llty) {
                            span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
                        }
                    }

                    let g = self.declare_global(sym, llty);

                    if !self.tcx.is_reachable_non_generic(local_def_id) {
                        unsafe {
                            llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden);
                        }
                    }

                    (g, attrs)
                }

                Node::ForeignItem(&hir::ForeignItem {
                    ref attrs,
                    kind: hir::ForeignItemKind::Static(..),
                    ..
                }) => {
                    let fn_attrs = self.tcx.codegen_fn_attrs(local_def_id);
                    (check_and_apply_linkage(&self, &fn_attrs, ty, sym, def_id), &**attrs)
                }

                item => bug!("get_static: expected static, found {:?}", item),
            };

            debug!("get_static: sym={} attrs={:?}", sym, attrs);

            for attr in attrs {
                if self.tcx.sess.check_name(attr, sym::thread_local) {
                    llvm::set_thread_local_mode(g, self.tls_model);
                }
            }

            g
        } else {
            // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
            debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id));

            let attrs = self.tcx.codegen_fn_attrs(def_id);
            let g = check_and_apply_linkage(&self, &attrs, ty, sym, def_id);

            // Thread-local statics in some other crate need to *always* be linked
            // against in a thread-local fashion, so we need to be sure to apply the
            // thread-local attribute locally if it was present remotely. If we
            // don't do this then linker errors can be generated where the linker
            // complains that one object file has a thread-local version of the
            // symbol and another one doesn't.
            if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
                llvm::set_thread_local_mode(g, self.tls_model);
            }

            let needs_dll_storage_attr = self.use_dll_storage_attrs
                && !self.tcx.is_foreign_item(def_id)
                // ThinLTO can't handle this workaround in all cases, so we don't
                // emit the attrs. Instead we make them unnecessary by disallowing
                // dynamic linking when linker-plugin-based LTO is enabled.
                && !self.tcx.sess.opts.cg.linker_plugin_lto.enabled();

            // If this assertion triggers, there's something wrong with command-line
            // argument validation.
            debug_assert!(
                !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
                    && self.tcx.sess.target.is_like_windows
                    && self.tcx.sess.opts.cg.prefer_dynamic)
            );

            if needs_dll_storage_attr {
                // This item is external but not foreign, i.e., it originates from an external Rust
                // crate. Since we don't know whether this crate will be linked dynamically or
                // statically in the final application, we always mark such symbols as 'dllimport'.
                // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
                // to make things work.
                //
                // However, in some scenarios we defer emission of statics to downstream
                // crates, so there are cases where a static with an upstream DefId
                // is actually present in the current crate. We can find out via the
                // is_codegened_item query.
                if !self.tcx.is_codegened_item(def_id) {
                    unsafe {
                        llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
                    }
                }
            }

            g
        };

        if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) {
            // For foreign (native) libs we know the exact storage type to use.
            unsafe {
                llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
            }
        }

        self.instances.borrow_mut().insert(instance, g);
        g
    }
}

impl StaticMethods for CodegenCx<'ll, 'tcx> {
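    // Returns the address of a read-only global holding `cv`, reusing (and, if
    // needed, re-aligning) a previously emitted global for the same constant.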
    fn static_addr_of(&self, cv: &'ll Value, align: Align, kind: Option<&str>) -> &'ll Value {
        if let Some(&gv) = self.const_globals.borrow().get(&cv) {
            unsafe {
                // Upgrade the alignment in cases where the same constant is used with different
                // alignment requirements.
                let llalign = align.bytes() as u32;
                if llalign > llvm::LLVMGetAlignment(gv) {
                    llvm::LLVMSetAlignment(gv, llalign);
                }
            }
            return gv;
        }
        let gv = self.static_addr_of_mut(cv, align, kind);
        unsafe {
            llvm::LLVMSetGlobalConstant(gv, True);
        }
        self.const_globals.borrow_mut().insert(cv, gv);
        gv
    }
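
    // Emits the evaluated initializer of the static `def_id` into its LLVM
    // global, correcting the global's type if needed and applying alignment,
    // constness, TLS mode, and platform-specific section attributes.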
    fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
        unsafe {
            let attrs = self.tcx.codegen_fn_attrs(def_id);

            let (v, alloc) = match codegen_static_initializer(&self, def_id) {
                Ok(v) => v,
                // Error has already been reported
                Err(_) => return,
            };

            let g = self.get_static(def_id);

            // Boolean SSA values are i1, but they have to be stored in i8 slots,
            // otherwise some LLVM optimization passes don't work as expected.
            let mut val_llty = self.val_ty(v);
            let v = if val_llty == self.type_i1() {
                val_llty = self.type_i8();
                llvm::LLVMConstZExt(v, val_llty)
            } else {
                v
            };

            let instance = Instance::mono(self.tcx, def_id);
            let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
            let llty = self.layout_of(ty).llvm_type(self);
            let g = if val_llty == llty {
                g
            } else {
                // If we created the global with the wrong type,
                // correct the type.
                let name = llvm::get_value_name(g).to_vec();
                llvm::set_value_name(g, b"");

                let linkage = llvm::LLVMRustGetLinkage(g);
                let visibility = llvm::LLVMRustGetVisibility(g);

                let new_g = llvm::LLVMRustGetOrInsertGlobal(
                    self.llmod,
                    name.as_ptr().cast(),
                    name.len(),
                    val_llty,
                );

                llvm::LLVMRustSetLinkage(new_g, linkage);
                llvm::LLVMRustSetVisibility(new_g, visibility);

                // To avoid breaking any invariants, we leave around the old
                // global for the moment; we'll replace all references to it
                // with the new global later. (See base::codegen_backend.)
                self.statics_to_rauw.borrow_mut().push((g, new_g));
                new_g
            };

            set_global_alignment(&self, g, self.align_of(ty));
            llvm::LLVMSetInitializer(g, v);

            // As an optimization, all shared statics which do not have interior
            // mutability are placed into read-only memory.
            if !is_mutable && self.type_is_freeze(ty) {
                llvm::LLVMSetGlobalConstant(g, llvm::True);
            }

            debuginfo::create_global_var_metadata(&self, def_id, g);

            if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
                llvm::set_thread_local_mode(g, self.tls_model);

                // Do not allow LLVM to change the alignment of a TLS on macOS.
                //
                // By default a global's alignment can be freely increased.
                // This allows LLVM to generate more performant instructions
                // e.g., using load-aligned into a SIMD register.
                //
                // However, on macOS 10.10 or below, the dynamic linker does not
                // respect any alignment given on the TLS (radar 24221680).
                // This violates the alignment assumption and causes segfaults at runtime.
                //
                // This bug is very easy to trigger. In `println!` and `panic!`,
                // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
                // and their values are `mem::replace`d on initialization.
                // The implementation of `mem::replace` will use SIMD
                // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
                // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
                // which macOS's dyld disregards, causing crashes
                // (see issues #51794, #51758, #50867, #48866 and #44056).
                //
                // To work around the bug, we trick LLVM into not increasing
                // the global's alignment by explicitly assigning a section to it
                // (equivalent to automatically generating a `#[link_section]` attribute).
                // See the comment in the `GlobalValue::canIncreaseAlignment()` function
                // of `lib/IR/Globals.cpp` for why this works.
                //
                // When the alignment is not increased, the optimized `mem::replace`
                // will use load-unaligned instructions instead, thus avoiding the crash.
                //
                // We could remove this hack whenever we decide to drop macOS 10.10 support.
                if self.tcx.sess.target.is_like_osx {
                    // The `inspect` method is okay here because we checked relocations, and
                    // because we are doing this access to inspect the final interpreter state
                    // (not as part of the interpreter execution).
                    //
                    // FIXME: This check requires that the (arbitrary) value of undefined bytes
                    // happens to be zero. Instead, we should only check the value of defined bytes
                    // and set all undefined bytes to zero if this allocation is headed for the
                    // BSS.
                    let all_bytes_are_zero = alloc.relocations().is_empty()
                        && alloc
                            .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
                            .iter()
                            .all(|&byte| byte == 0);

                    let sect_name = if all_bytes_are_zero {
                        const_cstr!("__DATA,__thread_bss")
                    } else {
                        const_cstr!("__DATA,__thread_data")
                    };
                    llvm::LLVMSetSection(g, sect_name.as_ptr());
                }
            }

            // Wasm statics with custom link sections get special treatment as they
            // go into custom sections of the wasm executable.
            if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
                if let Some(section) = attrs.link_section {
                    let section = llvm::LLVMMDStringInContext(
                        self.llcx,
                        section.as_str().as_ptr().cast(),
                        section.as_str().len() as c_uint,
                    );
                    assert!(alloc.relocations().is_empty());

                    // The `inspect` method is okay here because we checked relocations, and
                    // because we are doing this access to inspect the final interpreter state (not
                    // as part of the interpreter execution).
                    let bytes =
                        alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len());
                    let alloc = llvm::LLVMMDStringInContext(
                        self.llcx,
                        bytes.as_ptr().cast(),
                        bytes.len() as c_uint,
                    );
                    let data = [section, alloc];
                    let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2);
                    llvm::LLVMAddNamedMetadataOperand(
                        self.llmod,
                        "wasm.custom_sections\0".as_ptr().cast(),
                        meta,
                    );
                }
            } else {
                base::set_link_section(g, &attrs);
            }

            if attrs.flags.contains(CodegenFnAttrFlags::USED) {
                self.add_used_global(g);
            }
        }
    }

    /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
    fn add_used_global(&self, global: &'ll Value) {
        let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
        self.used_statics.borrow_mut().push(cast);
    }
}