]>
Commit | Line | Data |
---|---|---|
9fa01778 | 1 | use crate::base; |
f2b60f7d | 2 | use crate::common::{self, CodegenCx}; |
dfeec247 | 3 | use crate::debuginfo; |
487cf647 | 4 | use crate::errors::{InvalidMinimumAlignment, SymbolAlreadyDefined}; |
ba9703b0 | 5 | use crate::llvm::{self, True}; |
9fa01778 XL |
6 | use crate::type_::Type; |
7 | use crate::type_of::LayoutLlvmExt; | |
8 | use crate::value::Value; | |
6a06907d | 9 | use cstr::cstr; |
8faf50e0 | 10 | use libc::c_uint; |
dfeec247 | 11 | use rustc_codegen_ssa::traits::*; |
dfeec247 | 12 | use rustc_hir::def_id::DefId; |
ba9703b0 XL |
13 | use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; |
14 | use rustc_middle::mir::interpret::{ | |
9ffffee4 | 15 | read_target_uint, Allocation, ConstAllocation, ErrorHandled, InitChunk, Pointer, |
94222f64 | 16 | Scalar as InterpScalar, |
ba9703b0 XL |
17 | }; |
18 | use rustc_middle::mir::mono::MonoItem; | |
c295e0f8 | 19 | use rustc_middle::ty::layout::LayoutOf; |
ba9703b0 XL |
20 | use rustc_middle::ty::{self, Instance, Ty}; |
21 | use rustc_middle::{bug, span_bug}; | |
487cf647 | 22 | use rustc_session::config::Lto; |
9ffffee4 | 23 | use rustc_target::abi::{Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange}; |
94222f64 | 24 | use std::ops::Range; |
e9174d1e | 25 | |
5e7ed085 FG |
/// Lower an interpreter [`ConstAllocation`] to a single LLVM constant value.
///
/// The allocation's raw bytes are split at each pointer-provenance entry:
/// byte ranges between relocations are emitted as byte-array constants (with
/// uninit gaps as `undef` where allowed), and each relocation is emitted as a
/// pointer constant rebuilt from the alloc id plus its stored offset. The
/// pieces are concatenated into one packed LLVM struct constant.
pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value {
    let alloc = alloc.inner();
    // One entry per provenance pointer, plus one for the trailing byte range.
    let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
    let dl = cx.data_layout();
    let pointer_size = dl.pointer_size.bytes() as usize;

    // Note: this function may call `inspect_with_uninit_and_ptr_outside_interpreter`, so `range`
    // must be within the bounds of `alloc` and not contain or overlap a pointer provenance.
    fn append_chunks_of_init_and_uninit_bytes<'ll, 'a, 'b>(
        llvals: &mut Vec<&'ll Value>,
        cx: &'a CodegenCx<'ll, 'b>,
        alloc: &'a Allocation,
        range: Range<usize>,
    ) {
        // Walk the init mask so initialized and uninitialized runs can be
        // emitted separately.
        let chunks = alloc.init_mask().range_as_init_chunks(range.clone().into());

        let chunk_to_llval = move |chunk| match chunk {
            InitChunk::Init(range) => {
                let range = (range.start.bytes() as usize)..(range.end.bytes() as usize);
                let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
                cx.const_bytes(bytes)
            }
            InitChunk::Uninit(range) => {
                let len = range.end.bytes() - range.start.bytes();
                cx.const_undef(cx.type_array(cx.type_i8(), len))
            }
        };

        // Generating partially-uninit consts is limited to small numbers of chunks,
        // to avoid the cost of generating large complex const expressions.
        // For example, `[(u32, u8); 1024 * 1024]` contains uninit padding in each element,
        // and would result in `{ [5 x i8] zeroinitializer, [3 x i8] undef, ...repeat 1M times... }`.
        let max = cx.sess().opts.unstable_opts.uninit_const_chunk_threshold;
        // `take(max + 1)` bounds the count so a huge chunk iterator is never
        // walked to the end just to decide this.
        let allow_uninit_chunks = chunks.clone().take(max.saturating_add(1)).count() <= max;

        if allow_uninit_chunks {
            llvals.extend(chunks.map(chunk_to_llval));
        } else {
            // If this allocation contains any uninit bytes, codegen as if it was initialized
            // (using some arbitrary value for uninit bytes).
            let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
            llvals.push(cx.const_bytes(bytes));
        }
    }

    // Next byte offset in `alloc` that has not yet been emitted.
    let mut next_offset = 0;
    for &(offset, alloc_id) in alloc.provenance().ptrs().iter() {
        let offset = offset.bytes();
        assert_eq!(offset as usize as u64, offset);
        let offset = offset as usize;
        if offset > next_offset {
            // This `inspect` is okay since we have checked that there is no provenance, it
            // is within the bounds of the allocation, and it doesn't affect interpreter execution
            // (we inspect the result after interpreter execution).
            append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, next_offset..offset);
        }
        let ptr_offset = read_target_uint(
            dl.endian,
            // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
            // affect interpreter execution (we inspect the result after interpreter execution),
            // and we properly interpret the provenance as a relocation pointer offset.
            alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
        )
        .expect("const_alloc_to_llvm: could not read relocation pointer")
        as u64;

        let address_space = cx.tcx.global_alloc(alloc_id).address_space(cx);

        // Re-materialize the relocation as an LLVM pointer constant into the
        // allocation identified by `alloc_id`, at the stored offset.
        llvals.push(cx.scalar_to_backend(
            InterpScalar::from_pointer(
                Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
                &cx.tcx,
            ),
            Scalar::Initialized {
                value: Primitive::Pointer(address_space),
                valid_range: WrappingRange::full(dl.pointer_size),
            },
            cx.type_i8p_ext(address_space),
        ));
        next_offset = offset + pointer_size;
    }
    if alloc.len() >= next_offset {
        let range = next_offset..alloc.len();
        // This `inspect` is okay since we have checked that it is after all provenance, it is
        // within the bounds of the allocation, and it doesn't affect interpreter execution (we
        // inspect the result after interpreter execution).
        append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, range);
    }

    // Packed struct: the pieces already encode the exact byte layout.
    cx.const_struct(&llvals, true)
}
117 | ||
a2a8927a | 118 | pub fn codegen_static_initializer<'ll, 'tcx>( |
a1dfa0c6 XL |
119 | cx: &CodegenCx<'ll, 'tcx>, |
120 | def_id: DefId, | |
5e7ed085 | 121 | ) -> Result<(&'ll Value, ConstAllocation<'tcx>), ErrorHandled> { |
1b1a35ee | 122 | let alloc = cx.tcx.eval_static_initializer(def_id)?; |
a1dfa0c6 | 123 | Ok((const_alloc_to_llvm(cx, alloc), alloc)) |
3b2f2976 XL |
124 | } |
125 | ||
a2a8927a | 126 | fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) { |
ea8adc8c XL |
127 | // The target may require greater alignment for globals than the type does. |
128 | // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, | |
9c376795 | 129 | // which can force it to be smaller. Rust doesn't support this yet. |
29967ef6 | 130 | if let Some(min) = cx.sess().target.min_global_align { |
a1dfa0c6 | 131 | match Align::from_bits(min) { |
ff7c6d11 | 132 | Ok(min) => align = align.max(min), |
ea8adc8c | 133 | Err(err) => { |
487cf647 | 134 | cx.sess().emit_err(InvalidMinimumAlignment { err }); |
ea8adc8c XL |
135 | } |
136 | } | |
137 | } | |
138 | unsafe { | |
a1dfa0c6 | 139 | llvm::LLVMSetAlignment(gv, align.bytes() as u32); |
ea8adc8c XL |
140 | } |
141 | } | |
142 | ||
/// Declare the LLVM global for an external static `sym` of type `ty`,
/// honoring any explicit linkage or dllimport attributes.
///
/// Three cases, in order:
/// 1. an explicit `import_linkage` attribute: emit the weak-linkage
///    indirection described inline below and return the internal wrapper;
/// 2. x86 with a recorded dllimport: declare under the i686-decorated name;
/// 3. otherwise: a plain external declaration.
fn check_and_apply_linkage<'ll, 'tcx>(
    cx: &CodegenCx<'ll, 'tcx>,
    attrs: &CodegenFnAttrs,
    ty: Ty<'tcx>,
    sym: &str,
    def_id: DefId,
) -> &'ll Value {
    let llty = cx.layout_of(ty).llvm_type(cx);
    if let Some(linkage) = attrs.import_linkage {
        debug!("get_static: sym={} linkage={:?}", sym, linkage);

        unsafe {
            // Declare a symbol `foo` with the desired linkage.
            let g1 = cx.declare_global(sym, cx.type_i8());
            llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));

            // Declare an internal global `extern_with_linkage_foo` which
            // is initialized with the address of `foo`. If `foo` is
            // discarded during linking (for example, if `foo` has weak
            // linkage and there are no definitions), then
            // `extern_with_linkage_foo` will instead be initialized to
            // zero.
            let mut real_name = "_rust_extern_with_linkage_".to_string();
            real_name.push_str(sym);
            let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
                // A pre-existing definition under the wrapper name is a
                // fatal symbol clash.
                cx.sess().emit_fatal(SymbolAlreadyDefined {
                    span: cx.tcx.def_span(def_id),
                    symbol_name: sym,
                })
            });
            llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
            llvm::LLVMSetInitializer(g2, cx.const_ptrcast(g1, llty));
            g2
        }
    } else if cx.tcx.sess.target.arch == "x86" &&
        let Some(dllimport) = common::get_dllimport(cx.tcx, def_id, sym)
    {
        // 32-bit x86 dllimported symbols need the decorated (possibly
        // `__imp_`-style) name; mingw-gnu decorates differently.
        cx.declare_global(&common::i686_decorated_name(&dllimport, common::is_mingw_gnu_toolchain(&cx.tcx.sess.target), true), llty)
    } else {
        // Generate an external declaration.
        // FIXME(nagisa): investigate whether it can be changed into define_global
        cx.declare_global(sym, llty)
    }
}
187 | ||
/// Constant-expression pointer cast of `val` to the LLVM type `ty`.
///
/// Thin wrapper over `LLVMConstPointerCast`; no instructions are emitted.
pub fn ptrcast<'ll>(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
    unsafe { llvm::LLVMConstPointerCast(val, ty) }
}
c1a9b12d | 191 | |
impl<'ll> CodegenCx<'ll, '_> {
    /// Constant-expression bitcast of `val` to `ty`.
    pub(crate) fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe { llvm::LLVMConstBitCast(val, ty) }
    }

    /// Create a private (still mutable) global initialized with `cv` and
    /// return it.
    ///
    /// When `kind` is `Some` and the session keeps names, a fresh local
    /// symbol name derived from `kind` is generated; otherwise an unnamed
    /// private global is used. The global gets private linkage, the given
    /// alignment, and global unnamed-addr.
    pub(crate) fn static_addr_of_mut(
        &self,
        cv: &'ll Value,
        align: Align,
        kind: Option<&str>,
    ) -> &'ll Value {
        unsafe {
            let gv = match kind {
                Some(kind) if !self.tcx.sess.fewer_names() => {
                    let name = self.generate_local_symbol_name(kind);
                    // A freshly generated local symbol name must be unique;
                    // a collision indicates a compiler bug.
                    let gv = self.define_global(&name, self.val_ty(cv)).unwrap_or_else(|| {
                        bug!("symbol `{}` is already defined", name);
                    });
                    llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
                    gv
                }
                _ => self.define_private_global(self.val_ty(cv)),
            };
            llvm::LLVMSetInitializer(gv, cv);
            set_global_alignment(self, gv, align);
            llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global);
            gv
        }
    }

    /// Get (declaring on first use) the LLVM global for the static `def_id`.
    ///
    /// Results are cached in `self.instances`; statics defined in the current
    /// CGU are expected to already be in that cache. Also applies TLS mode,
    /// dso_local, and DLL storage-class attributes as needed.
    pub(crate) fn get_static(&self, def_id: DefId) -> &'ll Value {
        let instance = Instance::mono(self.tcx, def_id);
        if let Some(&g) = self.instances.borrow().get(&instance) {
            return g;
        }

        let defined_in_current_codegen_unit =
            self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
        assert!(
            !defined_in_current_codegen_unit,
            "consts::get_static() should always hit the cache for \
             statics defined in the same CGU, but did not for `{:?}`",
            def_id
        );

        let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
        let sym = self.tcx.symbol_name(instance).name;
        let fn_attrs = self.tcx.codegen_fn_attrs(def_id);

        debug!("get_static: sym={} instance={:?} fn_attrs={:?}", sym, instance, fn_attrs);

        let g = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
            let llty = self.layout_of(ty).llvm_type(self);
            // If a value was already declared under this symbol, it must have
            // the expected pointer-to-llty type.
            if let Some(g) = self.get_declared_value(sym) {
                if self.val_ty(g) != self.type_ptr_to(llty) {
                    span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
                }
            }

            let g = self.declare_global(sym, llty);

            // Non-reachable statics don't need to be visible outside the
            // produced object file.
            if !self.tcx.is_reachable_non_generic(def_id) {
                unsafe {
                    llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden);
                }
            }

            g
        } else {
            check_and_apply_linkage(self, fn_attrs, ty, sym, def_id)
        };

        // Thread-local statics in some other crate need to *always* be linked
        // against in a thread-local fashion, so we need to be sure to apply the
        // thread-local attribute locally if it was present remotely. If we
        // don't do this then linker errors can be generated where the linker
        // complains that one object files has a thread local version of the
        // symbol and another one doesn't.
        if fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
            llvm::set_thread_local_mode(g, self.tls_model);
        }

        let dso_local = unsafe { self.should_assume_dso_local(g, true) };
        if dso_local {
            unsafe {
                llvm::LLVMRustSetDSOLocal(g, true);
            }
        }

        if !def_id.is_local() {
            let needs_dll_storage_attr = self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
                // Local definitions can never be imported, so we must not apply
                // the DLLImport annotation.
                !dso_local &&
                // ThinLTO can't handle this workaround in all cases, so we don't
                // emit the attrs. Instead we make them unnecessary by disallowing
                // dynamic linking when linker plugin based LTO is enabled.
                !self.tcx.sess.opts.cg.linker_plugin_lto.enabled() &&
                self.tcx.sess.lto() != Lto::Thin;

            // If this assertion triggers, there's something wrong with commandline
            // argument validation.
            debug_assert!(
                !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
                    && self.tcx.sess.target.is_like_windows
                    && self.tcx.sess.opts.cg.prefer_dynamic)
            );

            if needs_dll_storage_attr {
                // This item is external but not foreign, i.e., it originates from an external Rust
                // crate. Since we don't know whether this crate will be linked dynamically or
                // statically in the final application, we always mark such symbols as 'dllimport'.
                // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
                // to make things work.
                //
                // However, in some scenarios we defer emission of statics to downstream
                // crates, so there are cases where a static with an upstream DefId
                // is actually present in the current crate. We can find out via the
                // is_codegened_item query.
                if !self.tcx.is_codegened_item(def_id) {
                    unsafe {
                        llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
                    }
                }
            }
        }

        if self.use_dll_storage_attrs
            && let Some(library) = self.tcx.native_library(def_id)
            && library.kind.is_dllimport()
        {
            // For foreign (native) libs we know the exact storage type to use.
            unsafe {
                llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
            }
        }

        self.instances.borrow_mut().insert(instance, g);
        g
    }
}
333 | ||
impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
    /// Return the address of an immutable, deduplicated global holding `cv`.
    ///
    /// Constants are cached in `const_globals`; a cache hit only (possibly)
    /// raises the existing global's alignment. A miss creates the global via
    /// `static_addr_of_mut` and marks it as an LLVM constant.
    fn static_addr_of(&self, cv: &'ll Value, align: Align, kind: Option<&str>) -> &'ll Value {
        if let Some(&gv) = self.const_globals.borrow().get(&cv) {
            unsafe {
                // Upgrade the alignment in cases where the same constant is used with different
                // alignment requirements
                let llalign = align.bytes() as u32;
                if llalign > llvm::LLVMGetAlignment(gv) {
                    llvm::LLVMSetAlignment(gv, llalign);
                }
            }
            return gv;
        }
        let gv = self.static_addr_of_mut(cv, align, kind);
        unsafe {
            llvm::LLVMSetGlobalConstant(gv, True);
        }
        self.const_globals.borrow_mut().insert(cv, gv);
        gv
    }

    /// Emit the definition of the static `def_id`: evaluate its initializer,
    /// attach it to the (possibly re-typed) global, and apply alignment,
    /// constness, debuginfo, TLS/section and `#[used]` handling.
    fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
        unsafe {
            let attrs = self.tcx.codegen_fn_attrs(def_id);

            let Ok((v, alloc)) = codegen_static_initializer(self, def_id) else {
                // Error has already been reported
                return;
            };
            let alloc = alloc.inner();

            let g = self.get_static(def_id);

            // boolean SSA values are i1, but they have to be stored in i8 slots,
            // otherwise some LLVM optimization passes don't work as expected
            let mut val_llty = self.val_ty(v);
            let v = if val_llty == self.type_i1() {
                val_llty = self.type_i8();
                llvm::LLVMConstZExt(v, val_llty)
            } else {
                v
            };

            let instance = Instance::mono(self.tcx, def_id);
            let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
            let llty = self.layout_of(ty).llvm_type(self);
            let g = if val_llty == llty {
                g
            } else {
                // If we created the global with the wrong type,
                // correct the type.
                let name = llvm::get_value_name(g).to_vec();
                llvm::set_value_name(g, b"");

                let linkage = llvm::LLVMRustGetLinkage(g);
                let visibility = llvm::LLVMRustGetVisibility(g);

                // Recreate the global under the original name with the type of
                // the actual initializer value.
                let new_g = llvm::LLVMRustGetOrInsertGlobal(
                    self.llmod,
                    name.as_ptr().cast(),
                    name.len(),
                    val_llty,
                );

                llvm::LLVMRustSetLinkage(new_g, linkage);
                llvm::LLVMRustSetVisibility(new_g, visibility);

                // The old global has had its name removed but is returned by
                // get_static since it is in the instance cache. Provide an
                // alternative lookup that points to the new global so that
                // global_asm! can compute the correct mangled symbol name
                // for the global.
                self.renamed_statics.borrow_mut().insert(def_id, new_g);

                // To avoid breaking any invariants, we leave around the old
                // global for the moment; we'll replace all references to it
                // with the new global later. (See base::codegen_backend.)
                self.statics_to_rauw.borrow_mut().push((g, new_g));
                new_g
            };
            set_global_alignment(self, g, self.align_of(ty));
            llvm::LLVMSetInitializer(g, v);

            if self.should_assume_dso_local(g, true) {
                llvm::LLVMRustSetDSOLocal(g, true);
            }

            // As an optimization, all shared statics which do not have interior
            // mutability are placed into read-only memory.
            if !is_mutable && self.type_is_freeze(ty) {
                llvm::LLVMSetGlobalConstant(g, llvm::True);
            }

            debuginfo::build_global_var_di_node(self, def_id, g);

            if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
                llvm::set_thread_local_mode(g, self.tls_model);

                // Do not allow LLVM to change the alignment of a TLS on macOS.
                //
                // By default a global's alignment can be freely increased.
                // This allows LLVM to generate more performant instructions
                // e.g., using load-aligned into a SIMD register.
                //
                // However, on macOS 10.10 or below, the dynamic linker does not
                // respect any alignment given on the TLS (radar 24221680).
                // This will violate the alignment assumption, and causing segfault at runtime.
                //
                // This bug is very easy to trigger. In `println!` and `panic!`,
                // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
                // which the values would be `mem::replace`d on initialization.
                // The implementation of `mem::replace` will use SIMD
                // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
                // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
                // which macOS's dyld disregarded and causing crashes
                // (see issues #51794, #51758, #50867, #48866 and #44056).
                //
                // To workaround the bug, we trick LLVM into not increasing
                // the global's alignment by explicitly assigning a section to it
                // (equivalent to automatically generating a `#[link_section]` attribute).
                // See the comment in the `GlobalValue::canIncreaseAlignment()` function
                // of `lib/IR/Globals.cpp` for why this works.
                //
                // When the alignment is not increased, the optimized `mem::replace`
                // will use load-unaligned instructions instead, and thus avoiding the crash.
                //
                // We could remove this hack whenever we decide to drop macOS 10.10 support.
                if self.tcx.sess.target.is_like_osx {
                    // The `inspect` method is okay here because we checked for provenance, and
                    // because we are doing this access to inspect the final interpreter state
                    // (not as part of the interpreter execution).
                    //
                    // FIXME: This check requires that the (arbitrary) value of undefined bytes
                    // happens to be zero. Instead, we should only check the value of defined bytes
                    // and set all undefined bytes to zero if this allocation is headed for the
                    // BSS.
                    let all_bytes_are_zero = alloc.provenance().ptrs().is_empty()
                        && alloc
                            .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
                            .iter()
                            .all(|&byte| byte == 0);

                    let sect_name = if all_bytes_are_zero {
                        cstr!("__DATA,__thread_bss")
                    } else {
                        cstr!("__DATA,__thread_data")
                    };
                    llvm::LLVMSetSection(g, sect_name.as_ptr());
                }
            }

            // Wasm statics with custom link sections get special treatment as they
            // go into custom sections of the wasm executable.
            if self.tcx.sess.target.is_like_wasm {
                if let Some(section) = attrs.link_section {
                    let section = llvm::LLVMMDStringInContext(
                        self.llcx,
                        section.as_str().as_ptr().cast(),
                        section.as_str().len() as c_uint,
                    );
                    // Statics with relocations cannot be placed in a raw wasm
                    // custom section.
                    assert!(alloc.provenance().ptrs().is_empty());

                    // The `inspect` method is okay here because we checked for provenance, and
                    // because we are doing this access to inspect the final interpreter state (not
                    // as part of the interpreter execution).
                    let bytes =
                        alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len());
                    let alloc = llvm::LLVMMDStringInContext(
                        self.llcx,
                        bytes.as_ptr().cast(),
                        bytes.len() as c_uint,
                    );
                    let data = [section, alloc];
                    let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2);
                    llvm::LLVMAddNamedMetadataOperand(
                        self.llmod,
                        "wasm.custom_sections\0".as_ptr().cast(),
                        meta,
                    );
                }
            } else {
                base::set_link_section(g, attrs);
            }

            if attrs.flags.contains(CodegenFnAttrFlags::USED) {
                // `USED` and `USED_LINKER` can't be used together.
                assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER));

                // The semantics of #[used] in Rust only require the symbol to make it into the
                // object file. It is explicitly allowed for the linker to strip the symbol if it
                // is dead, which means we are allowed to use `llvm.compiler.used` instead of
                // `llvm.used` here.
                //
                // Additionally, https://reviews.llvm.org/D97448 in LLVM 13 started emitting unique
                // sections with SHF_GNU_RETAIN flag for llvm.used symbols, which may trigger bugs
                // in the handling of `.init_array` (the static constructor list) in versions of
                // the gold linker (prior to the one released with binutils 2.36).
                //
                // That said, we only ever emit these when compiling for ELF targets, unless
                // `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage
                // on other targets, in particular MachO targets have *their* static constructor
                // lists broken if `llvm.compiler.used` is emitted rather than `llvm.used`. However,
                // that check happens when assigning the `CodegenFnAttrFlags` in `rustc_hir_analysis`,
                // so we don't need to take care of it here.
                self.add_compiler_used_global(g);
            }
            if attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) {
                // `USED` and `USED_LINKER` can't be used together.
                assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED));

                self.add_used_global(g);
            }
        }
    }

    /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
    fn add_used_global(&self, global: &'ll Value) {
        let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
        self.used_statics.borrow_mut().push(cast);
    }

    /// Add a global value to a list to be stored in the `llvm.compiler.used` variable,
    /// an array of i8*.
    fn add_compiler_used_global(&self, global: &'ll Value) {
        let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
        self.compiler_used_statics.borrow_mut().push(cast);
    }
}