]> git.proxmox.com Git - rustc.git/blob - src/librustc_codegen_llvm/consts.rs
New upstream version 1.39.0+dfsg1
[rustc.git] / src / librustc_codegen_llvm / consts.rs
1 use crate::llvm::{self, SetUnnamedAddr, True};
2 use crate::debuginfo;
3 use crate::common::CodegenCx;
4 use crate::base;
5 use crate::type_::Type;
6 use crate::type_of::LayoutLlvmExt;
7 use crate::value::Value;
8 use libc::c_uint;
9 use rustc::hir::def_id::DefId;
10 use rustc::mir::interpret::{ConstValue, Allocation, read_target_uint,
11 Pointer, ErrorHandled, GlobalId};
12 use rustc::mir::mono::MonoItem;
13 use rustc::hir::Node;
14 use rustc_target::abi::HasDataLayout;
15 use rustc::ty::{self, Ty, Instance};
16 use rustc_codegen_ssa::traits::*;
17 use syntax::symbol::{Symbol, sym};
18 use syntax_pos::Span;
19
20 use rustc::ty::layout::{self, Size, Align, LayoutOf};
21
22 use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags};
23
24 use std::ffi::{CStr, CString};
25
/// Lowers an interpreter `Allocation` (the const-eval byte-level representation of a
/// constant) to an LLVM constant value.
///
/// The allocation is emitted as a packed anonymous struct that interleaves runs of
/// plain bytes with pointer-sized relocation entries; each relocation is rendered via
/// `scalar_to_backend` so LLVM sees a genuine pointer constant rather than raw bytes.
pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
    // Worst case: one entry per relocation plus one trailing run of plain bytes.
    let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
    let dl = cx.data_layout();
    let pointer_size = dl.pointer_size.bytes() as usize;

    // Byte offset just past the last chunk (bytes or relocation) pushed so far.
    let mut next_offset = 0;
    // Relocations are iterated in increasing offset order.
    for &(offset, ((), alloc_id)) in alloc.relocations().iter() {
        let offset = offset.bytes();
        // Guard the u64 -> usize narrowing below (matters on 32-bit hosts).
        assert_eq!(offset as usize as u64, offset);
        let offset = offset as usize;
        if offset > next_offset {
            // This `inspect` is okay since we have checked that it is not within a relocation, it
            // is within the bounds of the allocation, and it doesn't affect interpreter execution
            // (we inspect the result after interpreter execution). Any undef byte is replaced with
            // some arbitrary byte value.
            //
            // FIXME: relay undef bytes to codegen as undef const bytes
            let bytes = alloc.inspect_with_undef_and_ptr_outside_interpreter(next_offset..offset);
            llvals.push(cx.const_bytes(bytes));
        }
        // The bytes under a relocation store the pointer's *offset* into its target
        // allocation; decode it with the target's endianness.
        let ptr_offset = read_target_uint(
            dl.endian,
            // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
            // affect interpreter execution (we inspect the result after interpreter execution),
            // and we properly interpret the relocation as a relocation pointer offset.
            alloc.inspect_with_undef_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
        ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
        llvals.push(cx.scalar_to_backend(
            Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
            &layout::Scalar {
                value: layout::Primitive::Pointer,
                // Full range: the pointer value itself carries no validity refinement here.
                valid_range: 0..=!0
            },
            cx.type_i8p()
        ));
        next_offset = offset + pointer_size;
    }
    // Emit any plain bytes after the final relocation (possibly an empty run).
    if alloc.len() >= next_offset {
        let range = next_offset..alloc.len();
        // This `inspect` is okay since we have check that it is after all relocations, it is
        // within the bounds of the allocation, and it doesn't affect interpreter execution (we
        // inspect the result after interpreter execution). Any undef byte is replaced with some
        // arbitrary byte value.
        //
        // FIXME: relay undef bytes to codegen as undef const bytes
        let bytes = alloc.inspect_with_undef_and_ptr_outside_interpreter(range);
        llvals.push(cx.const_bytes(bytes));
    }

    // `true` = packed struct, so the byte layout matches the allocation exactly.
    cx.const_struct(&llvals, true)
}
77
78 pub fn codegen_static_initializer(
79 cx: &CodegenCx<'ll, 'tcx>,
80 def_id: DefId,
81 ) -> Result<(&'ll Value, &'tcx Allocation), ErrorHandled> {
82 let instance = ty::Instance::mono(cx.tcx, def_id);
83 let cid = GlobalId {
84 instance,
85 promoted: None,
86 };
87 let param_env = ty::ParamEnv::reveal_all();
88 let static_ = cx.tcx.const_eval(param_env.and(cid))?;
89
90 let alloc = match static_.val {
91 ConstValue::ByRef {
92 alloc, offset,
93 } if offset.bytes() == 0 => {
94 alloc
95 },
96 _ => bug!("static const eval returned {:#?}", static_),
97 };
98 Ok((const_alloc_to_llvm(cx, alloc), alloc))
99 }
100
101 fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
102 gv: &'ll Value,
103 mut align: Align) {
104 // The target may require greater alignment for globals than the type does.
105 // Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
106 // which can force it to be smaller. Rust doesn't support this yet.
107 if let Some(min) = cx.sess().target.target.options.min_global_align {
108 match Align::from_bits(min) {
109 Ok(min) => align = align.max(min),
110 Err(err) => {
111 cx.sess().err(&format!("invalid minimum global alignment: {}", err));
112 }
113 }
114 }
115 unsafe {
116 llvm::LLVMSetAlignment(gv, align.bytes() as u32);
117 }
118 }
119
/// Declares the LLVM global for an external static `sym` of type `ty`, honoring an
/// explicit `#[linkage = "..."]` attribute if present.
///
/// With an explicit linkage the static must be a raw pointer; the function then emits
/// the two-global indirection described below so that weak/undefined symbols resolve
/// to a null pointer instead of violating Rust's non-null reference rules. Without
/// one, it emits a plain external declaration.
///
/// Fatal errors are reported at `span` when the type is not a raw pointer or the
/// wrapper symbol clashes with an existing definition.
fn check_and_apply_linkage(
    cx: &CodegenCx<'ll, 'tcx>,
    attrs: &CodegenFnAttrs,
    ty: Ty<'tcx>,
    sym: Symbol,
    span: Span
) -> &'ll Value {
    let llty = cx.layout_of(ty).llvm_type(cx);
    let sym = sym.as_str();
    if let Some(linkage) = attrs.linkage {
        debug!("get_static: sym={} linkage={:?}", sym, linkage);

        // If this is a static with a linkage specified, then we need to handle
        // it a little specially. The typesystem prevents things like &T and
        // extern "C" fn() from being non-null, so we can't just declare a
        // static and call it a day. Some linkages (like weak) will make it such
        // that the static actually has a null value.
        let llty2 = if let ty::RawPtr(ref mt) = ty.sty {
            // Declare `foo` with the *pointee* type; the pointer indirection is
            // supplied by the wrapper global below.
            cx.layout_of(mt.ty).llvm_type(cx)
        } else {
            cx.sess().span_fatal(
                span, "must have type `*const T` or `*mut T` due to `#[linkage]` attribute")
        };
        unsafe {
            // Declare a symbol `foo` with the desired linkage.
            let g1 = cx.declare_global(&sym, llty2);
            llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));

            // Declare an internal global `extern_with_linkage_foo` which
            // is initialized with the address of `foo`. If `foo` is
            // discarded during linking (for example, if `foo` has weak
            // linkage and there are no definitions), then
            // `extern_with_linkage_foo` will instead be initialized to
            // zero.
            let mut real_name = "_rust_extern_with_linkage_".to_string();
            real_name.push_str(&sym);
            let g2 = cx.define_global(&real_name, llty).unwrap_or_else(||{
                cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym))
            });
            llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
            llvm::LLVMSetInitializer(g2, g1);
            // Callers see the wrapper, never the possibly-null `foo` directly.
            g2
        }
    } else {
        // Generate an external declaration.
        // FIXME(nagisa): investigate whether it can be changed into define_global
        cx.declare_global(&sym, llty)
    }
}
169
170 pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
171 unsafe {
172 llvm::LLVMConstPointerCast(val, ty)
173 }
174 }
175
impl CodegenCx<'ll, 'tcx> {
    /// Emits a constant bitcast of `val` to `ty` — a type-level reinterpretation
    /// of the constant, free at runtime.
    crate fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
        unsafe {
            llvm::LLVMConstBitCast(val, ty)
        }
    }

    /// Creates a private global initialized with `cv` and returns it.
    ///
    /// `kind` supplies a human-readable prefix for the generated symbol name;
    /// with `-Zfewer-names` (or no `kind`) an unnamed private global is used
    /// instead. The global gets `align`, private linkage, and unnamed_addr
    /// (its address is not significant, enabling merging).
    crate fn static_addr_of_mut(
        &self,
        cv: &'ll Value,
        align: Align,
        kind: Option<&str>,
    ) -> &'ll Value {
        unsafe {
            let gv = match kind {
                Some(kind) if !self.tcx.sess.fewer_names() => {
                    let name = self.generate_local_symbol_name(kind);
                    // Generated names are unique per codegen unit, so a clash is a bug.
                    let gv = self.define_global(&name[..],
                                                self.val_ty(cv)).unwrap_or_else(||{
                        bug!("symbol `{}` is already defined", name);
                    });
                    llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
                    gv
                },
                _ => self.define_private_global(self.val_ty(cv)),
            };
            llvm::LLVMSetInitializer(gv, cv);
            set_global_alignment(&self, gv, align);
            SetUnnamedAddr(gv, true);
            gv
        }
    }

    /// Returns the LLVM global for the static `def_id`, declaring it on first use
    /// and caching the result in `self.instances`.
    ///
    /// Statics defined in the current codegen unit must already be in the cache
    /// (asserted below); this path only *declares* locals from other CGUs and
    /// externs, applying thread-local and DLL-storage attributes as needed.
    crate fn get_static(&self, def_id: DefId) -> &'ll Value {
        let instance = Instance::mono(self.tcx, def_id);
        if let Some(&g) = self.instances.borrow().get(&instance) {
            return g;
        }

        let defined_in_current_codegen_unit = self.codegen_unit
            .items()
            .contains_key(&MonoItem::Static(def_id));
        assert!(!defined_in_current_codegen_unit,
                "consts::get_static() should always hit the cache for \
                 statics defined in the same CGU, but did not for `{:?}`",
                def_id);

        let ty = instance.ty(self.tcx);
        let sym = self.tcx.symbol_name(instance).name.as_symbol();

        debug!("get_static: sym={} instance={:?}", sym, instance);

        let g = if let Some(id) = self.tcx.hir().as_local_hir_id(def_id) {
            // Local to this crate (but defined in another CGU): declare it from
            // its HIR item so we can read its attributes.

            let llty = self.layout_of(ty).llvm_type(self);
            let (g, attrs) = match self.tcx.hir().get(id) {
                Node::Item(&hir::Item {
                    ref attrs, span, node: hir::ItemKind::Static(..), ..
                }) => {
                    let sym_str = sym.as_str();
                    if self.get_declared_value(&sym_str).is_some() {
                        span_bug!(span, "Conflicting symbol names for static?");
                    }

                    let g = self.define_global(&sym_str, llty).unwrap();

                    // Statics not reachable from other crates can be hidden,
                    // letting the linker drop/localize them.
                    if !self.tcx.is_reachable_non_generic(def_id) {
                        unsafe {
                            llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden);
                        }
                    }

                    (g, attrs)
                }

                Node::ForeignItem(&hir::ForeignItem {
                    ref attrs, span, node: hir::ForeignItemKind::Static(..), ..
                }) => {
                    let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
                    (check_and_apply_linkage(&self, &fn_attrs, ty, sym, span), attrs)
                }

                item => bug!("get_static: expected static, found {:?}", item)
            };

            debug!("get_static: sym={} attrs={:?}", sym, attrs);

            for attr in attrs {
                if attr.check_name(sym::thread_local) {
                    llvm::set_thread_local_mode(g, self.tls_model);
                }
            }

            g
        } else {
            // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
            debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id));

            let attrs = self.tcx.codegen_fn_attrs(def_id);
            let span = self.tcx.def_span(def_id);
            let g = check_and_apply_linkage(&self, &attrs, ty, sym, span);

            // Thread-local statics in some other crate need to *always* be linked
            // against in a thread-local fashion, so we need to be sure to apply the
            // thread-local attribute locally if it was present remotely. If we
            // don't do this then linker errors can be generated where the linker
            // complains that one object files has a thread local version of the
            // symbol and another one doesn't.
            if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
                llvm::set_thread_local_mode(g, self.tls_model);
            }

            let needs_dll_storage_attr =
                self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
                // ThinLTO can't handle this workaround in all cases, so we don't
                // emit the attrs. Instead we make them unnecessary by disallowing
                // dynamic linking when linker plugin based LTO is enabled.
                !self.tcx.sess.opts.cg.linker_plugin_lto.enabled();

            // If this assertion triggers, there's something wrong with commandline
            // argument validation.
            debug_assert!(!(self.tcx.sess.opts.cg.linker_plugin_lto.enabled() &&
                            self.tcx.sess.target.target.options.is_like_msvc &&
                            self.tcx.sess.opts.cg.prefer_dynamic));

            if needs_dll_storage_attr {
                // This item is external but not foreign, i.e., it originates from an external Rust
                // crate. Since we don't know whether this crate will be linked dynamically or
                // statically in the final application, we always mark such symbols as 'dllimport'.
                // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
                // to make things work.
                //
                // However, in some scenarios we defer emission of statics to downstream
                // crates, so there are cases where a static with an upstream DefId
                // is actually present in the current crate. We can find out via the
                // is_codegened_item query.
                if !self.tcx.is_codegened_item(def_id) {
                    unsafe {
                        llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
                    }
                }
            }
            g
        };

        if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) {
            // For foreign (native) libs we know the exact storage type to use.
            unsafe {
                llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
            }
        }

        self.instances.borrow_mut().insert(instance, g);
        g
    }
}
332
impl StaticMethods for CodegenCx<'ll, 'tcx> {
    /// Returns the address of an *immutable* global holding `cv`, deduplicating
    /// identical constants via the `const_globals` cache.
    fn static_addr_of(
        &self,
        cv: &'ll Value,
        align: Align,
        kind: Option<&str>,
    ) -> &'ll Value {
        if let Some(&gv) = self.const_globals.borrow().get(&cv) {
            unsafe {
                // Upgrade the alignment in cases where the same constant is used with different
                // alignment requirements
                let llalign = align.bytes() as u32;
                if llalign > llvm::LLVMGetAlignment(gv) {
                    llvm::LLVMSetAlignment(gv, llalign);
                }
            }
            return gv;
        }
        let gv = self.static_addr_of_mut(cv, align, kind);
        unsafe {
            // Unlike `static_addr_of_mut`, this global is marked constant so LLVM
            // can place it in read-only memory.
            llvm::LLVMSetGlobalConstant(gv, True);
        }
        self.const_globals.borrow_mut().insert(cv, gv);
        gv
    }

    /// Codegens the initializer of the static `def_id` into its LLVM global:
    /// evaluates the initializer, fixes up the global's type if the declaration
    /// used a different one, and applies alignment, constness, TLS, section, and
    /// `#[used]` attributes.
    fn codegen_static(
        &self,
        def_id: DefId,
        is_mutable: bool,
    ) {
        unsafe {
            let attrs = self.tcx.codegen_fn_attrs(def_id);

            let (v, alloc) = match codegen_static_initializer(&self, def_id) {
                Ok(v) => v,
                // Error has already been reported
                Err(_) => return,
            };

            let g = self.get_static(def_id);

            // boolean SSA values are i1, but they have to be stored in i8 slots,
            // otherwise some LLVM optimization passes don't work as expected
            let mut val_llty = self.val_ty(v);
            let v = if val_llty == self.type_i1() {
                val_llty = self.type_i8();
                llvm::LLVMConstZExt(v, val_llty)
            } else {
                v
            };

            let instance = Instance::mono(self.tcx, def_id);
            let ty = instance.ty(self.tcx);
            let llty = self.layout_of(ty).llvm_type(self);
            let g = if val_llty == llty {
                g
            } else {
                // If we created the global with the wrong type,
                // correct the type.
                // Free up the symbol name first so the correctly-typed
                // replacement can claim it.
                let empty_string = const_cstr!("");
                let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g));
                let name_string = CString::new(name_str_ref.to_bytes()).unwrap();
                llvm::LLVMSetValueName(g, empty_string.as_ptr());

                let linkage = llvm::LLVMRustGetLinkage(g);
                let visibility = llvm::LLVMRustGetVisibility(g);

                let new_g = llvm::LLVMRustGetOrInsertGlobal(
                    self.llmod, name_string.as_ptr(), val_llty);

                llvm::LLVMRustSetLinkage(new_g, linkage);
                llvm::LLVMRustSetVisibility(new_g, visibility);

                // To avoid breaking any invariants, we leave around the old
                // global for the moment; we'll replace all references to it
                // with the new global later. (See base::codegen_backend.)
                self.statics_to_rauw.borrow_mut().push((g, new_g));
                new_g
            };
            set_global_alignment(&self, g, self.align_of(ty));
            llvm::LLVMSetInitializer(g, v);

            // As an optimization, all shared statics which do not have interior
            // mutability are placed into read-only memory.
            if !is_mutable {
                if self.type_is_freeze(ty) {
                    llvm::LLVMSetGlobalConstant(g, llvm::True);
                }
            }

            debuginfo::create_global_var_metadata(&self, def_id, g);

            if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
                llvm::set_thread_local_mode(g, self.tls_model);

                // Do not allow LLVM to change the alignment of a TLS on macOS.
                //
                // By default a global's alignment can be freely increased.
                // This allows LLVM to generate more performant instructions
                // e.g., using load-aligned into a SIMD register.
                //
                // However, on macOS 10.10 or below, the dynamic linker does not
                // respect any alignment given on the TLS (radar 24221680).
                // This will violate the alignment assumption, and causing segfault at runtime.
                //
                // This bug is very easy to trigger. In `println!` and `panic!`,
                // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
                // which the values would be `mem::replace`d on initialization.
                // The implementation of `mem::replace` will use SIMD
                // whenever the size is 32 bytes or higher. LLVM notices SIMD is used
                // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
                // which macOS's dyld disregarded and causing crashes
                // (see issues #51794, #51758, #50867, #48866 and #44056).
                //
                // To workaround the bug, we trick LLVM into not increasing
                // the global's alignment by explicitly assigning a section to it
                // (equivalent to automatically generating a `#[link_section]` attribute).
                // See the comment in the `GlobalValue::canIncreaseAlignment()` function
                // of `lib/IR/Globals.cpp` for why this works.
                //
                // When the alignment is not increased, the optimized `mem::replace`
                // will use load-unaligned instructions instead, and thus avoiding the crash.
                //
                // We could remove this hack whenever we decide to drop macOS 10.10 support.
                if self.tcx.sess.target.target.options.is_like_osx {
                    assert_eq!(alloc.relocations().len(), 0);

                    let is_zeroed = {
                        // Treats undefined bytes as if they were defined with the byte value that
                        // happens to be currently assigned in mir. This is valid since reading
                        // undef bytes may yield arbitrary values.
                        //
                        // FIXME: ignore undef bytes even with representation `!= 0`.
                        //
                        // The `inspect` method is okay here because we checked relocations, and
                        // because we are doing this access to inspect the final interpreter state
                        // (not as part of the interpreter execution).
                        alloc.inspect_with_undef_and_ptr_outside_interpreter(0..alloc.len())
                            .iter()
                            .all(|b| *b == 0)
                    };
                    let sect_name = if is_zeroed {
                        CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0")
                    } else {
                        CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0")
                    };
                    llvm::LLVMSetSection(g, sect_name.as_ptr());
                }
            }


            // Wasm statics with custom link sections get special treatment as they
            // go into custom sections of the wasm executable.
            if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
                if let Some(section) = attrs.link_section {
                    let section = llvm::LLVMMDStringInContext(
                        self.llcx,
                        section.as_str().as_ptr() as *const _,
                        section.as_str().len() as c_uint,
                    );
                    assert!(alloc.relocations().is_empty());

                    // The `inspect` method is okay here because we checked relocations, and
                    // because we are doing this access to inspect the final interpreter state (not
                    // as part of the interpreter execution).
                    let bytes = alloc.inspect_with_undef_and_ptr_outside_interpreter(
                        0..alloc.len());
                    let alloc = llvm::LLVMMDStringInContext(
                        self.llcx,
                        bytes.as_ptr() as *const _,
                        bytes.len() as c_uint,
                    );
                    let data = [section, alloc];
                    let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2);
                    llvm::LLVMAddNamedMetadataOperand(
                        self.llmod,
                        "wasm.custom_sections\0".as_ptr() as *const _,
                        meta,
                    );
                }
            } else {
                base::set_link_section(g, &attrs);
            }

            if attrs.flags.contains(CodegenFnAttrFlags::USED) {
                // This static will be stored in the llvm.used variable which is an array of i8*
                let cast = llvm::LLVMConstPointerCast(g, self.type_i8p());
                self.used_statics.borrow_mut().push(cast);
            }
        }
    }
}