]>
Commit | Line | Data |
---|---|---|
a1dfa0c6 XL |
1 | //! Codegen the completed AST to the LLVM IR. |
2 | //! | |
60c5eb7d XL |
3 | //! Some functions here, such as `codegen_block` and `codegen_expr`, return a value -- |
4 | //! the result of the codegen to LLVM -- while others, such as `codegen_fn` | |
5 | //! and `mono_item`, are called only for the side effect of adding a | |
a1dfa0c6 XL |
6 | //! particular definition to the LLVM IR output we're producing. |
7 | //! | |
8 | //! Hopefully useful general knowledge about codegen: | |
9 | //! | |
60c5eb7d | 10 | //! * There's no way to find out the `Ty` type of a `Value`. Doing so |
9fa01778 XL |
11 | //! would be "trying to get the eggs out of an omelette" (credit: |
12 | //! pcwalton). You can, instead, find out its `llvm::Type` by calling `val_ty`, | |
13 | //! but one `llvm::Type` corresponds to many `Ty`s; for instance, `tup(int, int, | |
14 | //! int)` and `rec(x=int, y=int, z=int)` will have the same `llvm::Type`. | |
a1dfa0c6 | 15 | |
60c5eb7d | 16 | use crate::back::write::{ |
dfeec247 XL |
17 | start_async_codegen, submit_codegened_module_to_llvm, submit_post_lto_module_to_llvm, |
18 | submit_pre_lto_module_to_llvm, OngoingCodegen, | |
60c5eb7d | 19 | }; |
dfeec247 | 20 | use crate::common::{IntPredicate, RealPredicate, TypeKind}; |
60c5eb7d XL |
21 | use crate::meth; |
22 | use crate::mir; | |
23 | use crate::mir::operand::OperandValue; | |
24 | use crate::mir::place::PlaceRef; | |
25 | use crate::traits::*; | |
dfeec247 | 26 | use crate::{CachedModuleCodegen, CrateInfo, MemFlags, ModuleCodegen, ModuleKind}; |
a1dfa0c6 | 27 | |
74b04a01 | 28 | use rustc_attr as attr; |
dfeec247 XL |
29 | use rustc_data_structures::fx::FxHashMap; |
30 | use rustc_data_structures::profiling::print_time_passes_entry; | |
31 | use rustc_data_structures::sync::{par_iter, Lock, ParallelIterator}; | |
32 | use rustc_hir as hir; | |
33 | use rustc_hir::def_id::{DefId, LOCAL_CRATE}; | |
ba9703b0 | 34 | use rustc_hir::lang_items::StartFnLangItem; |
e74abb32 | 35 | use rustc_index::vec::Idx; |
ba9703b0 XL |
36 | use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; |
37 | use rustc_middle::middle::cstore::EncodedMetadata; | |
38 | use rustc_middle::middle::cstore::{self, LinkagePreference}; | |
39 | use rustc_middle::middle::lang_items; | |
40 | use rustc_middle::mir::mono::{CodegenUnit, CodegenUnitNameBuilder, MonoItem}; | |
41 | use rustc_middle::ty::layout::{self, HasTyCtxt, TyAndLayout}; | |
42 | use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; | |
43 | use rustc_middle::ty::query::Providers; | |
44 | use rustc_middle::ty::{self, Instance, Ty, TyCtxt}; | |
dfeec247 | 45 | use rustc_session::cgu_reuse_tracker::CguReuse; |
ba9703b0 XL |
46 | use rustc_session::config::{self, EntryFnType, Lto}; |
47 | use rustc_session::Session; | |
dfeec247 | 48 | use rustc_span::Span; |
ba9703b0 XL |
49 | use rustc_symbol_mangling::test as symbol_names_test; |
50 | use rustc_target::abi::{Abi, Align, LayoutOf, Scalar, VariantIdx}; | |
a1dfa0c6 | 51 | |
a1dfa0c6 XL |
52 | use std::cmp; |
53 | use std::ops::{Deref, DerefMut}; | |
dfeec247 | 54 | use std::time::{Duration, Instant}; |
a1dfa0c6 | 55 | |
dfeec247 | 56 | pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, signed: bool) -> IntPredicate { |
a1dfa0c6 XL |
57 | match op { |
58 | hir::BinOpKind::Eq => IntPredicate::IntEQ, | |
59 | hir::BinOpKind::Ne => IntPredicate::IntNE, | |
dfeec247 XL |
60 | hir::BinOpKind::Lt => { |
61 | if signed { | |
62 | IntPredicate::IntSLT | |
63 | } else { | |
64 | IntPredicate::IntULT | |
65 | } | |
a1dfa0c6 | 66 | } |
dfeec247 XL |
67 | hir::BinOpKind::Le => { |
68 | if signed { | |
69 | IntPredicate::IntSLE | |
70 | } else { | |
71 | IntPredicate::IntULE | |
72 | } | |
73 | } | |
74 | hir::BinOpKind::Gt => { | |
75 | if signed { | |
76 | IntPredicate::IntSGT | |
77 | } else { | |
78 | IntPredicate::IntUGT | |
79 | } | |
80 | } | |
81 | hir::BinOpKind::Ge => { | |
82 | if signed { | |
83 | IntPredicate::IntSGE | |
84 | } else { | |
85 | IntPredicate::IntUGE | |
86 | } | |
87 | } | |
88 | op => bug!( | |
89 | "comparison_op_to_icmp_predicate: expected comparison operator, \ | |
90 | found {:?}", | |
91 | op | |
92 | ), | |
a1dfa0c6 XL |
93 | } |
94 | } | |
95 | ||
96 | pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate { | |
97 | match op { | |
98 | hir::BinOpKind::Eq => RealPredicate::RealOEQ, | |
99 | hir::BinOpKind::Ne => RealPredicate::RealUNE, | |
100 | hir::BinOpKind::Lt => RealPredicate::RealOLT, | |
101 | hir::BinOpKind::Le => RealPredicate::RealOLE, | |
102 | hir::BinOpKind::Gt => RealPredicate::RealOGT, | |
103 | hir::BinOpKind::Ge => RealPredicate::RealOGE, | |
104 | op => { | |
dfeec247 XL |
105 | bug!( |
106 | "comparison_op_to_fcmp_predicate: expected comparison operator, \ | |
107 | found {:?}", | |
108 | op | |
109 | ); | |
a1dfa0c6 XL |
110 | } |
111 | } | |
112 | } | |
113 | ||
dc9dc135 | 114 | pub fn compare_simd_types<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( |
a1dfa0c6 XL |
115 | bx: &mut Bx, |
116 | lhs: Bx::Value, | |
117 | rhs: Bx::Value, | |
118 | t: Ty<'tcx>, | |
119 | ret_ty: Bx::Type, | |
dc9dc135 | 120 | op: hir::BinOpKind, |
a1dfa0c6 | 121 | ) -> Bx::Value { |
e74abb32 | 122 | let signed = match t.kind { |
a1dfa0c6 XL |
123 | ty::Float(_) => { |
124 | let cmp = bin_op_to_fcmp_predicate(op); | |
125 | let cmp = bx.fcmp(cmp, lhs, rhs); | |
126 | return bx.sext(cmp, ret_ty); | |
dfeec247 | 127 | } |
a1dfa0c6 XL |
128 | ty::Uint(_) => false, |
129 | ty::Int(_) => true, | |
130 | _ => bug!("compare_simd_types: invalid SIMD type"), | |
131 | }; | |
132 | ||
133 | let cmp = bin_op_to_icmp_predicate(op, signed); | |
134 | let cmp = bx.icmp(cmp, lhs, rhs); | |
135 | // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension | |
136 | // to get the correctly sized type. This will compile to a single instruction | |
137 | // once the IR is converted to assembly if the SIMD instruction is supported | |
138 | // by the target architecture. | |
139 | bx.sext(cmp, ret_ty) | |
140 | } | |
141 | ||
/// Retrieves the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit odd. It is intended for use in an upcast,
/// where the new vtable for an object will be derived from the old one.
pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
    cx: &Cx,
    source: Ty<'tcx>,
    target: Ty<'tcx>,
    old_info: Option<Cx::Value>,
) -> Cx::Value {
    // Reduce both sides to the structural "tails" that actually differ under
    // the coercion, so the match below only sees the interesting pair.
    let (source, target) =
        cx.tcx().struct_lockstep_tails_erasing_lifetimes(source, target, cx.param_env());
    match (&source.kind, &target.kind) {
        // `[T; N]` -> `[T]`: the lost information is the array length,
        // emitted as a `usize` constant.
        (&ty::Array(_, len), &ty::Slice(_)) => {
            cx.const_usize(len.eval_usize(cx.tcx(), ty::ParamEnv::reveal_all()))
        }
        (&ty::Dynamic(..), &ty::Dynamic(..)) => {
            // For now, upcasts are limited to changes in marker
            // traits, and hence never actually require an actual
            // change to the vtable.
            old_info.expect("unsized_info: missing old info for trait upcast")
        }
        // Sized type -> trait object: the info is the vtable pointer, cast to
        // the backend type of the fat pointer's extra field.
        (_, &ty::Dynamic(ref data, ..)) => {
            let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)).field(cx, FAT_PTR_EXTRA);
            cx.const_ptrcast(
                meth::get_vtable(cx, source, data.principal()),
                cx.backend_type(vtable_ptr),
            )
        }
        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
    }
}
175 | ||
/// Coerces `src` to `dst_ty`. `src_ty` must be a thin pointer.
///
/// Returns the `(data, extra)` pair that makes up the resulting fat pointer.
pub fn unsize_thin_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: Bx::Value,
    src_ty: Ty<'tcx>,
    dst_ty: Ty<'tcx>,
) -> (Bx::Value, Bx::Value) {
    debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
    match (&src_ty.kind, &dst_ty.kind) {
        // Reference/raw-pointer to reference/raw-pointer: cast the data
        // pointer and synthesize the extra data (length or vtable).
        (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
        | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
            assert!(bx.cx().type_is_sized(a));
            let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
            (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
        }
        // Smart-pointer-like ADT (e.g. a wrapper struct): exactly one
        // non-ZST field changes type under the coercion; find it and recurse.
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b);

            let src_layout = bx.cx().layout_of(src_ty);
            let dst_layout = bx.cx().layout_of(dst_ty);
            let mut result = None;
            for i in 0..src_layout.fields.count() {
                let src_f = src_layout.field(bx.cx(), i);
                // All fields must live at offset 0 (ZSTs plus the pointer).
                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
                if src_f.is_zst() {
                    continue;
                }
                assert_eq!(src_layout.size, src_f.size);

                let dst_f = dst_layout.field(bx.cx(), i);
                assert_ne!(src_f.ty, dst_f.ty);
                // `assert_eq!(result, None)` enforces that only one field
                // participates in the unsizing.
                assert_eq!(result, None);
                result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty));
            }
            let (lldata, llextra) = result.unwrap();
            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
            // FIXME(eddyb) move these out of this `match` arm, so they're always
            // applied, uniformly, no matter the source/destination types.
            (
                bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)),
                bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true)),
            )
        }
        _ => bug!("unsize_thin_ptr: called on bad types"),
    }
}
223 | ||
/// Coerces `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty`, and stores the result in `dst`.
pub fn coerce_unsized_into<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    src: PlaceRef<'tcx, Bx::Value>,
    dst: PlaceRef<'tcx, Bx::Value>,
) {
    let src_ty = src.layout.ty;
    let dst_ty = dst.layout.ty;
    match (&src_ty.kind, &dst_ty.kind) {
        // Pointer-to-pointer unsizing: build the fat pointer pair and store it.
        (&ty::Ref(..), &ty::Ref(..) | &ty::RawPtr(..)) | (&ty::RawPtr(..), &ty::RawPtr(..)) => {
            let (base, info) = match bx.load_operand(src).val {
                OperandValue::Pair(base, info) => {
                    // fat-ptr to fat-ptr unsize preserves the vtable
                    // i.e., &'a fmt::Debug+Send => &'a fmt::Debug
                    // So we need to pointercast the base to ensure
                    // the types match up.
                    // FIXME(eddyb) use `scalar_pair_element_backend_type` here,
                    // like `unsize_thin_ptr` does.
                    let thin_ptr = dst.layout.field(bx.cx(), FAT_PTR_ADDR);
                    (bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info)
                }
                // Thin pointer: synthesize the extra (length/vtable) data.
                OperandValue::Immediate(base) => unsize_thin_ptr(bx, base, src_ty, dst_ty),
                OperandValue::Ref(..) => bug!(),
            };
            OperandValue::Pair(base, info).store(bx, dst);
        }

        // ADT-to-ADT coercion: copy each field; the (single) field whose type
        // changes is coerced recursively, the rest are memcpy'd verbatim.
        (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
            assert_eq!(def_a, def_b);

            for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
                let src_f = src.project_field(bx, i);
                let dst_f = dst.project_field(bx, i);

                if dst_f.layout.is_zst() {
                    continue;
                }

                if src_f.layout.ty == dst_f.layout.ty {
                    memcpy_ty(
                        bx,
                        dst_f.llval,
                        dst_f.align,
                        src_f.llval,
                        src_f.align,
                        src_f.layout,
                        MemFlags::empty(),
                    );
                } else {
                    coerce_unsized_into(bx, src_f, dst_f);
                }
            }
        }
        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", src_ty, dst_ty,),
    }
}
281 | ||
/// Public entry point for adjusting the RHS of a shift expression to the
/// LHS's integer width; simply delegates to `cast_shift_rhs`. For non-shift
/// operators `rhs` is returned unchanged.
pub fn cast_shift_expr_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    op: hir::BinOpKind,
    lhs: Bx::Value,
    rhs: Bx::Value,
) -> Bx::Value {
    cast_shift_rhs(bx, op, lhs, rhs)
}
290 | ||
dc9dc135 | 291 | fn cast_shift_rhs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( |
a1dfa0c6 XL |
292 | bx: &mut Bx, |
293 | op: hir::BinOpKind, | |
294 | lhs: Bx::Value, | |
295 | rhs: Bx::Value, | |
296 | ) -> Bx::Value { | |
297 | // Shifts may have any size int on the rhs | |
298 | if op.is_shift() { | |
299 | let mut rhs_llty = bx.cx().val_ty(rhs); | |
300 | let mut lhs_llty = bx.cx().val_ty(lhs); | |
301 | if bx.cx().type_kind(rhs_llty) == TypeKind::Vector { | |
302 | rhs_llty = bx.cx().element_type(rhs_llty) | |
303 | } | |
304 | if bx.cx().type_kind(lhs_llty) == TypeKind::Vector { | |
305 | lhs_llty = bx.cx().element_type(lhs_llty) | |
306 | } | |
307 | let rhs_sz = bx.cx().int_width(rhs_llty); | |
308 | let lhs_sz = bx.cx().int_width(lhs_llty); | |
309 | if lhs_sz < rhs_sz { | |
310 | bx.trunc(rhs, lhs_llty) | |
311 | } else if lhs_sz > rhs_sz { | |
312 | // FIXME (#1877: If in the future shifting by negative | |
313 | // values is no longer undefined then this is wrong. | |
314 | bx.zext(rhs, lhs_llty) | |
315 | } else { | |
316 | rhs | |
317 | } | |
318 | } else { | |
319 | rhs | |
320 | } | |
321 | } | |
322 | ||
/// Returns `true` if this session's target will use SEH-based unwinding.
///
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
    // The target-spec flag covers every MSVC-like target (see doc above for
    // the 64-bit caveat).
    sess.target.target.options.is_like_msvc
}
331 | ||
dc9dc135 | 332 | pub fn from_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( |
a1dfa0c6 | 333 | bx: &mut Bx, |
dc9dc135 | 334 | val: Bx::Value, |
a1dfa0c6 | 335 | ) -> Bx::Value { |
dfeec247 | 336 | if bx.cx().val_ty(val) == bx.cx().type_i1() { bx.zext(val, bx.cx().type_i8()) } else { val } |
a1dfa0c6 XL |
337 | } |
338 | ||
dc9dc135 | 339 | pub fn to_immediate<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( |
a1dfa0c6 XL |
340 | bx: &mut Bx, |
341 | val: Bx::Value, | |
ba9703b0 | 342 | layout: layout::TyAndLayout<'_>, |
a1dfa0c6 | 343 | ) -> Bx::Value { |
ba9703b0 | 344 | if let Abi::Scalar(ref scalar) = layout.abi { |
a1dfa0c6 XL |
345 | return to_immediate_scalar(bx, val, scalar); |
346 | } | |
347 | val | |
348 | } | |
349 | ||
dc9dc135 | 350 | pub fn to_immediate_scalar<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( |
a1dfa0c6 XL |
351 | bx: &mut Bx, |
352 | val: Bx::Value, | |
ba9703b0 | 353 | scalar: &Scalar, |
a1dfa0c6 XL |
354 | ) -> Bx::Value { |
355 | if scalar.is_bool() { | |
356 | return bx.trunc(val, bx.cx().type_i1()); | |
357 | } | |
358 | val | |
359 | } | |
360 | ||
dc9dc135 | 361 | pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( |
a1dfa0c6 XL |
362 | bx: &mut Bx, |
363 | dst: Bx::Value, | |
364 | dst_align: Align, | |
365 | src: Bx::Value, | |
366 | src_align: Align, | |
ba9703b0 | 367 | layout: TyAndLayout<'tcx>, |
a1dfa0c6 XL |
368 | flags: MemFlags, |
369 | ) { | |
370 | let size = layout.size.bytes(); | |
371 | if size == 0 { | |
372 | return; | |
373 | } | |
374 | ||
375 | bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags); | |
376 | } | |
377 | ||
/// Codegens the MIR body of a single monomorphized `instance` into the given
/// codegen context.
pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    instance: Instance<'tcx>,
) {
    // this is an info! to allow collecting monomorphization statistics
    // and to allow finding the last function before LLVM aborts from
    // release builds.
    info!("codegen_instance({})", instance);

    mir::codegen_mir::<Bx>(cx, instance);
}
389 | ||
/// Creates the `main` function which will initialize the rust runtime and call
/// users main function.
///
/// Returns `None` if the crate has no entry function, or if the entry
/// function does not belong to the current codegen unit.
pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
) -> Option<Bx::Function> {
    let (main_def_id, span) = match cx.tcx().entry_fn(LOCAL_CRATE) {
        Some((def_id, _)) => (def_id, cx.tcx().def_span(def_id)),
        None => return None,
    };

    let instance = Instance::mono(cx.tcx(), main_def_id);

    if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
        // We want to create the wrapper in the same codegen unit as Rust's main
        // function.
        return None;
    }

    let main_llfn = cx.get_fn_addr(instance);

    // `EntryFnType::Start` means the user supplied `#[start]` themselves, in
    // which case we call it directly rather than through the start lang item.
    return cx.tcx().entry_fn(LOCAL_CRATE).map(|(_, et)| {
        let use_start_lang_item = EntryFnType::Start != et;
        create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, use_start_lang_item)
    });

    /// Declares and fills in the C-ABI `main` symbol: forwards `argc`/`argv`
    /// (or zeros) to either the start lang item or the user's `#[start]` fn,
    /// and returns its result as the process exit code.
    fn create_entry_fn<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
        cx: &'a Bx::CodegenCx,
        sp: Span,
        rust_main: Bx::Value,
        rust_main_def_id: DefId,
        use_start_lang_item: bool,
    ) -> Bx::Function {
        // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
        // depending on whether the target needs `argc` and `argv` to be passed in.
        let llfty = if cx.sess().target.target.options.main_needs_argc_argv {
            cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
        } else {
            cx.type_func(&[], cx.type_int())
        };

        let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
        // Given that `main()` has no arguments,
        // then its return type cannot have
        // late-bound regions, since late-bound
        // regions must appear in the argument
        // listing.
        let main_ret_ty = cx.tcx().erase_regions(&main_ret_ty.no_bound_vars().unwrap());

        if cx.get_declared_value("main").is_some() {
            // FIXME: We should be smart and show a better diagnostic here.
            cx.sess()
                .struct_span_err(sp, "entry symbol `main` declared multiple times")
                .help("did you use `#[no_mangle]` on `fn main`? Use `#[start]` instead")
                .emit();
            cx.sess().abort_if_errors();
            bug!();
        }
        let llfn = cx.declare_cfn("main", llfty);

        // `main` should respect same config for frame pointer elimination as rest of code
        cx.set_frame_pointer_elimination(llfn);
        cx.apply_target_cpu_attr(llfn);

        let mut bx = Bx::new_block(&cx, llfn, "top");

        bx.insert_reference_to_gdb_debug_scripts_section_global();

        let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);

        let (start_fn, args) = if use_start_lang_item {
            // Resolve the start lang item instantiated with main's return type.
            let start_def_id = cx.tcx().require_lang_item(StartFnLangItem, None);
            let start_fn = cx.get_fn_addr(
                ty::Instance::resolve(
                    cx.tcx(),
                    ty::ParamEnv::reveal_all(),
                    start_def_id,
                    cx.tcx().intern_substs(&[main_ret_ty.into()]),
                )
                .unwrap(),
            );
            (
                start_fn,
                vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), arg_argc, arg_argv],
            )
        } else {
            debug!("using user-defined start fn");
            (rust_main, vec![arg_argc, arg_argv])
        };

        // Call the chosen entry point and return its result, cast to the
        // C `int` return type of `main`.
        let result = bx.call(start_fn, &args, None);
        let cast = bx.intcast(result, cx.type_int(), true);
        bx.ret(cast);

        llfn
    }
}
486 | ||
e74abb32 XL |
487 | /// Obtain the `argc` and `argv` values to pass to the rust start function. |
488 | fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( | |
489 | cx: &'a Bx::CodegenCx, | |
dfeec247 XL |
490 | bx: &mut Bx, |
491 | ) -> (Bx::Value, Bx::Value) { | |
e74abb32 XL |
492 | if cx.sess().target.target.options.main_needs_argc_argv { |
493 | // Params from native `main()` used as args for rust start function | |
494 | let param_argc = bx.get_param(0); | |
495 | let param_argv = bx.get_param(1); | |
496 | let arg_argc = bx.intcast(param_argc, cx.type_isize(), true); | |
497 | let arg_argv = param_argv; | |
498 | (arg_argc, arg_argv) | |
499 | } else { | |
500 | // The Rust start function doesn't need `argc` and `argv`, so just pass zeros. | |
501 | let arg_argc = bx.const_int(cx.type_int(), 0); | |
502 | let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p())); | |
503 | (arg_argc, arg_argv) | |
504 | } | |
505 | } | |
506 | ||
/// Worker id reserved for the main codegen thread itself; `usize::MAX` is
/// presumably chosen so it cannot collide with the index of a real codegen
/// unit — confirm against the coordinator in `back::write`.
pub const CODEGEN_WORKER_ID: usize = usize::MAX;
a1dfa0c6 XL |
508 | |
/// Drives codegen for the entire crate: starts the async backend coordinator,
/// emits the allocator shim and metadata module when needed, then codegens
/// (or reuses, for incremental builds) every codegen unit, returning the
/// in-flight `OngoingCodegen` handle.
pub fn codegen_crate<B: ExtraBackendMethods>(
    backend: B,
    tcx: TyCtxt<'tcx>,
    metadata: EncodedMetadata,
    need_metadata_module: bool,
) -> OngoingCodegen<B> {
    // Skip crate items and just output metadata in -Z no-codegen mode.
    if tcx.sess.opts.debugging_opts.no_codegen || !tcx.sess.opts.output_types.should_codegen() {
        let ongoing_codegen = start_async_codegen(backend, tcx, metadata, 1);

        ongoing_codegen.codegen_finished(tcx);

        finalize_tcx(tcx);

        ongoing_codegen.check_for_errors(tcx.sess);

        return ongoing_codegen;
    }

    let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);

    // Run the monomorphization collector and partition the collected items into
    // codegen units.
    let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1;

    // Force all codegen_unit queries so they are already either red or green
    // when compile_codegen_unit accesses them. We are not able to re-execute
    // the codegen_unit query from just the DepNode, so an unknown color would
    // lead to having to re-execute compile_codegen_unit, possibly
    // unnecessarily.
    if tcx.dep_graph.is_fully_enabled() {
        for cgu in codegen_units {
            tcx.codegen_unit(cgu.name());
        }
    }

    let ongoing_codegen = start_async_codegen(backend.clone(), tcx, metadata, codegen_units.len());
    let ongoing_codegen = AbortCodegenOnDrop::<B>(Some(ongoing_codegen));

    // Codegen an allocator shim, if necessary.
    //
    // If the crate doesn't have an `allocator_kind` set then there's definitely
    // no shim to generate. Otherwise we also check our dependency graph for all
    // our output crate types. If anything there looks like its a `Dynamic`
    // linkage, then it's already got an allocator shim and we'll be using that
    // one instead. If nothing exists then it's our job to generate the
    // allocator!
    let any_dynamic_crate = tcx.dependency_formats(LOCAL_CRATE).iter().any(|(_, list)| {
        use rustc_middle::middle::dependency_format::Linkage;
        list.iter().any(|&linkage| linkage == Linkage::Dynamic)
    });
    let allocator_module = if any_dynamic_crate {
        None
    } else if let Some(kind) = tcx.allocator_kind() {
        let llmod_id =
            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("allocator")).to_string();
        let mut modules = backend.new_metadata(tcx, &llmod_id);
        tcx.sess
            .time("write_allocator_module", || backend.codegen_allocator(tcx, &mut modules, kind));

        Some(ModuleCodegen { name: llmod_id, module_llvm: modules, kind: ModuleKind::Allocator })
    } else {
        None
    };

    if let Some(allocator_module) = allocator_module {
        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module);
    }

    if need_metadata_module {
        // Codegen the encoded metadata.
        let metadata_cgu_name =
            cgu_name_builder.build_cgu_name(LOCAL_CRATE, &["crate"], Some("metadata")).to_string();
        let mut metadata_llvm_module = backend.new_metadata(tcx, &metadata_cgu_name);
        tcx.sess.time("write_compressed_metadata", || {
            backend.write_compressed_metadata(
                tcx,
                &ongoing_codegen.metadata,
                &mut metadata_llvm_module,
            );
        });

        let metadata_module = ModuleCodegen {
            name: metadata_cgu_name,
            module_llvm: metadata_llvm_module,
            kind: ModuleKind::Metadata,
        };
        ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module);
    }

    // We sort the codegen units by size. This way we can schedule work for LLVM
    // a bit more efficiently.
    let codegen_units = {
        let mut codegen_units = codegen_units.iter().collect::<Vec<_>>();
        codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
        codegen_units
    };

    // Accumulated wall-clock time spent producing LLVM IR, reported below
    // for -Ztime-passes.
    let total_codegen_time = Lock::new(Duration::new(0, 0));

    // The non-parallel compiler can only translate codegen units to LLVM IR
    // on a single thread, leading to a staircase effect where the N LLVM
    // threads have to wait on the single codegen threads to generate work
    // for them. The parallel compiler does not have this restriction, so
    // we can pre-load the LLVM queue in parallel before handing off
    // coordination to the OnGoingCodegen scheduler.
    //
    // This likely is a temporary measure. Once we don't have to support the
    // non-parallel compiler anymore, we can compile CGUs end-to-end in
    // parallel and get rid of the complicated scheduling logic.
    let pre_compile_cgus = |cgu_reuse: &[CguReuse]| {
        if cfg!(parallel_compiler) {
            tcx.sess.time("compile_first_CGU_batch", || {
                // Try to find one CGU to compile per thread.
                let cgus: Vec<_> = cgu_reuse
                    .iter()
                    .enumerate()
                    .filter(|&(_, reuse)| reuse == &CguReuse::No)
                    .take(tcx.sess.threads())
                    .collect();

                // Compile the found CGUs in parallel.
                par_iter(cgus)
                    .map(|(i, _)| {
                        let start_time = Instant::now();
                        let module = backend.compile_codegen_unit(tcx, codegen_units[i].name());
                        let mut time = total_codegen_time.lock();
                        *time += start_time.elapsed();
                        (i, module)
                    })
                    .collect()
            })
        } else {
            FxHashMap::default()
        }
    };

    let mut cgu_reuse = Vec::new();
    let mut pre_compiled_cgus: Option<FxHashMap<usize, _>> = None;

    for (i, cgu) in codegen_units.iter().enumerate() {
        ongoing_codegen.wait_for_signal_to_codegen_item();
        ongoing_codegen.check_for_errors(tcx.sess);

        // Do some setup work in the first iteration
        if pre_compiled_cgus.is_none() {
            // Calculate the CGU reuse
            cgu_reuse = tcx.sess.time("find_cgu_reuse", || {
                codegen_units.iter().map(|cgu| determine_cgu_reuse(tcx, &cgu)).collect()
            });
            // Pre compile some CGUs
            pre_compiled_cgus = Some(pre_compile_cgus(&cgu_reuse));
        }

        let cgu_reuse = cgu_reuse[i];
        tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);

        match cgu_reuse {
            // Fresh codegen: take the pre-compiled module if the parallel
            // warm-up produced it, otherwise compile (and time) it here.
            CguReuse::No => {
                let (module, cost) =
                    if let Some(cgu) = pre_compiled_cgus.as_mut().unwrap().remove(&i) {
                        cgu
                    } else {
                        let start_time = Instant::now();
                        let module = backend.compile_codegen_unit(tcx, cgu.name());
                        let mut time = total_codegen_time.lock();
                        *time += start_time.elapsed();
                        module
                    };
                submit_codegened_module_to_llvm(
                    &backend,
                    &ongoing_codegen.coordinator_send,
                    module,
                    cost,
                );
                false
            }
            // Reuse the cached object from a previous compilation, submitted
            // either before or after LTO depending on the reuse kind.
            CguReuse::PreLto => {
                submit_pre_lto_module_to_llvm(
                    &backend,
                    tcx,
                    &ongoing_codegen.coordinator_send,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.work_product(tcx),
                    },
                );
                true
            }
            CguReuse::PostLto => {
                submit_post_lto_module_to_llvm(
                    &backend,
                    &ongoing_codegen.coordinator_send,
                    CachedModuleCodegen {
                        name: cgu.name().to_string(),
                        source: cgu.work_product(tcx),
                    },
                );
                true
            }
        };
    }

    ongoing_codegen.codegen_finished(tcx);

    // Since the main thread is sometimes blocked during codegen, we keep track
    // -Ztime-passes output manually.
    print_time_passes_entry(
        tcx.sess.time_passes(),
        "codegen_to_LLVM_IR",
        total_codegen_time.into_inner(),
    );

    ::rustc_incremental::assert_module_sources::assert_module_sources(tcx);

    symbol_names_test::report_symbol_names(tcx);

    ongoing_codegen.check_for_errors(tcx.sess);

    finalize_tcx(tcx);

    ongoing_codegen.into_inner()
}
732 | ||
/// A curious wrapper structure whose only purpose is to call `codegen_aborted`
/// when it's dropped abnormally.
///
/// In the process of working on rust-lang/rust#55238 a mysterious segfault was
/// stumbled upon. The segfault was never reproduced locally, but it was
/// suspected to be related to the fact that codegen worker threads were
/// sticking around by the time the main thread was exiting, causing issues.
///
/// This structure is an attempt to fix that issue where the `codegen_aborted`
/// message will block until all workers have finished. This should ensure that
/// even if the main codegen thread panics we'll wait for pending work to
/// complete before returning from the main thread, hopefully avoiding
/// segfaults.
///
/// If you see this comment in the code, then it means that this workaround
/// worked! We may yet one day track down the mysterious cause of that
/// segfault...
struct AbortCodegenOnDrop<B: ExtraBackendMethods>(Option<OngoingCodegen<B>>);

impl<B: ExtraBackendMethods> AbortCodegenOnDrop<B> {
    /// Takes the inner `OngoingCodegen` out, disarming the `Drop` impl
    /// (the `Option` becomes `None`, so `drop` will not call
    /// `codegen_aborted`).
    fn into_inner(mut self) -> OngoingCodegen<B> {
        self.0.take().unwrap()
    }
}

// Deref/DerefMut let callers use the wrapper as if it were the
// `OngoingCodegen` itself. Both panic if called after `into_inner`.
impl<B: ExtraBackendMethods> Deref for AbortCodegenOnDrop<B> {
    type Target = OngoingCodegen<B>;

    fn deref(&self) -> &OngoingCodegen<B> {
        self.0.as_ref().unwrap()
    }
}

impl<B: ExtraBackendMethods> DerefMut for AbortCodegenOnDrop<B> {
    fn deref_mut(&mut self) -> &mut OngoingCodegen<B> {
        self.0.as_mut().unwrap()
    }
}

impl<B: ExtraBackendMethods> Drop for AbortCodegenOnDrop<B> {
    fn drop(&mut self) {
        // Only reached when `into_inner` was never called, i.e. codegen did
        // not complete normally.
        if let Some(codegen) = self.0.take() {
            codegen.codegen_aborted();
        }
    }
}
779 | ||
dfeec247 XL |
780 | fn finalize_tcx(tcx: TyCtxt<'_>) { |
781 | tcx.sess.time("assert_dep_graph", || ::rustc_incremental::assert_dep_graph(tcx)); | |
782 | tcx.sess.time("serialize_dep_graph", || ::rustc_incremental::save_dep_graph(tcx)); | |
a1dfa0c6 | 783 | |
dfeec247 XL |
784 | // We assume that no queries are run past here. If there are new queries |
785 | // after this point, they'll show up as "<unknown>" in self-profiling data. | |
786 | { | |
787 | let _prof_timer = tcx.prof.generic_activity("self_profile_alloc_query_strings"); | |
788 | tcx.alloc_self_profile_query_strings(); | |
789 | } | |
a1dfa0c6 XL |
790 | } |
791 | ||
impl CrateInfo {
    /// Builds the `CrateInfo` used later by linking: for every crate in the
    /// dependency graph (`tcx.crates()`) it records native libraries, crate
    /// name, crate source, and missing lang items, and notes which crates
    /// provide the panic runtime / compiler builtins / profiler runtime.
    ///
    /// All data is pulled from `tcx` queries; no codegen state is touched.
    pub fn new(tcx: TyCtxt<'_>) -> CrateInfo {
        let mut info = CrateInfo {
            panic_runtime: None,
            compiler_builtins: None,
            profiler_runtime: None,
            is_no_builtins: Default::default(),
            native_libraries: Default::default(),
            used_libraries: tcx.native_libraries(LOCAL_CRATE),
            link_args: tcx.link_args(LOCAL_CRATE),
            crate_name: Default::default(),
            used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic),
            used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic),
            used_crate_source: Default::default(),
            lang_item_to_crate: Default::default(),
            missing_lang_items: Default::default(),
            dependency_formats: tcx.dependency_formats(LOCAL_CRATE),
        };
        let lang_items = tcx.lang_items();

        let crates = tcx.crates();

        // Pre-size the per-crate maps so the loop below doesn't reallocate.
        let n_crates = crates.len();
        info.native_libraries.reserve(n_crates);
        info.crate_name.reserve(n_crates);
        info.used_crate_source.reserve(n_crates);
        info.missing_lang_items.reserve(n_crates);

        for &cnum in crates.iter() {
            info.native_libraries.insert(cnum, tcx.native_libraries(cnum));
            info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string());
            info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum));
            // NOTE(review): at most one crate is expected to match each of
            // these roles; a later match overwrites an earlier one — presumably
            // enforced upstream, verify if that invariant matters here.
            if tcx.is_panic_runtime(cnum) {
                info.panic_runtime = Some(cnum);
            }
            if tcx.is_compiler_builtins(cnum) {
                info.compiler_builtins = Some(cnum);
            }
            if tcx.is_profiler_runtime(cnum) {
                info.profiler_runtime = Some(cnum);
            }
            if tcx.is_no_builtins(cnum) {
                info.is_no_builtins.insert(cnum);
            }
            // Map each lang item this crate is missing to the crate that does
            // define it (when one exists).
            let missing = tcx.missing_lang_items(cnum);
            for &item in missing.iter() {
                if let Ok(id) = lang_items.require(item) {
                    info.lang_item_to_crate.insert(item, id.krate);
                }
            }

            // No need to look for lang items that are whitelisted and don't
            // actually need to exist.
            let missing =
                missing.iter().cloned().filter(|&l| !lang_items::whitelisted(tcx, l)).collect();
            info.missing_lang_items.insert(cnum, missing);
        }

        info
    }
}
853 | ||
9fa01778 XL |
/// Installs query providers shared by extern and local crates:
/// `backend_optimization_level`, `dllimport_foreign_items`, and
/// `is_dllimport_foreign_item`.
pub fn provide_both(providers: &mut Providers<'_>) {
    // Decide the opt level the backend should use for this crate: the global
    // level, possibly upgraded when size-optimizing and some function carries
    // `#[optimize(speed)]`.
    providers.backend_optimization_level = |tcx, cratenum| {
        let for_speed = match tcx.sess.opts.optimize {
            // If globally no optimisation is done, #[optimize] has no effect.
            //
            // This is done because if we ended up "upgrading" to `-O2` here, we’d populate the
            // pass manager and it is likely that some module-wide passes (such as inliner or
            // cross-function constant propagation) would ignore the `optnone` annotation we put
            // on the functions, thus necessarily involving these functions into optimisations.
            config::OptLevel::No => return config::OptLevel::No,
            // If globally optimise-speed is already specified, just use that level.
            config::OptLevel::Less => return config::OptLevel::Less,
            config::OptLevel::Default => return config::OptLevel::Default,
            config::OptLevel::Aggressive => return config::OptLevel::Aggressive,
            // If globally optimize-for-size has been requested, use -O2 instead (if optimize(size)
            // are present).
            // Note: these two arms do NOT return — they fall through so the
            // mono-item scan below can decide whether the upgrade applies.
            config::OptLevel::Size => config::OptLevel::Default,
            config::OptLevel::SizeMin => config::OptLevel::Default,
        };

        // Scan all mono items; the first `#[optimize(speed)]` wins.
        let (defids, _) = tcx.collect_and_partition_mono_items(cratenum);
        for id in &*defids {
            let CodegenFnAttrs { optimize, .. } = tcx.codegen_fn_attrs(*id);
            match optimize {
                attr::OptimizeAttr::None => continue,
                attr::OptimizeAttr::Size => continue,
                attr::OptimizeAttr::Speed => {
                    return for_speed;
                }
            }
        }
        // No function asked for speed: keep the globally requested level.
        tcx.sess.opts.optimize
    };

    // Compute the set of foreign items in `krate` that must be linked via
    // dllimport: items of `NativeUnknown` libraries whose `#[link(cfg = …)]`
    // predicate (if any) matches the current session.
    providers.dllimport_foreign_items = |tcx, krate| {
        let module_map = tcx.foreign_modules(krate);
        let module_map =
            module_map.iter().map(|lib| (lib.def_id, lib)).collect::<FxHashMap<_, _>>();

        let dllimports = tcx
            .native_libraries(krate)
            .iter()
            .filter(|lib| {
                if lib.kind != cstore::NativeLibraryKind::NativeUnknown {
                    return false;
                }
                let cfg = match lib.cfg {
                    Some(ref cfg) => cfg,
                    None => return true,
                };
                attr::cfg_matches(cfg, &tcx.sess.parse_sess, None)
            })
            .filter_map(|lib| lib.foreign_module)
            .map(|id| &module_map[&id])
            .flat_map(|module| module.foreign_items.iter().cloned())
            .collect();
        tcx.arena.alloc(dllimports)
    };

    // Membership test against the set computed above.
    providers.is_dllimport_foreign_item =
        |tcx, def_id| tcx.dllimport_foreign_items(def_id.krate).contains(&def_id);
}
916 | ||
dc9dc135 | 917 | fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) -> CguReuse { |
a1dfa0c6 | 918 | if !tcx.dep_graph.is_fully_enabled() { |
dfeec247 | 919 | return CguReuse::No; |
a1dfa0c6 XL |
920 | } |
921 | ||
922 | let work_product_id = &cgu.work_product_id(); | |
923 | if tcx.dep_graph.previous_work_product(work_product_id).is_none() { | |
924 | // We don't have anything cached for this CGU. This can happen | |
925 | // if the CGU did not exist in the previous session. | |
dfeec247 | 926 | return CguReuse::No; |
a1dfa0c6 XL |
927 | } |
928 | ||
929 | // Try to mark the CGU as green. If it we can do so, it means that nothing | |
930 | // affecting the LLVM module has changed and we can re-use a cached version. | |
931 | // If we compile with any kind of LTO, this means we can re-use the bitcode | |
932 | // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only | |
933 | // know that later). If we are not doing LTO, there is only one optimized | |
934 | // version of each module, so we re-use that. | |
935 | let dep_node = cgu.codegen_dep_node(tcx); | |
dfeec247 XL |
936 | assert!( |
937 | !tcx.dep_graph.dep_node_exists(&dep_node), | |
a1dfa0c6 | 938 | "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.", |
dfeec247 XL |
939 | cgu.name() |
940 | ); | |
a1dfa0c6 XL |
941 | |
942 | if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() { | |
943 | // We can re-use either the pre- or the post-thinlto state | |
dfeec247 | 944 | if tcx.sess.lto() != Lto::No { CguReuse::PreLto } else { CguReuse::PostLto } |
a1dfa0c6 XL |
945 | } else { |
946 | CguReuse::No | |
947 | } | |
948 | } |