// compiler/rustc_codegen_ssa/src/mir/mod.rs
use crate::traits::*;
use rustc_errors::ErrorReported;
use rustc_middle::mir;
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
use rustc_target::abi::call::{FnAbi, PassMode};

use std::iter;

use rustc_index::bit_set::BitSet;
use rustc_index::vec::IndexVec;

use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo};
use self::place::PlaceRef;
use rustc_middle::mir::traversal;

use self::operand::{OperandRef, OperandValue};

/// Master context for codegenning from MIR.
pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
    instance: Instance<'tcx>,

    mir: &'tcx mir::Body<'tcx>,

    debug_context: Option<FunctionDebugContext<Bx::DIScope, Bx::DILocation>>,

    llfn: Bx::Function,

    cx: &'a Bx::CodegenCx,

    fn_abi: FnAbi<'tcx, Ty<'tcx>>,

    /// When unwinding is initiated, we have to store this personality
    /// value somewhere so that we can load it and re-use it in the
    /// resume instruction. The personality is (afaik) some kind of
    /// value used for C++ unwinding, which must filter by type: we
    /// don't really care about it very much. Anyway, this value
    /// contains an alloca into which the personality is stored and
    /// then later loaded when generating the DIVERGE_BLOCK.
    personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,

    /// A backend `BasicBlock` for each MIR `BasicBlock`, created lazily
    /// as-needed (e.g. RPO reaching it or another block branching to it).
    // FIXME(eddyb) rename `llbbs` and other `ll`-prefixed things to use a
    // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbbs`).
    cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,

    /// The funclet status of each basic block
    cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,

    /// When targeting MSVC, this stores the cleanup info for each funclet BB.
    /// This is initialized at the same time as the `landing_pads` entry for the
    /// funclets' head block, i.e. when needed by an unwind / `cleanup_ret` edge.
    funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,

    /// This stores the cached landing/cleanup pad block for a given BB.
    // FIXME(eddyb) rename this to `eh_pads`.
    landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,

    /// Cached unreachable block
    unreachable_block: Option<Bx::BasicBlock>,

    /// The location where each MIR arg/var/tmp/ret is stored. This is
    /// usually a `PlaceRef` representing an alloca, but not always:
    /// sometimes we can skip the alloca and just store the value
    /// directly using an `OperandRef`, which makes for tighter LLVM
    /// IR. The conditions for using an `OperandRef` are as follows:
    ///
    /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
    /// - the operand must never be referenced indirectly
    ///     - we should not take its address using the `&` operator
    ///     - nor should it appear in a place path like `tmp.a`
    /// - the operand must be defined by an rvalue that can generate immediate
    ///   values
    ///
    /// Avoiding allocs can also be important for certain intrinsics,
    /// notably `expect`.
    /// (See the source-level sketch after this struct definition.)
    locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,

    /// All `VarDebugInfo` from the MIR body, partitioned by `Local`.
    /// This is `None` if no variable debuginfo/names are needed.
    per_local_var_debug_info:
        Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>>,

    /// Caller location propagated if this function has `#[track_caller]`.
    caller_location: Option<OperandRef<'tcx, Bx::Value>>,
}
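
// Illustrative sketch (not part of the original source): the `locals` field
// above distinguishes alloca-backed places from pure SSA operands. At the
// source level the distinction looks roughly like this (the actual choice is
// made by `analyze::non_ssa_locals`, so this is only an approximation):
//
//     fn example(x: i32) -> i32 {
//         let a = x + 1;   // never referenced indirectly: can typically stay
//                          // an immediate `OperandRef`, no alloca needed
//         let mut b = x;
//         let p = &mut b;  // address taken: `b` must be an alloca-backed
//         *p += a;         // `PlaceRef`
//         b
//     }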

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn monomorphize<T>(&self, value: T) -> T
    where
        T: Copy + TypeFoldable<'tcx>,
    {
        debug!("monomorphize: self.instance={:?}", self.instance);
        self.instance.subst_mir_and_normalize_erasing_regions(
            self.cx.tcx(),
            ty::ParamEnv::reveal_all(),
            value,
        )
    }
}
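
// Illustrative sketch (not part of the original source): `monomorphize`
// substitutes the instance's type parameters and erases regions, so each
// monomorphic instance sees concrete types. For a generic source function:
//
//     fn size_of_t<T>() -> usize {
//         std::mem::size_of::<T>() // for the instance `size_of_t::<u32>`,
//     }                            // `T` is replaced by `u32`, so this is 4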

enum LocalRef<'tcx, V> {
    Place(PlaceRef<'tcx, V>),
    /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).
    /// `*p` is the fat pointer that references the actual unsized place.
    /// Every time it is initialized, we have to reallocate the place
    /// and update the fat pointer. That's the reason why it is indirect.
    UnsizedPlace(PlaceRef<'tcx, V>),
    Operand(Option<OperandRef<'tcx, V>>),
}
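
// Illustrative sketch (not part of the original source): the "fat pointer"
// mentioned above is a (data pointer, metadata) pair, e.g. pointer + length
// for slices and `str`:
//
//     use std::mem::size_of;
//     fn fat_pointer_demo() {
//         assert_eq!(size_of::<&str>(), 2 * size_of::<usize>());
//         assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());
//         assert_eq!(size_of::<&u8>(), size_of::<usize>()); // thin pointer
//     }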

impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> {
    fn new_operand<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> LocalRef<'tcx, V> {
        if layout.is_zst() {
            // Zero-size temporaries aren't always initialized, which
            // doesn't matter because they don't contain data, but
            // we need something in the operand.
            LocalRef::Operand(Some(OperandRef::new_zst(bx, layout)))
        } else {
            LocalRef::Operand(None)
        }
    }
}
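
// Illustrative sketch (not part of the original source): a zero-sized type
// occupies no storage, which is why `new_operand` above can hand out a dummy
// operand for it without ever initializing any memory:
//
//     struct Marker;                  // zero-sized: carries no data
//     fn zst_demo() {
//         assert_eq!(std::mem::size_of::<Marker>(), 0);
//         assert_eq!(std::mem::size_of::<()>(), 0);
//     }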

///////////////////////////////////////////////////////////////////////////

pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    instance: Instance<'tcx>,
) {
    assert!(!instance.substs.needs_infer());

    let llfn = cx.get_fn(instance);

    let mir = cx.tcx().instance_mir(instance.def);

    let fn_abi = FnAbi::of_instance(cx, instance, &[]);
    debug!("fn_abi: {:?}", fn_abi);

    let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);

    let start_llbb = Bx::append_block(cx, llfn, "start");
    let mut bx = Bx::build(cx, start_llbb);

    if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
        bx.set_personality_fn(cx.eh_personality());
    }

    let cleanup_kinds = analyze::cleanup_kinds(&mir);
    // Allocate a `Block` for every basic block, except
    // the start block, if nothing loops back to it.
    let reentrant_start_block = !mir.predecessors()[mir::START_BLOCK].is_empty();
    let cached_llbbs: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>> =
        mir.basic_blocks()
            .indices()
            .map(|bb| {
                if bb == mir::START_BLOCK && !reentrant_start_block {
                    Some(start_llbb)
                } else {
                    None
                }
            })
            .collect();

    let mut fx = FunctionCx {
        instance,
        mir,
        llfn,
        fn_abi,
        cx,
        personality_slot: None,
        cached_llbbs,
        unreachable_block: None,
        cleanup_kinds,
        landing_pads: IndexVec::from_elem(None, mir.basic_blocks()),
        funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks().len()),
        locals: IndexVec::new(),
        debug_context,
        per_local_var_debug_info: None,
        caller_location: None,
    };

    fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut bx);

    // Evaluate all required consts; codegen later assumes that CTFE will never fail.
    let mut all_consts_ok = true;
    for const_ in &mir.required_consts {
        if let Err(err) = fx.eval_mir_constant(const_) {
            all_consts_ok = false;
            match err {
                // errored or at least linted
                ErrorHandled::Reported(ErrorReported) | ErrorHandled::Linted => {}
                ErrorHandled::TooGeneric => {
                    span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err)
                }
            }
        }
    }
    if !all_consts_ok {
        // We leave the IR in some half-built state here, and rely on this code not even being
        // submitted to LLVM once an error was raised.
        return;
    }
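
    // Illustrative sketch (not part of the original source): roughly, a
    // "required const" whose CTFE fails is reported by the loop above rather
    // than later during codegen, e.g. an associated const that only errors
    // for some monomorphizations:
    //
    //     struct NonZeroSized<T>(T);
    //     impl<T> NonZeroSized<T> {
    //         // underflows (and so fails to evaluate) only when `T` is
    //         // zero-sized, e.g. `NonZeroSized::<()>::LAST_BYTE`
    //         const LAST_BYTE: usize = std::mem::size_of::<T>() - 1;
    //     }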

    let memory_locals = analyze::non_ssa_locals(&fx);

    // Allocate variable and temp allocas
    fx.locals = {
        let args = arg_local_refs(&mut bx, &mut fx, &memory_locals);

        let mut allocate_local = |local| {
            let decl = &mir.local_decls[local];
            let layout = bx.layout_of(fx.monomorphize(decl.ty));
            assert!(!layout.ty.has_erasable_regions(cx.tcx()));

            if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
                debug!("alloc: {:?} (return place) -> place", local);
                let llretptr = bx.get_param(0);
                return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
            }

            if memory_locals.contains(local) {
                debug!("alloc: {:?} -> place", local);
                if layout.is_unsized() {
                    LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut bx, layout))
                } else {
                    LocalRef::Place(PlaceRef::alloca(&mut bx, layout))
                }
            } else {
                debug!("alloc: {:?} -> operand", local);
                LocalRef::new_operand(&mut bx, layout)
            }
        };

        let retptr = allocate_local(mir::RETURN_PLACE);
        iter::once(retptr)
            .chain(args.into_iter())
            .chain(mir.vars_and_temps_iter().map(allocate_local))
            .collect()
    };

    // Apply debuginfo to the newly allocated locals.
    fx.debug_introduce_locals(&mut bx);

    // Branch to the START block, if it's not the entry block.
    if reentrant_start_block {
        bx.br(fx.llbb(mir::START_BLOCK));
    }

    // Codegen the body of each block using reverse postorder
    // FIXME(eddyb) reuse RPO iterator between `analysis` and this.
    for (bb, _) in traversal::reverse_postorder(&mir) {
        fx.codegen_block(bb);
    }
}
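
// Illustrative sketch (not part of the original source): `traversal::reverse_postorder`
// visits every reachable block before its successors (except along back edges).
// A minimal standalone version over an adjacency list would look like:
//
//     fn reverse_postorder(adj: &[Vec<usize>], start: usize) -> Vec<usize> {
//         fn visit(adj: &[Vec<usize>], bb: usize, seen: &mut [bool], out: &mut Vec<usize>) {
//             if seen[bb] {
//                 return;
//             }
//             seen[bb] = true;
//             for &succ in &adj[bb] {
//                 visit(adj, succ, seen, out);
//             }
//             out.push(bb); // postorder: emit a block after all its successors
//         }
//         let mut seen = vec![false; adj.len()];
//         let mut order = Vec::new();
//         visit(adj, start, &mut seen, &mut order);
//         order.reverse(); // reverse postorder
//         order
//     }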

/// Produces, for each argument, a `Value` pointing at the
/// argument's value. As arguments are places, these are always
/// indirect.
fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    fx: &mut FunctionCx<'a, 'tcx, Bx>,
    memory_locals: &BitSet<mir::Local>,
) -> Vec<LocalRef<'tcx, Bx::Value>> {
    let mir = fx.mir;
    let mut idx = 0;
    let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize;

    let args = mir
        .args_iter()
        .enumerate()
        .map(|(arg_index, local)| {
            let arg_decl = &mir.local_decls[local];

            if Some(local) == mir.spread_arg {
                // This argument (e.g., the last argument in the "rust-call" ABI)
                // is a tuple that was spread at the ABI level and now we have
                // to reconstruct it into a tuple local variable, from multiple
                // individual LLVM function arguments.

                let arg_ty = fx.monomorphize(arg_decl.ty);
                let tupled_arg_tys = match arg_ty.kind() {
                    ty::Tuple(tys) => tys,
                    _ => bug!("spread argument isn't a tuple?!"),
                };

                let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
                for i in 0..tupled_arg_tys.len() {
                    let arg = &fx.fn_abi.args[idx];
                    idx += 1;
                    if arg.pad.is_some() {
                        llarg_idx += 1;
                    }
                    let pr_field = place.project_field(bx, i);
                    bx.store_fn_arg(arg, &mut llarg_idx, pr_field);
                }

                return LocalRef::Place(place);
            }
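
            // Illustrative sketch (not part of the original source): the
            // "rust-call" ABI handled above is used by the `Fn*` traits,
            // whose methods take all of the call's arguments as one tuple:
            //
            //     fn call_it<F: Fn(u8, bool) -> u8>(f: F) -> u8 {
            //         // at the trait level this is `Fn::call(&f, (1u8, true))`:
            //         // the `(u8, bool)` tuple is the spread argument, passed
            //         // as separate ABI-level arguments and reassembled into a
            //         // tuple local by the block above.
            //         f(1, true)
            //     }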

            if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
                let arg_ty = fx.monomorphize(arg_decl.ty);

                let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
                bx.va_start(va_list.llval);

                return LocalRef::Place(va_list);
            }

            let arg = &fx.fn_abi.args[idx];
            idx += 1;
            if arg.pad.is_some() {
                llarg_idx += 1;
            }

            if !memory_locals.contains(local) {
                // We don't have to cast or keep the argument in the alloca.
                // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
                // of putting everything in allocas just so we can use llvm.dbg.declare.
                let local = |op| LocalRef::Operand(Some(op));
                match arg.mode {
                    PassMode::Ignore => {
                        return local(OperandRef::new_zst(bx, arg.layout));
                    }
                    PassMode::Direct(_) => {
                        let llarg = bx.get_param(llarg_idx);
                        llarg_idx += 1;
                        return local(OperandRef::from_immediate_or_packed_pair(
                            bx, llarg, arg.layout,
                        ));
                    }
                    PassMode::Pair(..) => {
                        let (a, b) = (bx.get_param(llarg_idx), bx.get_param(llarg_idx + 1));
                        llarg_idx += 2;

                        return local(OperandRef {
                            val: OperandValue::Pair(a, b),
                            layout: arg.layout,
                        });
                    }
                    _ => {}
                }
            }

            if arg.is_sized_indirect() {
                // Don't copy an indirect argument to an alloca, the caller
                // already put it in a temporary alloca and gave it up.
                // FIXME: lifetimes
                let llarg = bx.get_param(llarg_idx);
                llarg_idx += 1;
                LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout))
            } else if arg.is_unsized_indirect() {
                // As the storage for the indirect argument lives during
                // the whole function call, we just copy the fat pointer.
                let llarg = bx.get_param(llarg_idx);
                llarg_idx += 1;
                let llextra = bx.get_param(llarg_idx);
                llarg_idx += 1;
                let indirect_operand = OperandValue::Pair(llarg, llextra);

                let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout);
                indirect_operand.store(bx, tmp);
                LocalRef::UnsizedPlace(tmp)
            } else {
                let tmp = PlaceRef::alloca(bx, arg.layout);
                bx.store_fn_arg(arg, &mut llarg_idx, tmp);
                LocalRef::Place(tmp)
            }
        })
        .collect::<Vec<_>>();

    if fx.instance.def.requires_caller_location(bx.tcx()) {
        assert_eq!(
            fx.fn_abi.args.len(),
            args.len() + 1,
            "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR",
        );

        let arg = fx.fn_abi.args.last().unwrap();
        match arg.mode {
            PassMode::Direct(_) => (),
            _ => bug!("caller location must be PassMode::Direct, found {:?}", arg.mode),
        }

        fx.caller_location = Some(OperandRef {
            val: OperandValue::Immediate(bx.get_param(llarg_idx)),
            layout: arg.layout,
        });
    }
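
    // Illustrative sketch (not part of the original source): the implicit
    // caller-location argument handled above is what powers `#[track_caller]`
    // at the source level:
    //
    //     #[track_caller]
    //     fn checked(x: i32) -> i32 {
    //         // reports the hidden `&'static Location<'static>` argument
    //         // that the caller passed in
    //         assert!(x >= 0, "called from {}", std::panic::Location::caller());
    //         x
    //     }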

    args
}

mod analyze;
mod block;
pub mod constant;
pub mod coverageinfo;
pub mod debuginfo;
mod intrinsic;
pub mod operand;
pub mod place;
mod rvalue;
mod statement;