]>
Commit | Line | Data |
---|---|---|
9c376795 | 1 | use hir::def_id::DefId; |
2b03887a FG |
2 | use rustc_hir as hir; |
3 | use rustc_index::bit_set::BitSet; | |
4 | use rustc_index::vec::{Idx, IndexVec}; | |
5 | use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal}; | |
6 | use rustc_middle::ty::layout::{ | |
7 | IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES, | |
8 | }; | |
9 | use rustc_middle::ty::{ | |
9c376795 | 10 | self, subst::SubstsRef, AdtDef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable, |
2b03887a FG |
11 | }; |
12 | use rustc_session::{DataTypeKind, FieldInfo, SizeKind, VariantInfo}; | |
13 | use rustc_span::symbol::Symbol; | |
14 | use rustc_span::DUMMY_SP; | |
15 | use rustc_target::abi::*; | |
16 | ||
487cf647 | 17 | use std::fmt::Debug; |
2b03887a | 18 | use std::iter; |
2b03887a FG |
19 | |
20 | use crate::layout_sanity_check::sanity_check_layout; | |
21 | ||
22 | pub fn provide(providers: &mut ty::query::Providers) { | |
23 | *providers = ty::query::Providers { layout_of, ..*providers }; | |
24 | } | |
25 | ||
/// Entry point for the `layout_of` query.
///
/// Normalizes the queried type first (re-routing to the cached entry for the
/// normalized type if normalization changed it), then computes the layout via
/// [`layout_of_uncached`], records it for `-Zprint-type-sizes`, and runs the
/// layout sanity checks.
#[instrument(skip(tcx, query), level = "debug")]
fn layout_of<'tcx>(
    tcx: TyCtxt<'tcx>,
    query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
    let (param_env, ty) = query.into_parts();
    debug!(?ty);

    // Layout is only meaningful post-monomorphization, so reveal everything.
    let param_env = param_env.with_reveal_all_normalized(tcx);
    let unnormalized_ty = ty;

    // FIXME: We might want to have two different versions of `layout_of`:
    // One that can be called after typecheck has completed and can use
    // `normalize_erasing_regions` here and another one that can be called
    // before typecheck has completed and uses `try_normalize_erasing_regions`.
    let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
        Ok(t) => t,
        Err(normalization_error) => {
            return Err(LayoutError::NormalizationFailure(ty, normalization_error));
        }
    };

    if ty != unnormalized_ty {
        // Ensure this layout is also cached for the normalized type.
        // (Re-entering the query keys the result under the normalized type.)
        return tcx.layout_of(param_env.and(ty));
    }

    let cx = LayoutCx { tcx, param_env };

    let layout = layout_of_uncached(&cx, ty)?;
    let layout = TyAndLayout { ty, layout };

    record_layout_for_printing(&cx, layout);

    sanity_check_layout(&cx, &layout);

    Ok(layout)
}
64 | ||
2b03887a FG |
// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
// This is used to go between `memory_index` (source field order to memory order)
// and `inverse_memory_index` (memory order to source field order).
// See also `FieldsShape::Arbitrary::memory_index` for more details.
// FIXME(eddyb) build a better abstraction for permutations, if possible.
fn invert_mapping(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0u32; map.len()];
    for (src, &dst) in map.iter().enumerate() {
        inverse[dst as usize] = src as u32;
    }
    inverse
}
77 | ||
2b03887a FG |
/// Computes the (un-interned) layout of a single-variant aggregate from the
/// already-computed layouts of its fields, the type's `repr` options, and the
/// requested `StructKind`.
///
/// Rejects the contradictory `#[repr(packed)]` + `#[repr(align)]` combination
/// (already an error by this point, hence `delay_span_bug`), and maps a failed
/// `cx.univariant` computation to `LayoutError::SizeOverflow`.
fn univariant_uninterned<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
    fields: &[TyAndLayout<'_>],
    repr: &ReprOptions,
    kind: StructKind,
) -> Result<LayoutS<VariantIdx>, LayoutError<'tcx>> {
    let dl = cx.data_layout();
    let pack = repr.pack;
    if pack.is_some() && repr.align.is_some() {
        cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
        return Err(LayoutError::Unknown(ty));
    }

    // `cx.univariant` returns `None` on size overflow only — NOTE(review):
    // inferred from the error mapping here; confirm against its definition.
    cx.univariant(dl, fields, repr, kind).ok_or(LayoutError::SizeOverflow(ty))
}
94 | ||
/// Computes the layout of `ty` from scratch (no query caching at this level),
/// dispatching on the type's kind: scalars, pointers (thin and wide), arrays,
/// slices, `str`, closures, tuples, SIMD vectors, ADTs, and generators.
fn layout_of_uncached<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
    let tcx = cx.tcx;
    let param_env = cx.param_env;
    let dl = cx.data_layout();
    // A scalar covering the primitive's full value range (no niche).
    let scalar_unit = |value: Primitive| {
        let size = value.size(dl);
        assert!(size.bits() <= 128);
        Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
    };
    let scalar = |value: Primitive| tcx.intern_layout(LayoutS::scalar(cx, scalar_unit(value)));

    // Convenience wrapper: compute a univariant layout and intern it.
    let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
        Ok(tcx.intern_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
    };
    debug_assert!(!ty.has_non_region_infer());

    Ok(match *ty.kind() {
        // Basic scalars.
        ty::Bool => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I8, false),
                // `bool` has a large niche: only 0 and 1 are valid.
                valid_range: WrappingRange { start: 0, end: 1 },
            },
        )),
        ty::Char => tcx.intern_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I32, false),
                // `char` is restricted to valid Unicode scalar-value range upper bound.
                valid_range: WrappingRange { start: 0, end: 0x10FFFF },
            },
        )),
        ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
        ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
        ty::Float(fty) => scalar(match fty {
            ty::FloatTy::F32 => F32,
            ty::FloatTy::F64 => F64,
        }),
        ty::FnPtr(_) => {
            // Function pointers are non-null, giving a niche at 0.
            let mut ptr = scalar_unit(Pointer);
            ptr.valid_range_mut().start = 1;
            tcx.intern_layout(LayoutS::scalar(cx, ptr))
        }

        // The never type.
        ty::Never => tcx.intern_layout(cx.layout_of_never_type()),

        // Potentially-wide pointers.
        ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
            let mut data_ptr = scalar_unit(Pointer);
            if !ty.is_unsafe_ptr() {
                // References are non-null; raw pointers may be null.
                data_ptr.valid_range_mut().start = 1;
            }

            let pointee = tcx.normalize_erasing_regions(param_env, pointee);
            if pointee.is_sized(tcx, param_env) {
                // Sized pointee => thin pointer.
                return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
            }

            let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);

            // Prefer the `Pointee::Metadata` projection (lang item) when
            // available; otherwise fall back on matching the tail's kind.
            let metadata = if let Some(metadata_def_id) = tcx.lang_items().metadata_type() {
                let metadata_ty = tcx.normalize_erasing_regions(
                    param_env,
                    tcx.mk_projection(metadata_def_id, [pointee]),
                );
                let metadata_layout = cx.layout_of(metadata_ty)?;
                // If the metadata is a 1-zst, then the pointer is thin.
                if metadata_layout.is_zst() && metadata_layout.align.abi.bytes() == 1 {
                    return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
                }

                let Abi::Scalar(metadata) = metadata_layout.abi else {
                    return Err(LayoutError::Unknown(unsized_part));
                };
                metadata
            } else {
                match unsized_part.kind() {
                    ty::Foreign(..) => {
                        // Extern types have no metadata: thin pointer.
                        return Ok(tcx.intern_layout(LayoutS::scalar(cx, data_ptr)));
                    }
                    // Slice/str metadata is the element count (a usize).
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        // Trait-object metadata is a non-null vtable pointer.
                        let mut vtable = scalar_unit(Pointer);
                        vtable.valid_range_mut().start = 1;
                        vtable
                    }
                    _ => {
                        return Err(LayoutError::Unknown(unsized_part));
                    }
                }
            };

            // Effectively a (ptr, meta) tuple.
            tcx.intern_layout(cx.scalar_pair(data_ptr, metadata))
        }

        ty::Dynamic(_, _, ty::DynStar) => {
            // `dyn*` is a (data-word, vtable-pointer) pair by value.
            let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
            data.valid_range_mut().start = 0;
            let mut vtable = scalar_unit(Pointer);
            vtable.valid_range_mut().start = 1;
            tcx.intern_layout(cx.scalar_pair(data, vtable))
        }

        // Arrays and slices.
        ty::Array(element, mut count) => {
            if count.has_projections() {
                count = tcx.normalize_erasing_regions(param_env, count);
                if count.has_projections() {
                    return Err(LayoutError::Unknown(ty));
                }
            }

            let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
            let element = cx.layout_of(element)?;
            let size = element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;

            let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
                Abi::Uninhabited
            } else {
                Abi::Aggregate { sized: true }
            };

            // A zero-length array has no bytes, hence no niche.
            let largest_niche = if count != 0 { element.largest_niche } else { None };

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count },
                abi,
                largest_niche,
                align: element.align,
                size,
            })
        }
        ty::Slice(element) => {
            // Unsized: size/count here describe only the statically-known part.
            let element = cx.layout_of(element)?;
            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields: FieldsShape::Array { stride: element.size, count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: element.align,
                size: Size::ZERO,
            })
        }
        ty::Str => tcx.intern_layout(LayoutS {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
            abi: Abi::Aggregate { sized: false },
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }),

        // Odd unit types.
        ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
        ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
            // Start from an empty struct layout, then mark it unsized.
            let mut unit = univariant_uninterned(
                cx,
                ty,
                &[],
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?;
            match unit.abi {
                Abi::Aggregate { ref mut sized } => *sized = false,
                _ => bug!(),
            }
            tcx.intern_layout(unit)
        }

        ty::Generator(def_id, substs, _) => generator_layout(cx, ty, def_id, substs)?,

        ty::Closure(_, ref substs) => {
            // A closure is laid out like a struct of its captured upvars.
            let tys = substs.as_closure().upvar_tys();
            univariant(
                &tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?
        }

        ty::Tuple(tys) => {
            // Non-empty tuples may have an unsized last element.
            let kind =
                if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

            univariant(
                &tys.iter().map(|k| cx.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                kind,
            )?
        }

        // SIMD vector types.
        ty::Adt(def, substs) if def.repr().simd() => {
            if !def.is_struct() {
                // Should have yielded E0517 by now.
                tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    "#[repr(simd)] was applied to an ADT that is not a struct",
                );
                return Err(LayoutError::Unknown(ty));
            }

            // Supported SIMD vectors are homogeneous ADTs with at least one field:
            //
            // * #[repr(simd)] struct S(T, T, T, T);
            // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
            // * #[repr(simd)] struct S([T; 4])
            //
            // where T is a primitive scalar (integer/float/pointer).

            // SIMD vectors with zero fields are not supported.
            // (should be caught by typeck)
            if def.non_enum_variant().fields.is_empty() {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            }

            // Type of the first ADT field:
            let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);

            // Heterogeneous SIMD vectors are not supported:
            // (should be caught by typeck)
            for fi in &def.non_enum_variant().fields {
                if fi.ty(tcx, substs) != f0_ty {
                    tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
                }
            }

            // The element type and number of elements of the SIMD vector
            // are obtained from:
            //
            // * the element type and length of the single array field, if
            // the first field is of array type, or
            //
            // * the homogeneous field type and the number of fields.
            let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                // First ADT field is an array:

                // SIMD vectors with multiple array fields are not supported:
                // (should be caught by typeck)
                if def.non_enum_variant().fields.len() != 1 {
                    tcx.sess.fatal(&format!(
                        "monomorphising SIMD type `{}` with more than one array field",
                        ty
                    ));
                }

                // Extract the number of elements from the layout of the array field:
                let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
                    return Err(LayoutError::Unknown(ty));
                };

                (*e_ty, *count, true)
            } else {
                // First ADT field is not an array:
                (f0_ty, def.non_enum_variant().fields.len() as _, false)
            };

            // SIMD vectors of zero length are not supported.
            // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
            // support.
            //
            // Can't be caught in typeck if the array length is generic.
            if e_len == 0 {
                tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
            } else if e_len > MAX_SIMD_LANES {
                tcx.sess.fatal(&format!(
                    "monomorphising SIMD type `{}` of length greater than {}",
                    ty, MAX_SIMD_LANES,
                ));
            }

            // Compute the ABI of the element type:
            let e_ly = cx.layout_of(e_ty)?;
            let Abi::Scalar(e_abi) = e_ly.abi else {
                // This error isn't caught in typeck, e.g., if
                // the element type of the vector is generic.
                tcx.sess.fatal(&format!(
                    "monomorphising SIMD type `{}` with a non-primitive-scalar \
                    (integer/float/pointer) element type `{}`",
                    ty, e_ty
                ))
            };

            // Compute the size and alignment of the vector:
            let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
            let align = dl.vector_align(size);
            let size = size.align_to(align.abi);

            // Compute the placement of the vector fields:
            let fields = if is_array {
                FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
            } else {
                FieldsShape::Array { stride: e_ly.size, count: e_len }
            };

            tcx.intern_layout(LayoutS {
                variants: Variants::Single { index: VariantIdx::new(0) },
                fields,
                abi: Abi::Vector { element: e_abi, count: e_len },
                largest_niche: e_ly.largest_niche,
                size,
                align,
            })
        }

        // ADTs.
        ty::Adt(def, substs) => {
            // Cache the field layouts.
            let variants = def
                .variants()
                .iter()
                .map(|v| {
                    v.fields
                        .iter()
                        .map(|field| cx.layout_of(field.ty(tcx, substs)))
                        .collect::<Result<Vec<_>, _>>()
                })
                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

            if def.is_union() {
                if def.repr().pack.is_some() && def.repr().align.is_some() {
                    cx.tcx.sess.delay_span_bug(
                        tcx.def_span(def.did()),
                        "union cannot be packed and aligned",
                    );
                    return Err(LayoutError::Unknown(ty));
                }

                return Ok(tcx.intern_layout(
                    cx.layout_of_union(&def.repr(), &variants).ok_or(LayoutError::Unknown(ty))?,
                ));
            }

            tcx.intern_layout(
                cx.layout_of_struct_or_enum(
                    &def.repr(),
                    &variants,
                    def.is_enum(),
                    def.is_unsafe_cell(),
                    tcx.layout_scalar_valid_range(def.did()),
                    |min, max| Integer::repr_discr(tcx, ty, &def.repr(), min, max),
                    // Explicit discriminant values, only for enums.
                    def.is_enum()
                        .then(|| def.discriminants(tcx).map(|(v, d)| (v, d.val as i128)))
                        .into_iter()
                        .flatten(),
                    // Layout optimizations are inhibited by the repr, or by
                    // any variant with a non-default discriminant.
                    def.repr().inhibit_enum_layout_opt()
                        || def
                            .variants()
                            .iter_enumerated()
                            .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32())),
                    // Whether the type is a struct whose last field is sized
                    // (i.e. whether it is definitely not a DST).
                    {
                        let param_env = tcx.param_env(def.did());
                        def.is_struct()
                            && match def.variants().iter().next().and_then(|x| x.fields.last()) {
                                Some(last_field) => {
                                    tcx.type_of(last_field.did).is_sized(tcx, param_env)
                                }
                                None => false,
                            }
                    },
                )
                .ok_or(LayoutError::SizeOverflow(ty))?,
            )
        }

        // Types with no meaningful known layout.
        ty::Alias(..) => {
            // NOTE(eddyb) `layout_of` query should've normalized these away,
            // if that was possible, so there's no reason to try again here.
            return Err(LayoutError::Unknown(ty));
        }

        ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
            bug!("Layout::compute: unexpected type `{}`", ty)
        }

        ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
            return Err(LayoutError::Unknown(ty));
        }
    })
}
482 | ||
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    /// Not yet seen in any variant (initial state).
    Unassigned,
    /// Overlap-eligible so far: seen in exactly this one variant.
    Assigned(VariantIdx),
    // FIXME: Use newtype_index so we aren't wasting bytes
    /// Overlap-ineligible: stored in the generator's prefix. Once prefix
    /// positions are decided, `Some(idx)` is the local's slot in that prefix.
    Ineligible(Option<u32>),
}
491 | ||
492 | // When laying out generators, we divide our saved local fields into two | |
493 | // categories: overlap-eligible and overlap-ineligible. | |
494 | // | |
495 | // Those fields which are ineligible for overlap go in a "prefix" at the | |
496 | // beginning of the layout, and always have space reserved for them. | |
497 | // | |
498 | // Overlap-eligible fields are only assigned to one variant, so we lay | |
499 | // those fields out for each variant and put them right after the | |
500 | // prefix. | |
501 | // | |
502 | // Finally, in the layout details, we point to the fields from the | |
503 | // variants they are assigned to. It is possible for some fields to be | |
504 | // included in multiple variants. No field ever "moves around" in the | |
505 | // layout; its offset is always the same. | |
506 | // | |
507 | // Also included in the layout are the upvars and the discriminant. | |
508 | // These are included as fields on the "outer" layout; they are not part | |
509 | // of any variant. | |
510 | ||
/// Compute the eligibility and assignment of each local.
///
/// Returns the set of overlap-ineligible locals (which get promoted into the
/// generator's prefix) together with the per-local assignment, where each
/// ineligible local has been given its position within that prefix.
fn generator_saved_local_eligibility(
    info: &GeneratorLayout<'_>,
) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
    use SavedLocalEligibility::*;

    let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
        IndexVec::from_elem_n(Unassigned, info.field_tys.len());

    // The saved locals not eligible for overlap. These will get
    // "promoted" to the prefix of our generator.
    let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

    // Figure out which of our saved locals are fields in only
    // one variant. The rest are deemed ineligible for overlap.
    for (variant_index, fields) in info.variant_fields.iter_enumerated() {
        for local in fields {
            match assignments[*local] {
                Unassigned => {
                    assignments[*local] = Assigned(variant_index);
                }
                Assigned(idx) => {
                    // We've already seen this local at another suspension
                    // point, so it is no longer a candidate.
                    trace!(
                        "removing local {:?} in >1 variant ({:?}, {:?})",
                        local,
                        variant_index,
                        idx
                    );
                    ineligible_locals.insert(*local);
                    assignments[*local] = Ineligible(None);
                }
                Ineligible(_) => {}
            }
        }
    }

    // Next, check every pair of eligible locals to see if they
    // conflict.
    for local_a in info.storage_conflicts.rows() {
        let conflicts_a = info.storage_conflicts.count(local_a);
        if ineligible_locals.contains(local_a) {
            continue;
        }

        for local_b in info.storage_conflicts.iter(local_a) {
            // local_a and local_b are storage live at the same time, therefore they
            // cannot overlap in the generator layout. The only way to guarantee
            // this is if they are in the same variant, or one is ineligible
            // (which means it is stored in every variant).
            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
                continue;
            }

            // If they conflict, we will choose one to make ineligible.
            // This is not always optimal; it's just a greedy heuristic that
            // seems to produce good results most of the time.
            // Demote whichever local has more conflicts overall.
            let conflicts_b = info.storage_conflicts.count(local_b);
            let (remove, other) =
                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
            ineligible_locals.insert(remove);
            assignments[remove] = Ineligible(None);
            trace!("removing local {:?} due to conflict with {:?}", remove, other);
        }
    }

    // Count the number of variants in use. If only one of them, then it is
    // impossible to overlap any locals in our layout. In this case it's
    // always better to make the remaining locals ineligible, so we can
    // lay them out with the other locals in the prefix and eliminate
    // unnecessary padding bytes.
    {
        let mut used_variants = BitSet::new_empty(info.variant_fields.len());
        for assignment in &assignments {
            if let Assigned(idx) = assignment {
                used_variants.insert(*idx);
            }
        }
        if used_variants.count() < 2 {
            for assignment in assignments.iter_mut() {
                *assignment = Ineligible(None);
            }
            ineligible_locals.insert_all();
        }
    }

    // Write down the order of our locals that will be promoted to the prefix.
    {
        for (idx, local) in ineligible_locals.iter().enumerate() {
            assignments[local] = Ineligible(Some(idx as u32));
        }
    }
    debug!("generator saved local assignments: {:?}", assignments);

    (ineligible_locals, assignments)
}
608 | ||
/// Compute the full generator layout.
///
/// The layout consists of a shared "prefix" (upvars, the discriminant tag, and
/// all overlap-ineligible "promoted" saved locals), followed by per-variant
/// fields for the overlap-eligible locals. Promoted fields keep a fixed offset
/// across all variants and are re-referenced from each variant that uses them.
fn generator_layout<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
    def_id: hir::def_id::DefId,
    substs: SubstsRef<'tcx>,
) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
    use SavedLocalEligibility::*;
    let tcx = cx.tcx;
    // Instantiate a saved local's type with this generator's substitutions.
    let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);

    let Some(info) = tcx.generator_layout(def_id) else {
        return Err(LayoutError::Unknown(ty));
    };
    let (ineligible_locals, assignments) = generator_saved_local_eligibility(&info);

    // Build a prefix layout, including "promoting" all ineligible
    // locals as part of the prefix. We compute the layout of all of
    // these fields at once to get optimal packing.
    // The tag field sits right after the upvars in the prefix.
    let tag_index = substs.as_generator().prefix_tys().count();

    // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
    let max_discr = (info.variant_fields.len() - 1) as u128;
    let discr_int = Integer::fit_unsigned(max_discr);
    let discr_int_ty = discr_int.to_ty(tcx, false);
    let tag = Scalar::Initialized {
        value: Primitive::Int(discr_int, false),
        valid_range: WrappingRange { start: 0, end: max_discr },
    };
    let tag_layout = cx.tcx.intern_layout(LayoutS::scalar(cx, tag));
    let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };

    // Promoted locals are wrapped in `MaybeUninit` since not every variant
    // initializes every promoted field.
    let promoted_layouts = ineligible_locals
        .iter()
        .map(|local| subst_field(info.field_tys[local]))
        .map(|ty| tcx.mk_maybe_uninit(ty))
        .map(|ty| cx.layout_of(ty));
    // Prefix field order: upvars, then the tag, then the promoted locals.
    let prefix_layouts = substs
        .as_generator()
        .prefix_tys()
        .map(|ty| cx.layout_of(ty))
        .chain(iter::once(Ok(tag_layout)))
        .chain(promoted_layouts)
        .collect::<Result<Vec<_>, _>>()?;
    let prefix = univariant_uninterned(
        cx,
        ty,
        &prefix_layouts,
        &ReprOptions::default(),
        StructKind::AlwaysSized,
    )?;

    let (prefix_size, prefix_align) = (prefix.size, prefix.align);

    // Split the prefix layout into the "outer" fields (upvars and
    // discriminant) and the "promoted" fields. Promoted fields will
    // get included in each variant that requested them in
    // GeneratorLayout.
    debug!("prefix = {:#?}", prefix);
    let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
        FieldsShape::Arbitrary { mut offsets, memory_index } => {
            let mut inverse_memory_index = invert_mapping(&memory_index);

            // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
            // "outer" and "promoted" fields respectively.
            let b_start = (tag_index + 1) as u32;
            let offsets_b = offsets.split_off(b_start as usize);
            let offsets_a = offsets;

            // Disentangle the "a" and "b" components of `inverse_memory_index`
            // by preserving the order but keeping only one disjoint "half" each.
            // FIXME(eddyb) build a better abstraction for permutations, if possible.
            let inverse_memory_index_b: Vec<_> =
                inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
            inverse_memory_index.retain(|&i| i < b_start);
            let inverse_memory_index_a = inverse_memory_index;

            // Since `inverse_memory_index_{a,b}` each only refer to their
            // respective fields, they can be safely inverted
            let memory_index_a = invert_mapping(&inverse_memory_index_a);
            let memory_index_b = invert_mapping(&inverse_memory_index_b);

            let outer_fields =
                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
            (outer_fields, offsets_b, memory_index_b)
        }
        _ => bug!(),
    };

    let mut size = prefix.size;
    let mut align = prefix.align;
    let variants = info
        .variant_fields
        .iter_enumerated()
        .map(|(index, variant_fields)| {
            // Only include overlap-eligible fields when we compute our variant layout.
            let variant_only_tys = variant_fields
                .iter()
                .filter(|local| match assignments[**local] {
                    Unassigned => bug!(),
                    Assigned(v) if v == index => true,
                    Assigned(_) => bug!("assignment does not match variant"),
                    Ineligible(_) => false,
                })
                .map(|local| subst_field(info.field_tys[*local]));

            let mut variant = univariant_uninterned(
                cx,
                ty,
                &variant_only_tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
                &ReprOptions::default(),
                // Variant fields are laid out after the shared prefix.
                StructKind::Prefixed(prefix_size, prefix_align.abi),
            )?;
            variant.variants = Variants::Single { index };

            let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
                bug!();
            };

            // Now, stitch the promoted and variant-only fields back together in
            // the order they are mentioned by our GeneratorLayout.
            // Because we only use some subset (that can differ between variants)
            // of the promoted fields, we can't just pick those elements of the
            // `promoted_memory_index` (as we'd end up with gaps).
            // So instead, we build an "inverse memory_index", as if all of the
            // promoted fields were being used, but leave the elements not in the
            // subset as `INVALID_FIELD_IDX`, which we can filter out later to
            // obtain a valid (bijective) mapping.
            const INVALID_FIELD_IDX: u32 = !0;
            let mut combined_inverse_memory_index =
                vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
            let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
            let combined_offsets = variant_fields
                .iter()
                .enumerate()
                .map(|(i, local)| {
                    let (offset, memory_index) = match assignments[*local] {
                        Unassigned => bug!(),
                        Assigned(_) => {
                            let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
                            // Variant-only fields sort after all promoted fields.
                            (offset, promoted_memory_index.len() as u32 + memory_index)
                        }
                        Ineligible(field_idx) => {
                            let field_idx = field_idx.unwrap() as usize;
                            (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                        }
                    };
                    combined_inverse_memory_index[memory_index as usize] = i as u32;
                    offset
                })
                .collect();

            // Remove the unused slots and invert the mapping to obtain the
            // combined `memory_index` (also see previous comment).
            combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
            let combined_memory_index = invert_mapping(&combined_inverse_memory_index);

            variant.fields = FieldsShape::Arbitrary {
                offsets: combined_offsets,
                memory_index: combined_memory_index,
            };

            // The generator must be big/aligned enough for its largest variant.
            size = size.max(variant.size);
            align = align.max(variant.align);
            Ok(variant)
        })
        .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

    size = size.align_to(align.abi);

    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) {
        Abi::Uninhabited
    } else {
        Abi::Aggregate { sized: true }
    };

    let layout = tcx.intern_layout(LayoutS {
        variants: Variants::Multiple {
            tag,
            tag_encoding: TagEncoding::Direct,
            tag_field: tag_index,
            variants,
        },
        fields: outer_fields,
        abi,
        largest_niche: prefix.largest_niche,
        size,
        align,
    });
    debug!("generator layout ({:?}): {:#?}", ty, layout);
    Ok(layout)
}
801 | ||
802 | /// This is invoked by the `layout_of` query to record the final | |
803 | /// layout of each type. | |
804 | #[inline(always)] | |
805 | fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>) { | |
806 | // If we are running with `-Zprint-type-sizes`, maybe record layouts | |
807 | // for dumping later. | |
808 | if cx.tcx.sess.opts.unstable_opts.print_type_sizes { | |
809 | record_layout_for_printing_outlined(cx, layout) | |
810 | } | |
811 | } | |
812 | ||
813 | fn record_layout_for_printing_outlined<'tcx>( | |
814 | cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, | |
815 | layout: TyAndLayout<'tcx>, | |
816 | ) { | |
817 | // Ignore layouts that are done with non-empty environments or | |
818 | // non-monomorphic layouts, as the user only wants to see the stuff | |
819 | // resulting from the final codegen session. | |
820 | if layout.ty.has_non_region_param() || !cx.param_env.caller_bounds().is_empty() { | |
821 | return; | |
822 | } | |
823 | ||
824 | // (delay format until we actually need it) | |
825 | let record = |kind, packed, opt_discr_size, variants| { | |
826 | let type_desc = format!("{:?}", layout.ty); | |
827 | cx.tcx.sess.code_stats.record_type_size( | |
828 | kind, | |
829 | type_desc, | |
830 | layout.align.abi, | |
831 | layout.size, | |
832 | packed, | |
833 | opt_discr_size, | |
834 | variants, | |
835 | ); | |
836 | }; | |
837 | ||
9c376795 FG |
838 | match *layout.ty.kind() { |
839 | ty::Adt(adt_def, _) => { | |
2b03887a | 840 | debug!("print-type-size t: `{:?}` process adt", layout.ty); |
9c376795 FG |
841 | let adt_kind = adt_def.adt_kind(); |
842 | let adt_packed = adt_def.repr().pack.is_some(); | |
843 | let (variant_infos, opt_discr_size) = variant_info_for_adt(cx, layout, adt_def); | |
844 | record(adt_kind.into(), adt_packed, opt_discr_size, variant_infos); | |
845 | } | |
846 | ||
847 | ty::Generator(def_id, substs, _) => { | |
848 | debug!("print-type-size t: `{:?}` record generator", layout.ty); | |
849 | // Generators always have a begin/poisoned/end state with additional suspend points | |
850 | let (variant_infos, opt_discr_size) = | |
851 | variant_info_for_generator(cx, layout, def_id, substs); | |
852 | record(DataTypeKind::Generator, false, opt_discr_size, variant_infos); | |
2b03887a FG |
853 | } |
854 | ||
855 | ty::Closure(..) => { | |
856 | debug!("print-type-size t: `{:?}` record closure", layout.ty); | |
857 | record(DataTypeKind::Closure, false, None, vec![]); | |
2b03887a FG |
858 | } |
859 | ||
860 | _ => { | |
861 | debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty); | |
2b03887a FG |
862 | } |
863 | }; | |
9c376795 | 864 | } |
2b03887a | 865 | |
9c376795 FG |
866 | fn variant_info_for_adt<'tcx>( |
867 | cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, | |
868 | layout: TyAndLayout<'tcx>, | |
869 | adt_def: AdtDef<'tcx>, | |
870 | ) -> (Vec<VariantInfo>, Option<Size>) { | |
2b03887a FG |
871 | let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| { |
872 | let mut min_size = Size::ZERO; | |
873 | let field_info: Vec<_> = flds | |
874 | .iter() | |
875 | .enumerate() | |
876 | .map(|(i, &name)| { | |
877 | let field_layout = layout.field(cx, i); | |
878 | let offset = layout.fields.offset(i); | |
9c376795 | 879 | min_size = min_size.max(offset + field_layout.size); |
2b03887a FG |
880 | FieldInfo { |
881 | name, | |
882 | offset: offset.bytes(), | |
883 | size: field_layout.size.bytes(), | |
884 | align: field_layout.align.abi.bytes(), | |
885 | } | |
886 | }) | |
887 | .collect(); | |
888 | ||
889 | VariantInfo { | |
890 | name: n, | |
891 | kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact }, | |
892 | align: layout.align.abi.bytes(), | |
893 | size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() }, | |
894 | fields: field_info, | |
895 | } | |
896 | }; | |
897 | ||
898 | match layout.variants { | |
899 | Variants::Single { index } => { | |
900 | if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive { | |
901 | debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variant(index).name); | |
902 | let variant_def = &adt_def.variant(index); | |
903 | let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect(); | |
9c376795 | 904 | (vec![build_variant_info(Some(variant_def.name), &fields, layout)], None) |
2b03887a | 905 | } else { |
9c376795 | 906 | (vec![], None) |
2b03887a FG |
907 | } |
908 | } | |
909 | ||
910 | Variants::Multiple { tag, ref tag_encoding, .. } => { | |
911 | debug!( | |
912 | "print-type-size `{:#?}` adt general variants def {}", | |
913 | layout.ty, | |
914 | adt_def.variants().len() | |
915 | ); | |
916 | let variant_infos: Vec<_> = adt_def | |
917 | .variants() | |
918 | .iter_enumerated() | |
919 | .map(|(i, variant_def)| { | |
920 | let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect(); | |
921 | build_variant_info(Some(variant_def.name), &fields, layout.for_variant(cx, i)) | |
922 | }) | |
923 | .collect(); | |
9c376795 FG |
924 | |
925 | ( | |
926 | variant_infos, | |
2b03887a FG |
927 | match tag_encoding { |
928 | TagEncoding::Direct => Some(tag.size(cx)), | |
929 | _ => None, | |
930 | }, | |
9c376795 | 931 | ) |
2b03887a FG |
932 | } |
933 | } | |
934 | } | |
9c376795 FG |
935 | |
/// Builds the `-Zprint-type-sizes` report for a generator type: one
/// `VariantInfo` per generator state (each entry of the generator layout's
/// `variant_fields`), plus the discriminant size when the tag is directly
/// encoded (`None` for niche encodings or non-multiple layouts).
fn variant_info_for_generator<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    layout: TyAndLayout<'tcx>,
    def_id: DefId,
    substs: ty::SubstsRef<'tcx>,
) -> (Vec<VariantInfo>, Option<Size>) {
    // Anything other than a multi-variant layout has no per-state breakdown
    // to report.
    let Variants::Multiple { tag, ref tag_encoding, tag_field, .. } = layout.variants else {
        return (vec![], None);
    };

    let (generator, state_specific_names) = cx.tcx.generator_layout_and_saved_local_names(def_id);
    let upvar_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);

    // Upvars (captured variables) are read from the generator layout's top-level
    // fields, so compute their `FieldInfo`s once and append them to every
    // variant below. `upvars_size` tracks the maximum field end offset.
    let mut upvars_size = Size::ZERO;
    let upvar_fields: Vec<_> = substs
        .as_generator()
        .upvar_tys()
        .zip(upvar_names)
        .enumerate()
        .map(|(field_idx, (_, name))| {
            let field_layout = layout.field(cx, field_idx);
            let offset = layout.fields.offset(field_idx);
            upvars_size = upvars_size.max(offset + field_layout.size);
            FieldInfo {
                name: Symbol::intern(&name),
                offset: offset.bytes(),
                size: field_layout.size.bytes(),
                align: field_layout.align.abi.bytes(),
            }
        })
        .collect();

    let variant_infos: Vec<_> = generator
        .variant_fields
        .iter_enumerated()
        .map(|(variant_idx, variant_def)| {
            let variant_layout = layout.for_variant(cx, variant_idx);
            let mut variant_size = Size::ZERO;
            let fields = variant_def
                .iter()
                .enumerate()
                .map(|(field_idx, local)| {
                    let field_layout = variant_layout.field(cx, field_idx);
                    let offset = variant_layout.fields.offset(field_idx);
                    // The struct is as large as the last field's end
                    variant_size = variant_size.max(offset + field_layout.size);
                    FieldInfo {
                        // Prefer the saved local's source-level name; unnamed
                        // locals get a synthesized `.generator_field{N}` label.
                        name: state_specific_names.get(*local).copied().flatten().unwrap_or(
                            Symbol::intern(&format!(".generator_field{}", local.as_usize())),
                        ),
                        offset: offset.bytes(),
                        size: field_layout.size.bytes(),
                        align: field_layout.align.abi.bytes(),
                    }
                })
                .chain(upvar_fields.iter().copied())
                .collect();

            // If the variant has no state-specific fields, then it's the size of the upvars.
            if variant_size == Size::ZERO {
                variant_size = upvars_size;
            }

            // This `if` deserves some explanation.
            //
            // The layout code has a choice of where to place the discriminant of this generator.
            // If the discriminant of the generator is placed early in the layout (before the
            // variant's own fields), then it'll implicitly be counted towards the size of the
            // variant, since we use the maximum offset to calculate size.
            // (side-note: I know this is a bit problematic given upvars placement, etc).
            //
            // This is important, since the layout printing code always subtracts this discriminant
            // size from the variant size if the struct is "enum"-like, so failing to account for it
            // will either lead to numerical underflow, or an underreported variant size...
            //
            // However, if the discriminant is placed past the end of the variant, then we need
            // to factor in the size of the discriminant manually. This really should be refactored
            // better, but this "works" for now.
            if layout.fields.offset(tag_field) >= variant_size {
                variant_size += match tag_encoding {
                    TagEncoding::Direct => tag.size(cx),
                    _ => Size::ZERO,
                };
            }

            VariantInfo {
                name: Some(Symbol::intern(&ty::GeneratorSubsts::variant_name(variant_idx))),
                kind: SizeKind::Exact,
                size: variant_size.bytes(),
                align: variant_layout.align.abi.bytes(),
                fields,
            }
        })
        .collect();
    (
        variant_infos,
        // Only a directly-encoded tag occupies dedicated discriminant space
        // worth reporting; niche encodings borrow space from a field.
        match tag_encoding {
            TagEncoding::Direct => Some(tag.size(cx)),
            _ => None,
        },
    )
}