//! Codegen of MIR place expressions (`PlaceRef`): projections, discriminant
//! reads/writes, and stack allocation of places.
60c5eb7d | 1 | use super::operand::OperandValue; |
dfeec247 | 2 | use super::{FunctionCx, LocalRef}; |
60c5eb7d | 3 | |
9fa01778 XL |
4 | use crate::common::IntPredicate; |
5 | use crate::glue; | |
9fa01778 | 6 | use crate::traits::*; |
ff7c6d11 | 7 | |
ba9703b0 XL |
8 | use rustc_middle::mir; |
9 | use rustc_middle::mir::tcx::PlaceTy; | |
c295e0f8 | 10 | use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout}; |
ba9703b0 | 11 | use rustc_middle::ty::{self, Ty}; |
9ffffee4 | 12 | use rustc_target::abi::{Abi, Align, FieldsShape, Int, Pointer, TagEncoding}; |
c295e0f8 | 13 | use rustc_target::abi::{VariantIdx, Variants}; |
ff7c6d11 XL |
14 | |
/// The backend representation of a MIR place: a (possibly fat) pointer to the
/// place's data, plus the monomorphized layout and the alignment known for it.
#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if null.
    pub llextra: Option<V>,

    /// The monomorphized type of this place, including variant information.
    pub layout: TyAndLayout<'tcx>,

    /// The alignment we know for this place.
    pub align: Align,
}
29 | ||
impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Creates a place for a sized value at `llval`, using the layout's
    /// ABI-mandated alignment. Asserts that `layout` is sized.
    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        assert!(layout.is_sized());
        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
    }

    /// Like `new_sized`, but with a caller-supplied alignment instead of the
    /// layout's ABI alignment.
    pub fn new_sized_aligned(
        llval: V,
        layout: TyAndLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(layout.is_sized());
        PlaceRef { llval, llextra: None, layout, align }
    }

    /// Allocates a stack slot for `layout` and returns a place pointing at it.
    /// Only valid for sized layouts (asserted).
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_sized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
        Self::new_sized(tmp, layout)
    }

    /// Returns a place for an indirect reference to an unsized place.
    // Note: allocates a slot for a `*mut T` to the unsized data, not for the
    // data itself (which cannot be statically sized).
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout)
    }

    /// Returns the element count of this array/slice place: a constant for
    /// sized arrays, or the length metadata (`llextra`) for unsized slices.
    /// Calls `bug!` if the layout is not array-shaped.
    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
        if let FieldsShape::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                // An unsized array layout carries a static count of 0; the real
                // length lives in the fat-pointer metadata.
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}
ff7c6d11 | 82 | |
impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        // Fast path: the field's offset is statically known, so an ordinary
        // GEP (or no adjustment at all) suffices.
        let mut simple = || {
            let llval = match self.layout.abi {
                _ if offset.bytes() == 0 => {
                    // Unions and newtypes only use an offset of 0.
                    // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
                    self.llval
                }
                Abi::ScalarPair(a, b)
                    if offset == a.size(bx.cx()).align_to(b.align(bx.cx()).abi) =>
                {
                    // Offset matches second field.
                    let ty = bx.backend_type(self.layout);
                    bx.struct_gep(ty, self.llval, 1)
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
                    // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
                    bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) => {
                    // All fields of Scalar and ScalarPair layouts must have been handled by this point.
                    // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
                    bug!(
                        "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
                        field,
                        self.layout
                    );
                }
                _ => {
                    let ty = bx.backend_type(self.layout);
                    bx.struct_gep(ty, self.llval, bx.cx().backend_field_index(self.layout, ix))
                }
            };
            PlaceRef {
                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str` or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.kind() {
            _ if self.llextra.is_none() => {
                debug!(
                    "unsized field `{}`, of `{:?}` has no metadata for adjustment",
                    ix, self.llval
                );
                return simple();
            }
            _ if field.is_sized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr().packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment
        let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer.
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);

        // Finally, cast back to the type expected.
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    #[instrument(level = "trace", skip(bx))]
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let dl = &bx.tcx().data_layout;
        let cast_to_layout = bx.cx().layout_of(cast_to);
        let cast_to = bx.cx().immediate_backend_type(cast_to_layout);
        if self.layout.abi.is_uninhabited() {
            // An uninhabited value can never be read, so any result works.
            return bx.cx().const_poison(cast_to);
        }
        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
            Variants::Single { index } => {
                // Single-variant layouts have no stored tag; the discriminant
                // is a compile-time constant.
                let discr_val = self
                    .layout
                    .ty
                    .discriminant_for_variant(bx.cx().tcx(), index)
                    .map_or(index.as_u32() as u128, |discr| discr.val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let tag = self.project_field(bx, tag_field);
        let tag_op = bx.load_operand(tag);
        let tag_imm = tag_op.immediate();

        // Decode the discriminant (specifically if it's niche-encoded).
        match *tag_encoding {
            TagEncoding::Direct => {
                let signed = match tag_scalar.primitive() {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Int(_, signed) => !tag_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(tag_imm, cast_to, signed)
            }
            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
                // Cast to an integer so we don't have to treat a pointer as a
                // special case.
                let (tag, tag_llty) = match tag_scalar.primitive() {
                    // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
                    Pointer(_) => {
                        let t = bx.type_from_integer(dl.ptr_sized_integer());
                        let tag = bx.ptrtoint(tag_imm, t);
                        (tag, t)
                    }
                    _ => (tag_imm, bx.cx().immediate_backend_type(tag_op.layout)),
                };

                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();

                // We have a subrange `niche_start..=niche_end` inside `range`.
                // If the value of the tag is inside this subrange, it's a
                // "niche value", an increment of the discriminant. Otherwise it
                // indicates the untagged variant.
                // A general algorithm to extract the discriminant from the tag
                // is:
                // relative_tag = tag - niche_start
                // is_niche = relative_tag <= (ule) relative_max
                // discr = if is_niche {
                //     cast(relative_tag) + niche_variants.start()
                // } else {
                //     untagged_variant
                // }
                // However, we will likely be able to emit simpler code.
                let (is_niche, tagged_discr, delta) = if relative_max == 0 {
                    // Best case scenario: only one tagged variant. This will
                    // likely become just a comparison and a jump.
                    // The algorithm is:
                    // is_niche = tag == niche_start
                    // discr = if is_niche {
                    //     niche_start
                    // } else {
                    //     untagged_variant
                    // }
                    let niche_start = bx.cx().const_uint_big(tag_llty, niche_start);
                    let is_niche = bx.icmp(IntPredicate::IntEQ, tag, niche_start);
                    let tagged_discr =
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64);
                    (is_niche, tagged_discr, 0)
                } else {
                    // The special cases don't apply, so we'll have to go with
                    // the general algorithm.
                    let relative_discr = bx.sub(tag, bx.cx().const_uint_big(tag_llty, niche_start));
                    let cast_tag = bx.intcast(relative_discr, cast_to, false);
                    let is_niche = bx.icmp(
                        IntPredicate::IntULE,
                        relative_discr,
                        bx.cx().const_uint(tag_llty, relative_max as u64),
                    );
                    (is_niche, cast_tag, niche_variants.start().as_u32() as u128)
                };

                let tagged_discr = if delta == 0 {
                    tagged_discr
                } else {
                    bx.add(tagged_discr, bx.cx().const_uint_big(cast_to, delta))
                };

                let discr = bx.select(
                    is_niche,
                    tagged_discr,
                    bx.cx().const_uint(cast_to, untagged_variant.as_u32() as u64),
                );

                // In principle we could insert assumes on the possible range of `discr`, but
                // currently in LLVM this seems to be a pessimization.

                discr
            }
        }
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            return;
        }
        match self.layout.variants {
            Variants::Single { index } => {
                // Nothing stored in memory; just check the request is consistent.
                assert_eq!(index, variant_index);
            }
            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
                // Directly store the discriminant value into the tag field.
                let ptr = self.project_field(bx, tag_field);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align,
                );
            }
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
                tag_field,
                ..
            } => {
                // The untagged variant needs no write; other variants store
                // their niche value into the tag field.
                if variant_index != untagged_variant {
                    let niche = self.project_field(bx, tag_field);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
                    // FIXME(eddyb): check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb): using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    /// Projects to element `llindex` of this array/slice place
    /// (i.e. `place[llindex]`). The resulting place is always sized
    /// (`llextra: None`).
    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V,
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
        let layout = self.layout.field(bx, 0);
        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        PlaceRef {
            llval: bx.inbounds_gep(
                bx.cx().backend_type(self.layout),
                self.llval,
                &[bx.cx().const_usize(0), llindex],
            ),
            llextra: None,
            layout,
            align: self.align.restrict_for_offset(offset),
        }
    }

    /// Projects to the given enum variant: swaps in the variant's layout and
    /// casts the pointer to the variant's struct type.
    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    /// Re-types this place as `ty` (used for `OpaqueCast` projections),
    /// casting the pointer to the new backend type.
    pub fn project_type<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        ty: Ty<'tcx>,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = bx.cx().layout_of(ty);

        // Cast to the appropriate type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    /// Emits a lifetime-start marker covering this place's storage.
    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    /// Emits a lifetime-end marker covering this place's storage.
    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}
442 | ||
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    /// Codegens the MIR place expression `place_ref` into a backend
    /// `PlaceRef`, starting from the base local and applying each projection
    /// element in turn.
    #[instrument(level = "trace", skip(self, bx))]
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> PlaceRef<'tcx, Bx::Value> {
        let cx = self.cx;
        let tcx = self.cx.tcx();

        // `base` is the index of the first projection element still left to
        // apply (1 when the leading deref was already consumed below).
        let mut base = 0;
        let mut cg_base = match self.locals[place_ref.local] {
            LocalRef::Place(place) => place,
            LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
            LocalRef::Operand(..) => {
                if place_ref.has_deref() {
                    // The local is an SSA operand; a leading `Deref` turns it
                    // into a place by dereferencing the operand's value.
                    base = 1;
                    let cg_base = self.codegen_consume(
                        bx,
                        mir::PlaceRef { projection: &place_ref.projection[..0], ..place_ref },
                    );
                    cg_base.deref(bx.cx())
                } else {
                    bug!("using operand local {:?} as place", place_ref);
                }
            }
            LocalRef::PendingOperand => {
                bug!("using still-pending operand local {:?} as place", place_ref);
            }
        };
        for elem in place_ref.projection[base..].iter() {
            cg_base = match *elem {
                mir::ProjectionElem::Deref => bx.load_operand(cg_base).deref(bx.cx()),
                mir::ProjectionElem::Field(ref field, _) => {
                    cg_base.project_field(bx, field.index())
                }
                mir::ProjectionElem::OpaqueCast(ty) => cg_base.project_type(bx, ty),
                mir::ProjectionElem::Index(index) => {
                    let index = &mir::Operand::Copy(mir::Place::from(index));
                    let index = self.codegen_operand(bx, index);
                    let llindex = index.immediate();
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset as u64);
                    cg_base.project_index(bx, lloffset)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => {
                    // Index counted from the end: compute `len - offset`.
                    let lloffset = bx.cx().const_usize(offset as u64);
                    let lllen = cg_base.len(bx.cx());
                    let llindex = bx.sub(lllen, lloffset);
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::Subslice { from, to, from_end } => {
                    let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from as u64));
                    let projected_ty =
                        PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, *elem).ty;
                    subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));

                    if subslice.layout.is_unsized() {
                        assert!(from_end, "slice subslices should be `from_end`");
                        // New length metadata = old length - elements cut from
                        // both ends.
                        subslice.llextra = Some(bx.sub(
                            cg_base.llextra.unwrap(),
                            bx.cx().const_usize((from as u64) + (to as u64)),
                        ));
                    }

                    // Cast the place pointer type to the new
                    // array or slice type (`*[%_; new_len]`).
                    subslice.llval = bx.pointercast(
                        subslice.llval,
                        bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
                    );

                    subslice
                }
                mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
            };
        }
        debug!("codegen_place(place={:?}) => {:?}", place_ref, cg_base);
        cg_base
    }

    /// Returns the monomorphized type of `place_ref` in this function's MIR.
    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place_ref.ty(self.mir, tcx);
        self.monomorphize(place_ty.ty)
    }
}
5869c6ff XL |
532 | |
/// Rounds the runtime value `value` up to the next multiple of `align`
/// (both backend values; `align` is assumed to be a power of two).
fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    value: Bx::Value,
    align: Bx::Value,
) -> Bx::Value {
    // In pseudo code:
    //
    //     if value & (align - 1) == 0 {
    //         value
    //     } else {
    //         (value & !(align - 1)) + align
    //     }
    //
    // Usually this is written without branches as
    //
    //     (value + align - 1) & !(align - 1)
    //
    // But this formula cannot take advantage of constant `value`. E.g. if `value` is known
    // at compile time to be `1`, this expression should be optimized to `align`. However,
    // optimization only holds if `align` is a power of two. Since the optimizer doesn't know
    // that `align` is a power of two, it cannot perform this optimization.
    //
    // Instead we use
    //
    //     value + (-value & (align - 1))
    //
    // Since `align` is used only once, the expression can be optimized. For `value = 0`
    // its optimized to `0` even in debug mode.
    //
    // NB: The previous version of this code used
    //
    //     (value + align - 1) & -align
    //
    // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
    // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
    let one = bx.const_usize(1);
    let align_minus_1 = bx.sub(align, one);
    let neg_value = bx.neg(value);
    let offset = bx.and(neg_value, align_minus_1);
    bx.add(value, offset)
}