//! Computations on places -- field projections, going from mir::Place, and writing
//! into a place.
//! All high-level functions to write to memory work on places as destinations.

use std::convert::TryFrom;
use std::hash::Hash;

use rustc_ast::Mutability;
use rustc_macros::HashStable;
use rustc_middle::mir;
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding};
use rustc_target::abi::{HasDataLayout, Size, VariantIdx, Variants};

use super::{
    alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
    ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy,
    Operand, Pointer, PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit,
};

#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
/// Information required for the sound usage of a `MemPlace`.
pub enum MemPlaceMeta<Tag: Provenance = AllocId> {
    /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
    Meta(Scalar<Tag>),
    /// `Sized` types or unsized `extern type`
    None,
    /// The address of this place may not be taken. This protects a `MemPlace` that comes from
    /// a ZST operand without a backing allocation from being converted to an integer address.
    /// This should be impossible, because you can't take the address of an operand, but this is
    /// a second protection layer ensuring that we don't mess up.
    Poison,
}
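
// Illustrative examples of the metadata variants (for orientation; not exhaustive): the
// pointee place of a thin `&u8` has `MemPlaceMeta::None`, a `&[u8]` wide pointer carries
// the slice length as `MemPlaceMeta::Meta(len)`, and a `&dyn Trait` wide pointer carries
// the vtable pointer the same way.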

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MemPlaceMeta, 24);

impl<Tag: Provenance> MemPlaceMeta<Tag> {
    pub fn unwrap_meta(self) -> Scalar<Tag> {
        match self {
            Self::Meta(s) => s,
            Self::None | Self::Poison => {
                bug!("expected wide pointer extra data (e.g. slice length or trait object vtable)")
            }
        }
    }
    fn has_meta(self) -> bool {
        match self {
            Self::Meta(_) => true,
            Self::None | Self::Poison => false,
        }
    }
}

#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
pub struct MemPlace<Tag: Provenance = AllocId> {
    /// The pointer can be a pure integer, with the `None` tag.
    pub ptr: Pointer<Option<Tag>>,
    pub align: Align,
    /// Metadata for unsized places. Interpretation is up to the type.
    /// Must not be present for sized types, but can be missing for unsized types
    /// (e.g., `extern type`).
    pub meta: MemPlaceMeta<Tag>,
}

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MemPlace, 48);

#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
pub enum Place<Tag: Provenance = AllocId> {
    /// A place referring to a value allocated in the `Memory` system.
    Ptr(MemPlace<Tag>),

    /// To support alloc-free locals, we are able to write directly to a local.
    /// (Without that optimization, we'd just always be a `MemPlace`.)
    Local { frame: usize, local: mir::Local },
}

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Place, 56);

#[derive(Copy, Clone, Debug)]
pub struct PlaceTy<'tcx, Tag: Provenance = AllocId> {
    place: Place<Tag>, // Keep this private; it helps enforce invariants.
    pub layout: TyAndLayout<'tcx>,
}

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(PlaceTy<'_>, 72);

impl<'tcx, Tag: Provenance> std::ops::Deref for PlaceTy<'tcx, Tag> {
    type Target = Place<Tag>;
    #[inline(always)]
    fn deref(&self) -> &Place<Tag> {
        &self.place
    }
}

/// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
pub struct MPlaceTy<'tcx, Tag: Provenance = AllocId> {
    mplace: MemPlace<Tag>,
    pub layout: TyAndLayout<'tcx>,
}

#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 64);

impl<'tcx, Tag: Provenance> std::ops::Deref for MPlaceTy<'tcx, Tag> {
    type Target = MemPlace<Tag>;
    #[inline(always)]
    fn deref(&self) -> &MemPlace<Tag> {
        &self.mplace
    }
}

impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
    #[inline(always)]
    fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
        PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout }
    }
}

impl<Tag: Provenance> MemPlace<Tag> {
    #[inline(always)]
    pub fn from_ptr(ptr: Pointer<Option<Tag>>, align: Align) -> Self {
        MemPlace { ptr, align, meta: MemPlaceMeta::None }
    }

    /// Adjust the provenance of the main pointer (metadata is unaffected).
    pub fn map_provenance(self, f: impl FnOnce(Option<Tag>) -> Option<Tag>) -> Self {
        MemPlace { ptr: self.ptr.map_provenance(f), ..self }
    }

    /// Turn an mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
    /// This is the inverse of `ref_to_mplace`.
    #[inline(always)]
    pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Tag> {
        match self.meta {
            MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
            MemPlaceMeta::Meta(meta) => {
                Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx).into(), meta.into())
            }
            MemPlaceMeta::Poison => bug!(
                "MPlaceTy::dangling may never be used to produce a \
                place that will have the address of its pointee taken"
            ),
        }
    }
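
    // Round-trip note (illustrative): for a wide place with `meta = Meta(len)`, `to_ref`
    // yields a `ScalarPair(ptr, len)` immediate, and `ref_to_mplace` on such an immediate
    // recovers an equivalent place. For `offset` below, note that offsetting an 8-aligned
    // place by 4 bytes can only guarantee 4-byte alignment; `restrict_for_offset` accounts
    // for exactly that.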

    #[inline]
    pub fn offset<'tcx>(
        self,
        offset: Size,
        meta: MemPlaceMeta<Tag>,
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, Self> {
        Ok(MemPlace {
            ptr: self.ptr.offset(offset, cx)?,
            align: self.align.restrict_for_offset(offset),
            meta,
        })
    }
}

impl<'tcx, Tag: Provenance> MPlaceTy<'tcx, Tag> {
    /// Produces a MemPlace that works for ZSTs, but nothing else.
    #[inline]
    pub fn dangling(layout: TyAndLayout<'tcx>) -> Self {
        let align = layout.align.abi;
        let ptr = Pointer::new(None, Size::from_bytes(align.bytes())); // no provenance, absolute address
        // `Poison` this to make sure that the pointer value `ptr` is never observable by the program.
        MPlaceTy { mplace: MemPlace { ptr, align, meta: MemPlaceMeta::Poison }, layout }
    }

    #[inline]
    pub fn offset(
        &self,
        offset: Size,
        meta: MemPlaceMeta<Tag>,
        layout: TyAndLayout<'tcx>,
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, Self> {
        Ok(MPlaceTy { mplace: self.mplace.offset(offset, meta, cx)?, layout })
    }

    #[inline]
    pub fn from_aligned_ptr(ptr: Pointer<Option<Tag>>, layout: TyAndLayout<'tcx>) -> Self {
        MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout }
    }

    #[inline]
    pub(super) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
        if self.layout.is_unsized() {
            // We need to consult `meta` metadata
            match self.layout.ty.kind() {
                ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_machine_usize(cx),
                _ => bug!("len not supported on unsized type {:?}", self.layout.ty),
            }
        } else {
            // Go through the layout. There are lots of types that support a length,
            // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
            match self.layout.fields {
                FieldsShape::Array { count, .. } => Ok(count),
                _ => bug!("len not supported on sized type {:?}", self.layout.ty),
            }
        }
    }

    #[inline]
    pub(super) fn vtable(&self) -> Scalar<Tag> {
        match self.layout.ty.kind() {
            ty::Dynamic(..) => self.mplace.meta.unwrap_meta(),
            _ => bug!("vtable not supported on type {:?}", self.layout.ty),
        }
    }
}

// These are defined here because they produce a place.
impl<'tcx, Tag: Provenance> OpTy<'tcx, Tag> {
    #[inline(always)]
    /// Note: do not call `as_ref` on the resulting place. This function should only be used to
    /// read from the resulting mplace, not to get its address back.
    pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
        match **self {
            Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
            Operand::Immediate(_) if self.layout.is_zst() => Ok(MPlaceTy::dangling(self.layout)),
            Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)),
        }
    }

    #[inline(always)]
    /// Note: do not call `as_ref` on the resulting place. This function should only be used to
    /// read from the resulting mplace, not to get its address back.
    pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Tag> {
        self.try_as_mplace().unwrap()
    }
}

impl<Tag: Provenance> Place<Tag> {
    #[inline]
    pub fn assert_mem_place(self) -> MemPlace<Tag> {
        match self {
            Place::Ptr(mplace) => mplace,
            _ => bug!("assert_mem_place: expected Place::Ptr, got {:?}", self),
        }
    }
}

impl<'tcx, Tag: Provenance> PlaceTy<'tcx, Tag> {
    #[inline]
    pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
        MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout }
    }
}

// separating the pointer tag for `impl Trait`, see https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M>
where
    // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
    Tag: Provenance + Eq + Hash + 'static,
    M: Machine<'mir, 'tcx, PointerTag = Tag>,
{
    /// Take a value, which represents a (thin or wide) reference, and make it a place.
    /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
    ///
    /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
    /// want to ever use the place for memory access!
    /// Generally prefer `deref_operand`.
    pub fn ref_to_mplace(
        &self,
        val: &ImmTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        let pointee_type =
            val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
        let layout = self.layout_of(pointee_type)?;
        let (ptr, meta) = match **val {
            Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None),
            Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta.check_init()?)),
        };

        let mplace = MemPlace {
            ptr: self.scalar_to_ptr(ptr.check_init()?),
            // We could use the run-time alignment here. For now, we do not, because
            // the point of tracking the alignment here is to make sure that the *static*
            // alignment information emitted with the loads is correct. The run-time
            // alignment can only be more restrictive.
            align: layout.align.abi,
            meta,
        };
        Ok(MPlaceTy { mplace, layout })
    }

    /// Take an operand, representing a pointer, and dereference it to a place -- that
    /// will always be a MemPlace. Lives in `place.rs` because it creates a place.
    pub fn deref_operand(
        &self,
        src: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        let val = self.read_immediate(src)?;
        trace!("deref to {} on {:?}", val.layout.ty, *val);
        let mplace = self.ref_to_mplace(&val)?;
        self.check_mplace_access(mplace, CheckInAllocMsg::DerefTest)?;
        Ok(mplace)
    }

    #[inline]
    pub(super) fn get_alloc(
        &self,
        place: &MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::PointerTag, M::AllocExtra>>> {
        assert!(!place.layout.is_unsized());
        assert!(!place.meta.has_meta());
        let size = place.layout.size;
        self.memory.get(place.ptr, size, place.align)
    }

    #[inline]
    pub(super) fn get_alloc_mut(
        &mut self,
        place: &MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::PointerTag, M::AllocExtra>>> {
        assert!(!place.layout.is_unsized());
        assert!(!place.meta.has_meta());
        let size = place.layout.size;
        self.memory.get_mut(place.ptr, size, place.align)
    }

    /// Check if this mplace is dereferenceable and sufficiently aligned.
    fn check_mplace_access(
        &self,
        mplace: MPlaceTy<'tcx, M::PointerTag>,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        let (size, align) = self
            .size_and_align_of_mplace(&mplace)?
            .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
        assert!(mplace.mplace.align <= align, "dynamic alignment less strict than static one?");
        let align = M::enforce_alignment(&self.memory.extra).then_some(align);
        self.memory.check_ptr_access_align(mplace.ptr, size, align.unwrap_or(Align::ONE), msg)?;
        Ok(())
    }

    /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
    /// always possible without allocating, so it can take `&self`. Also return the field's layout.
    /// This also works for arrays, but then the `usize` index type is limiting; for indexing into
    /// arrays, use `mplace_index`.
    #[inline(always)]
    pub fn mplace_field(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
        field: usize,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        let offset = base.layout.fields.offset(field);
        let field_layout = base.layout.field(self, field);

        // Offset may need adjustment for unsized fields.
        let (meta, offset) = if field_layout.is_unsized() {
            // Re-use parent metadata to determine dynamic field layout.
            // With custom DSTs, this *will* execute user-defined code, but the same
            // happens at run-time so that's okay.
            match self.size_and_align_of(&base.meta, &field_layout)? {
                Some((_, align)) => (base.meta, offset.align_to(align)),
                None => {
                    // For unsized types with an extern type tail we perform no adjustments.
                    // NOTE: keep this in sync with `PlaceRef::project_field` in the codegen backend.
                    assert!(matches!(base.meta, MemPlaceMeta::None));
                    (base.meta, offset)
                }
            }
        } else {
            // base.meta could be present; we might be accessing a sized field of an unsized
            // struct.
            (MemPlaceMeta::None, offset)
        };

        // We do not look at `base.layout.align` nor `field_layout.align`, unlike
        // codegen -- mostly to see if we can get away with that
        base.offset(offset, meta, field_layout, self)
    }
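
    // Worked example for the unsized-field adjustment above (illustrative, hypothetical type):
    // for `struct S { x: u8, tail: dyn Trait }` where the concrete tail type has 8-byte
    // alignment, the statically computed tail offset of 1 is bumped by `offset.align_to(align)`
    // to 8 -- matching what codegen's `project_field` does at run-time.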

    /// Index into an array.
    #[inline(always)]
    pub fn mplace_index(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
        index: u64,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // Not using the layout method because we want to compute on u64
        match base.layout.fields {
            FieldsShape::Array { stride, .. } => {
                let len = base.len(self)?;
                if index >= len {
                    // This can only be reached in ConstProp and non-rustc-MIR.
                    throw_ub!(BoundsCheckFailed { len, index });
                }
                let offset = stride * index; // `Size` multiplication
                // All fields have the same layout.
                let field_layout = base.layout.field(self, 0);

                assert!(!field_layout.is_unsized());
                base.offset(offset, MemPlaceMeta::None, field_layout, self)
            }
            _ => span_bug!(
                self.cur_span(),
                "`mplace_index` called on non-array type {:?}",
                base.layout.ty
            ),
        }
    }

    // Iterates over all fields of an array. Much more efficient than doing the
    // same by repeatedly calling `mplace_index`.
    pub(super) fn mplace_array_fields<'a>(
        &self,
        base: &'a MPlaceTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'a>
    {
        let len = base.len(self)?; // also asserts that we have a type where this makes sense
        let FieldsShape::Array { stride, .. } = base.layout.fields else {
            span_bug!(self.cur_span(), "mplace_array_fields: expected an array layout");
        };
        let layout = base.layout.field(self, 0);
        let dl = &self.tcx.data_layout;
        // `Size` multiplication
        Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl)))
    }

    fn mplace_subslice(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
        from: u64,
        to: u64,
        from_end: bool,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        let len = base.len(self)?; // also asserts that we have a type where this makes sense
        let actual_to = if from_end {
            if from.checked_add(to).map_or(true, |to| to > len) {
                // This can only be reached in ConstProp and non-rustc-MIR.
                throw_ub!(BoundsCheckFailed { len, index: from.saturating_add(to) });
            }
            len.checked_sub(to).unwrap()
        } else {
            to
        };

        // Not using the layout method because that works with usize, and does not work with slices
        // (that have count 0 in their layout).
        let from_offset = match base.layout.fields {
            FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
            _ => {
                span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout)
            }
        };

        // Compute meta and new layout
        let inner_len = actual_to.checked_sub(from).unwrap();
        let (meta, ty) = match base.layout.ty.kind() {
            // It is not nice to match on the type, but that seems to be the only way to
            // implement this.
            ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(*inner, inner_len)),
            ty::Slice(..) => {
                let len = Scalar::from_machine_usize(inner_len, self);
                (MemPlaceMeta::Meta(len), base.layout.ty)
            }
            _ => {
                span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty)
            }
        };
        let layout = self.layout_of(ty)?;
        base.offset(from_offset, meta, layout, self)
    }
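
    // Worked example (illustrative): a pattern like `[_, _, rest @ .., _]` on a slice of
    // length 6 produces `from = 2`, `to = 1`, `from_end = true`; then
    // `actual_to = 6 - 1 = 5` and `inner_len = 5 - 2 = 3`, i.e. elements 2, 3, and 4.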

    pub(crate) fn mplace_downcast(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
        variant: VariantIdx,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // Downcasts only change the layout.
        // (In particular, no check about whether this is even the active variant -- that's by design,
        // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.)
        assert!(!base.meta.has_meta());
        Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..*base })
    }

    /// Project into an mplace
    pub(super) fn mplace_projection(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
        proj_elem: mir::PlaceElem<'tcx>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::ProjectionElem::*;
        Ok(match proj_elem {
            Field(field, _) => self.mplace_field(base, field.index())?,
            Downcast(_, variant) => self.mplace_downcast(base, variant)?,
            Deref => self.deref_operand(&base.into())?,

            Index(local) => {
                let layout = self.layout_of(self.tcx.types.usize)?;
                let n = self.access_local(self.frame(), local, Some(layout))?;
                let n = self.read_scalar(&n)?;
                let n = n.to_machine_usize(self)?;
                self.mplace_index(base, n)?
            }

            ConstantIndex { offset, min_length, from_end } => {
                let n = base.len(self)?;
                if n < min_length {
                    // This can only be reached in ConstProp and non-rustc-MIR.
                    throw_ub!(BoundsCheckFailed { len: min_length, index: n });
                }

                let index = if from_end {
                    assert!(0 < offset && offset <= min_length);
                    n.checked_sub(offset).unwrap()
                } else {
                    assert!(offset < min_length);
                    offset
                };

                self.mplace_index(base, index)?
            }

            Subslice { from, to, from_end } => self.mplace_subslice(base, from, to, from_end)?,
        })
    }
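
    // Illustrative `ConstantIndex` case: a `[.., x]` pattern yields
    // `ConstantIndex { offset: 1, min_length: 1, from_end: true }`, so `x` is element
    // `n - 1` of a slice of length `n`.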

    /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
    /// Also returns the number of elements.
    pub fn mplace_to_simd(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> {
        // Basically we just transmute this place into an array following simd_size_and_type.
        // (Transmuting is okay since this is an in-memory place. We also double-check the size
        // stays the same.)
        let (len, e_ty) = base.layout.ty.simd_size_and_type(*self.tcx);
        let array = self.tcx.mk_array(e_ty, len);
        let layout = self.layout_of(array)?;
        assert_eq!(layout.size, base.layout.size);
        Ok((MPlaceTy { layout, ..*base }, len))
    }

    /// Gets the place of a field inside the place, and also the field's type.
    /// Just a convenience function, but used quite a bit.
    /// This is the only projection that might have a side-effect: we cannot project
    /// into the field of a local `ScalarPair`; we have to allocate it first.
    pub fn place_field(
        &mut self,
        base: &PlaceTy<'tcx, M::PointerTag>,
        field: usize,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        // FIXME: We could try to be smarter and avoid allocation for fields that span the
        // entire place.
        let mplace = self.force_allocation(base)?;
        Ok(self.mplace_field(&mplace, field)?.into())
    }

    pub fn place_index(
        &mut self,
        base: &PlaceTy<'tcx, M::PointerTag>,
        index: u64,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        let mplace = self.force_allocation(base)?;
        Ok(self.mplace_index(&mplace, index)?.into())
    }

    pub fn place_downcast(
        &self,
        base: &PlaceTy<'tcx, M::PointerTag>,
        variant: VariantIdx,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        // Downcast just changes the layout
        Ok(match base.place {
            Place::Ptr(mplace) => {
                self.mplace_downcast(&MPlaceTy { mplace, layout: base.layout }, variant)?.into()
            }
            Place::Local { .. } => {
                let layout = base.layout.for_variant(self, variant);
                PlaceTy { layout, ..*base }
            }
        })
    }

    /// Projects into a place.
    pub fn place_projection(
        &mut self,
        base: &PlaceTy<'tcx, M::PointerTag>,
        &proj_elem: &mir::ProjectionElem<mir::Local, Ty<'tcx>>,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::ProjectionElem::*;
        Ok(match proj_elem {
            Field(field, _) => self.place_field(base, field.index())?,
            Downcast(_, variant) => self.place_downcast(base, variant)?,
            Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
            // For the other variants, we have to force an allocation.
            // This matches `operand_projection`.
            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
                let mplace = self.force_allocation(base)?;
                self.mplace_projection(&mplace, proj_elem)?.into()
            }
        })
    }

    /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
    /// Also returns the number of elements.
    pub fn place_to_simd(
        &mut self,
        base: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> {
        let mplace = self.force_allocation(base)?;
        self.mplace_to_simd(&mplace)
    }

    /// Computes a place. You should only use this if you intend to write into this
    /// place; for reading, a more efficient alternative is `eval_place_to_op`.
    pub fn eval_place(
        &mut self,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        let mut place_ty = PlaceTy {
            // This works even for dead/uninitialized locals; we check further when writing
            place: Place::Local { frame: self.frame_idx(), local: place.local },
            layout: self.layout_of_local(self.frame(), place.local, None)?,
        };

        for elem in place.projection.iter() {
            place_ty = self.place_projection(&place_ty, &elem)?
        }

        trace!("{:?}", self.dump_place(place_ty.place));
        // Sanity-check the type we ended up with.
        debug_assert!(mir_assign_valid_types(
            *self.tcx,
            self.param_env,
            self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
                place.ty(&self.frame().body.local_decls, *self.tcx).ty
            )?)?,
            place_ty.layout,
        ));
        Ok(place_ty)
    }
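
    // Illustrative walk-through: for the MIR place `(*_1).0`, `eval_place` starts from the
    // local `_1`, applies `Deref` (via `deref_operand`), then `Field(0)` (via `place_field`),
    // yielding the `PlaceTy` of the field.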

    /// Write an immediate to a place
    #[inline(always)]
    pub fn write_immediate(
        &mut self,
        src: Immediate<M::PointerTag>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        self.write_immediate_no_validate(src, dest)?;

        if M::enforce_validity(self) {
            // Data got changed, better make sure it matches the type!
            self.validate_operand(&self.place_to_op(dest)?)?;
        }

        Ok(())
    }

    /// Write a scalar to a place
    #[inline(always)]
    pub fn write_scalar(
        &mut self,
        val: impl Into<ScalarMaybeUninit<M::PointerTag>>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        self.write_immediate(Immediate::Scalar(val.into()), dest)
    }

    /// Write a pointer to a place
    #[inline(always)]
    pub fn write_pointer(
        &mut self,
        ptr: impl Into<Pointer<Option<M::PointerTag>>>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest)
    }

    /// Write an immediate to a place.
    /// If you use this you are responsible for validating that things got copied at the
    /// right type.
    fn write_immediate_no_validate(
        &mut self,
        src: Immediate<M::PointerTag>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        if cfg!(debug_assertions) {
            // This is a very common path, avoid some checks in release mode
            assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
            match src {
                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(..))) => assert_eq!(
                    self.pointer_size(),
                    dest.layout.size,
                    "Size mismatch when writing pointer"
                ),
                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Int(int))) => {
                    assert_eq!(int.size(), dest.layout.size, "Size mismatch when writing bits")
                }
                Immediate::Scalar(ScalarMaybeUninit::Uninit) => {} // uninit can have any size
                Immediate::ScalarPair(_, _) => {
                    // FIXME: Can we check anything here?
                }
            }
        }
        trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);

        // See if we can avoid an allocation. This is the counterpart to `try_read_immediate`,
        // but not factored as a separate function.
        let mplace = match dest.place {
            Place::Local { frame, local } => {
                match M::access_local_mut(self, frame, local)? {
                    Ok(local) => {
                        // Local can be updated in-place.
                        *local = LocalValue::Live(Operand::Immediate(src));
                        return Ok(());
                    }
                    Err(mplace) => {
                        // The local is in memory, go on below.
                        mplace
                    }
                }
            }
            Place::Ptr(mplace) => mplace, // already referring to memory
        };
        let dest = MPlaceTy { mplace, layout: dest.layout };

        // This is already in memory, write there.
        self.write_immediate_to_mplace_no_validate(src, &dest)
    }
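
    // Worked example for the `ScalarPair` path below (illustrative): with components
    // `(u8, u32)`, `a_size = 1` and `b` has 4-byte alignment, so
    // `b_offset = a_size.align_to(4) = 4`; the two scalar writes then cover byte ranges
    // 0..1 and 4..8, and the padding in between is not touched.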

    /// Write an immediate to memory.
    /// If you use this you are responsible for validating that things got copied at the
    /// right type.
    fn write_immediate_to_mplace_no_validate(
        &mut self,
        value: Immediate<M::PointerTag>,
        dest: &MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // Note that it is really important that the type here is the right one, and matches the
        // type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
        // to handle padding properly, which is only correct if we never look at this data with the
        // wrong type.

        // Invalid places are a thing: the return place of a diverging function
        let tcx = *self.tcx;
        let Some(mut alloc) = self.get_alloc_mut(dest)? else {
            // zero-sized access
            return Ok(());
        };

        // FIXME: We should check that there are dest.layout.size many bytes available in
        // memory. The code below is not sufficient, with enough padding it might not
        // cover all the bytes!
        match value {
            Immediate::Scalar(scalar) => {
                match dest.layout.abi {
                    Abi::Scalar(_) => {} // fine
                    _ => span_bug!(
                        self.cur_span(),
                        "write_immediate_to_mplace: invalid Scalar layout: {:#?}",
                        dest.layout
                    ),
                }
                alloc.write_scalar(alloc_range(Size::ZERO, dest.layout.size), scalar)
            }
            Immediate::ScalarPair(a_val, b_val) => {
                // We checked `ptr_align` above, so all fields will have the alignment they need.
                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
                let (a, b) = match dest.layout.abi {
                    Abi::ScalarPair(a, b) => (a.value, b.value),
                    _ => span_bug!(
                        self.cur_span(),
                        "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
                        dest.layout
                    ),
                };
                let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
                let b_offset = a_size.align_to(b.align(&tcx).abi);

                // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
                // but that does not work: We could be a newtype around a pair, then the
                // fields do not match the `ScalarPair` components.

                alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?;
                alloc.write_scalar(alloc_range(b_offset, b_size), b_val)
            }
        }
    }

    /// Copies the data from an operand to a place. This does not support transmuting!
    /// Use `copy_op_transmute` if the layouts could disagree.
    #[inline(always)]
    pub fn copy_op(
        &mut self,
        src: &OpTy<'tcx, M::PointerTag>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        self.copy_op_no_validate(src, dest)?;

        if M::enforce_validity(self) {
            // Data got changed, better make sure it matches the type!
            self.validate_operand(&self.place_to_op(dest)?)?;
        }

        Ok(())
    }

    /// Copies the data from an operand to a place. This does not support transmuting!
    /// Use `copy_op_transmute` if the layouts could disagree.
    /// Also, if you use this you are responsible for validating that things get copied at the
    /// right type.
    fn copy_op_no_validate(
        &mut self,
        src: &OpTy<'tcx, M::PointerTag>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // We do NOT compare the types for equality, because well-typed code can
        // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
        if !mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
            span_bug!(
                self.cur_span(),
                "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
                src.layout.ty,
                dest.layout.ty,
            );
        }

        // Let us see if the layout is simple, so we can take a shortcut and avoid
        // `force_allocation`.
        let src = match self.try_read_immediate(src)? {
            Ok(src_val) => {
                assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
                // Yay, we got a value that we can write directly.
                // FIXME: Add a check to make sure that if `src` is indirect,
                // it does not overlap with `dest`.
                return self.write_immediate_no_validate(*src_val, dest);
            }
            Err(mplace) => mplace,
        };
        // Slow path, this does not fit into an immediate. Just memcpy.
        trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);

        // This interprets `src.meta` with the `dest` local's layout, if an unsized local
        // is being initialized!
        let (dest, size) = self.force_allocation_maybe_sized(dest, src.meta)?;
        let size = size.unwrap_or_else(|| {
            assert!(
                !dest.layout.is_unsized(),
                "Cannot copy into already initialized unsized place"
            );
            dest.layout.size
        });
        assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");

        self.memory
            .copy(src.ptr, src.align, dest.ptr, dest.align, size, /*nonoverlapping*/ true)
    }

    /// Copies the data from an operand to a place. The layouts may disagree, but they must
    /// have the same size.
    pub fn copy_op_transmute(
        &mut self,
        src: &OpTy<'tcx, M::PointerTag>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
            // Fast path: Just use normal `copy_op`
            return self.copy_op(src, dest);
        }
        // We still require the sizes to match.
        if src.layout.size != dest.layout.size {
            // FIXME: This should be an assert instead of an error, but if we transmute within an
            // array length computation, `typeck` may not have yet been run and errored out. In fact
            // most likely we *are* running `typeck` right now. Investigate whether we can bail out
            // on `typeck_results().has_errors` at all const eval entry points.
            debug!("Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest);
            self.tcx.sess.delay_span_bug(
                self.cur_span(),
                "size-changing transmute, should have been caught by transmute checking",
            );
            throw_inval!(TransmuteSizeDiff(src.layout.ty, dest.layout.ty));
        }
        // Unsized copies rely on interpreting `src.meta` with `dest.layout`; we want
        // to avoid that here.
        assert!(
            !src.layout.is_unsized() && !dest.layout.is_unsized(),
            "Cannot transmute unsized data"
        );

        // The hard case is `ScalarPair`. `src` is already read from memory in this case,
        // using `src.layout` to figure out which bytes to use for the 1st and 2nd field.
        // We have to write them to `dest` at the offsets they were *read at*, which is
        // not necessarily the same as the offsets in `dest.layout`!
        // Hence we do the copy with the source layout on both sides. We also make sure to write
        // into memory, because if `dest` is a local we would not even have a way to write
        // at the `src` offsets; the fact that we came from a different layout would
        // just be lost.
        let dest = self.force_allocation(dest)?;
        self.copy_op_no_validate(
            src,
            &PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout }),
        )?;

        if M::enforce_validity(self) {
            // Data got changed, better make sure it matches the type!
            self.validate_operand(&dest.into())?;
        }

        Ok(())
    }
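
    // Illustrative scenario for the `ScalarPair` hard case above: if `src` was read as a
    // pair whose components sit at offsets 0 and 8 under `src.layout`, those same offsets
    // must be used when writing into `dest`, even if `dest.layout` would place its fields
    // elsewhere -- hence the copy uses `src.layout` on both sides.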

    /// Ensures that a place is in memory, and returns where it is.
    /// If the place currently refers to a local that doesn't yet have a matching allocation,
    /// create such an allocation.
    /// This is essentially `force_to_memplace`.
    ///
    /// This supports unsized types and returns the computed size to avoid some
    /// redundant computation when copying; use `force_allocation` for a simpler, sized-only
    /// version.
    pub fn force_allocation_maybe_sized(
        &mut self,
        place: &PlaceTy<'tcx, M::PointerTag>,
        meta: MemPlaceMeta<M::PointerTag>,
    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
        let (mplace, size) = match place.place {
            Place::Local { frame, local } => {
                match M::access_local_mut(self, frame, local)? {
                    Ok(&mut local_val) => {
                        // We need to make an allocation.

                        // We need the layout of the local. We can NOT use the layout we got,
                        // that might e.g., be an inner field of a struct with `Scalar` layout,
                        // that has different alignment than the outer field.
                        let local_layout =
                            self.layout_of_local(&self.stack()[frame], local, None)?;
                        // We also need to support unsized types, and hence cannot use `allocate`.
                        let (size, align) = self
                            .size_and_align_of(&meta, &local_layout)?
                            .expect("Cannot allocate for non-dyn-sized type");
                        let ptr = self.memory.allocate(size, align, MemoryKind::Stack)?;
                        let mplace = MemPlace { ptr: ptr.into(), align, meta };
                        if let LocalValue::Live(Operand::Immediate(value)) = local_val {
                            // Preserve old value.
                            // We don't have to validate as we can assume the local
                            // was already valid for its type.
                            let mplace = MPlaceTy { mplace, layout: local_layout };
                            self.write_immediate_to_mplace_no_validate(value, &mplace)?;
                        }
                        // Now we can call `access_mut` again, asserting it goes well,
                        // and actually overwrite things.
                        *M::access_local_mut(self, frame, local).unwrap().unwrap() =
                            LocalValue::Live(Operand::Indirect(mplace));
                        (mplace, Some(size))
                    }
                    Err(mplace) => (mplace, None), // this already was an indirect local
                }
            }
            Place::Ptr(mplace) => (mplace, None),
        };
        // Return with the original layout, so that the caller can go on
        Ok((MPlaceTy { mplace, layout: place.layout }, size))
    }

    #[inline(always)]
    pub fn force_allocation(
        &mut self,
        place: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        Ok(self.force_allocation_maybe_sized(place, MemPlaceMeta::None)?.0)
    }

    pub fn allocate(
        &mut self,
        layout: TyAndLayout<'tcx>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'static, MPlaceTy<'tcx, M::PointerTag>> {
        let ptr = self.memory.allocate(layout.size, layout.align.abi, kind)?;
        Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
    }
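
    // Usage sketch for `allocate_str` below (illustrative):
    // `self.allocate_str("hi", MemoryKind::Stack, Mutability::Not)` yields a wide place of
    // type `&'static str` whose metadata is `Meta(2)`, the string's byte length.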

    /// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation.
    pub fn allocate_str(
        &mut self,
        str: &str,
        kind: MemoryKind<M::MemoryKind>,
        mutbl: Mutability,
    ) -> MPlaceTy<'tcx, M::PointerTag> {
        let ptr = self.memory.allocate_bytes(str.as_bytes(), Align::ONE, kind, mutbl);
        let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self);
        let mplace =
            MemPlace { ptr: ptr.into(), align: Align::ONE, meta: MemPlaceMeta::Meta(meta) };

        let ty = self.tcx.mk_ref(
            self.tcx.lifetimes.re_static,
            ty::TypeAndMut { ty: self.tcx.types.str_, mutbl },
        );
        let layout = self.layout_of(ty).unwrap();
        MPlaceTy { mplace, layout }
    }
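
    // Worked example for the niche-encoding path in `write_discriminant` below (illustrative
    // numbers): with `niche_variants = 1..=2` and `niche_start = 251`, writing variant 2 gives
    // `variant_index_relative = 2 - 1 = 1`, so the stored tag is `251 + 1 = 252` (computed
    // with machine arithmetic via `binary_op`); the dataful variant itself stores no tag.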

    /// Writes the discriminant of the given variant.
    pub fn write_discriminant(
        &mut self,
        variant_index: VariantIdx,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // This must be an enum or generator.
        match dest.layout.ty.kind() {
            ty::Adt(adt, _) => assert!(adt.is_enum()),
            ty::Generator(..) => {}
            _ => span_bug!(
                self.cur_span(),
                "write_discriminant called on non-variant-type (neither enum nor generator)"
            ),
        }
        // Layout computation excludes uninhabited variants from consideration;
        // therefore there's no way to represent those variants in the given layout.
        // Essentially, uninhabited variants do not have a tag that corresponds to their
        // discriminant, so we cannot do anything here.
        // When evaluating we will always error before even getting here, but ConstProp 'executes'
        // dead code, so we cannot ICE here.
        if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
            throw_ub!(UninhabitedEnumVariantWritten)
        }

        match dest.layout.variants {
            Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            Variants::Multiple {
                tag_encoding: TagEncoding::Direct,
                tag: tag_layout,
                tag_field,
                ..
            } => {
                // No need to validate the discriminant here: the
                // `TyAndLayout::for_variant()` call earlier already checks that the variant
                // is valid.

                let discr_val =
                    dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;

                // Raw discriminants for enums are isize or bigger during
                // their computation, but the in-memory tag is the smallest possible
                // representation.
                let size = tag_layout.value.size(self);
                let tag_val = size.truncate(discr_val);

                let tag_dest = self.place_field(dest, tag_field)?;
                self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?;
            }
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
                tag: tag_layout,
                tag_field,
                ..
            } => {
                // No need to validate the discriminant here: the
                // `TyAndLayout::for_variant()` call earlier already checks that the variant
                // is valid.

                if variant_index != dataful_variant {
                    let variants_start = niche_variants.start().as_u32();
                    let variant_index_relative = variant_index
                        .as_u32()
                        .checked_sub(variants_start)
                        .expect("overflow computing relative variant idx");
                    // We need to use machine arithmetic when taking into account `niche_start`:
                    // tag_val = variant_index_relative + niche_start_val
                    let tag_layout = self.layout_of(tag_layout.value.to_int_ty(*self.tcx))?;
                    let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                    let variant_index_relative_val =
                        ImmTy::from_uint(variant_index_relative, tag_layout);
                    let tag_val = self.binary_op(
                        mir::BinOp::Add,
                        &variant_index_relative_val,
                        &niche_start_val,
                    )?;
                    // Write result.
                    let niche_dest = self.place_field(dest, tag_field)?;
                    self.write_immediate(*tag_val, &niche_dest)?;
                }
            }
        }

        Ok(())
    }

    pub fn raw_const_to_mplace(
        &self,
        raw: ConstAlloc<'tcx>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // This must be an allocation in `tcx`
        let _ = self.tcx.global_alloc(raw.alloc_id);
        let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
        let layout = self.layout_of(raw.ty)?;
        Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
    }

    /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
    /// Also return some more information so drop doesn't have to run the same code twice.
    pub(super) fn unpack_dyn_trait(
        &self,
        mplace: &MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
        let vtable = self.scalar_to_ptr(mplace.vtable()); // also sanity checks the type
        let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
        let layout = self.layout_of(ty)?;

        // More sanity checks
        if cfg!(debug_assertions) {
            let (size, align) = self.read_size_and_align_from_vtable(vtable)?;
            assert_eq!(size, layout.size);
            // only ABI alignment is preserved
            assert_eq!(align, layout.align.abi);
        }

        let mplace = MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..**mplace }, layout };
        Ok((instance, mplace))
    }
}
1125 }