]>
Commit | Line | Data |
---|---|---|
b7449926 XL |
1 | //! Computations on places -- field projections, going from mir::Place, and writing |
2 | //! into a place. | |
3 | //! All high-level functions to write to memory work on places as destinations. | |
4 | ||
5 | use std::convert::TryFrom; | |
29967ef6 | 6 | use std::fmt::Debug; |
0bf4aa26 | 7 | use std::hash::Hash; |
b7449926 | 8 | |
60c5eb7d | 9 | use rustc_macros::HashStable; |
ba9703b0 XL |
10 | use rustc_middle::mir; |
11 | use rustc_middle::ty::layout::{PrimitiveExt, TyAndLayout}; | |
12 | use rustc_middle::ty::{self, Ty}; | |
f035d41b | 13 | use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding}; |
ba9703b0 | 14 | use rustc_target::abi::{HasDataLayout, LayoutOf, Size, VariantIdx, Variants}; |
ff7c6d11 | 15 | |
0bf4aa26 | 16 | use super::{ |
29967ef6 XL |
17 | mir_assign_valid_types, AllocId, AllocMap, Allocation, AllocationExtra, ConstAlloc, ImmTy, |
18 | Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, Operand, Pointer, | |
19 | PointerArithmetic, Scalar, ScalarMaybeUninit, | |
b7449926 | 20 | }; |
ff7c6d11 | 21 | |
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
/// Information required for the sound usage of a `MemPlace`.
pub enum MemPlaceMeta<Tag = ()> {
    /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
    Meta(Scalar<Tag>),
    /// `Sized` types or unsized `extern type`
    None,
    /// The address of this place may not be taken. This protects the `MemPlace` from coming from
    /// a ZST Operand without a backing allocation and being converted to an integer address. This
    /// should be impossible, because you can't take the address of an operand, but this is a second
    /// protection layer ensuring that we don't mess up.
    Poison,
}

// Guard against accidental size regressions of this hot type (checked on 64-bit x86 only).
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MemPlaceMeta, 24);
38 | ||
f9f354fc XL |
39 | impl<Tag> MemPlaceMeta<Tag> { |
40 | pub fn unwrap_meta(self) -> Scalar<Tag> { | |
dfeec247 XL |
41 | match self { |
42 | Self::Meta(s) => s, | |
43 | Self::None | Self::Poison => { | |
44 | bug!("expected wide pointer extra data (e.g. slice length or trait object vtable)") | |
45 | } | |
46 | } | |
47 | } | |
48 | fn has_meta(self) -> bool { | |
49 | match self { | |
50 | Self::Meta(_) => true, | |
51 | Self::None | Self::Poison => false, | |
52 | } | |
53 | } | |
dfeec247 | 54 | |
dfeec247 XL |
55 | pub fn erase_tag(self) -> MemPlaceMeta<()> { |
56 | match self { | |
57 | Self::Meta(s) => MemPlaceMeta::Meta(s.erase_tag()), | |
58 | Self::None => MemPlaceMeta::None, | |
59 | Self::Poison => MemPlaceMeta::Poison, | |
60 | } | |
61 | } | |
62 | } | |
63 | ||
/// A location in interpreter memory: a (possibly integral) pointer, the alignment
/// we may assume for accesses through it, and metadata for unsized places.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
pub struct MemPlace<Tag = ()> {
    /// A place may have an integral pointer for ZSTs, and since it might
    /// be turned back into a reference before ever being dereferenced.
    /// However, it may never be uninit.
    pub ptr: Scalar<Tag>,
    /// The alignment that accesses through this place may assume.
    pub align: Align,
    /// Metadata for unsized places. Interpretation is up to the type.
    /// Must not be present for sized types, but can be missing for unsized types
    /// (e.g., `extern type`).
    pub meta: MemPlaceMeta<Tag>,
}

// Guard against accidental size regressions of this hot type (checked on 64-bit x86 only).
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MemPlace, 56);
79 | ||
/// Where an interpreter write can land: either real memory, or directly a local
/// of some stack frame (the alloc-free fast path).
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
pub enum Place<Tag = ()> {
    /// A place referring to a value allocated in the `Memory` system.
    Ptr(MemPlace<Tag>),

    /// To support alloc-free locals, we are able to write directly to a local.
    /// (Without that optimization, we'd just always be a `MemPlace`.)
    Local { frame: usize, local: mir::Local },
}

// Guard against accidental size regressions of this hot type (checked on 64-bit x86 only).
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Place, 64);
92 | ||
/// A `Place` paired with the layout of the type stored there.
#[derive(Copy, Clone, Debug)]
pub struct PlaceTy<'tcx, Tag = ()> {
    place: Place<Tag>, // Keep this private; it helps enforce invariants.
    pub layout: TyAndLayout<'tcx>,
}

// Guard against accidental size regressions of this hot type (checked on 64-bit x86 only).
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(PlaceTy<'_>, 80);
101 | ||
// Let a `PlaceTy` be used wherever a plain `Place` is expected, while keeping
// the `place` field itself private to this module.
impl<'tcx, Tag> std::ops::Deref for PlaceTy<'tcx, Tag> {
    type Target = Place<Tag>;
    #[inline(always)]
    fn deref(&self) -> &Place<Tag> {
        &self.place
    }
}
109 | ||
/// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
pub struct MPlaceTy<'tcx, Tag = ()> {
    mplace: MemPlace<Tag>,
    pub layout: TyAndLayout<'tcx>,
}

// Guard against accidental size regressions of this hot type (checked on 64-bit x86 only).
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 72);
119 | ||
// Let an `MPlaceTy` be used wherever a plain `MemPlace` is expected, while keeping
// the `mplace` field itself private to this module.
impl<'tcx, Tag> std::ops::Deref for MPlaceTy<'tcx, Tag> {
    type Target = MemPlace<Tag>;
    #[inline(always)]
    fn deref(&self) -> &MemPlace<Tag> {
        &self.mplace
    }
}
127 | ||
// Every memory place is trivially also a general place (the `Ptr` case).
impl<'tcx, Tag> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
    #[inline(always)]
    fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
        PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout }
    }
}
ff7c6d11 | 134 | |
48663c56 XL |
135 | impl<Tag> MemPlace<Tag> { |
136 | /// Replace ptr tag, maintain vtable tag (if any) | |
0bf4aa26 | 137 | #[inline] |
48663c56 | 138 | pub fn replace_tag(self, new_tag: Tag) -> Self { |
dfeec247 | 139 | MemPlace { ptr: self.ptr.erase_tag().with_tag(new_tag), align: self.align, meta: self.meta } |
0bf4aa26 | 140 | } |
0bf4aa26 | 141 | |
0bf4aa26 | 142 | #[inline] |
48663c56 | 143 | pub fn erase_tag(self) -> MemPlace { |
dfeec247 | 144 | MemPlace { ptr: self.ptr.erase_tag(), align: self.align, meta: self.meta.erase_tag() } |
0bf4aa26 XL |
145 | } |
146 | ||
b7449926 | 147 | #[inline(always)] |
dfeec247 XL |
148 | fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self { |
149 | MemPlace { ptr, align, meta: MemPlaceMeta::None } | |
ff7c6d11 XL |
150 | } |
151 | ||
b7449926 | 152 | #[inline(always)] |
0bf4aa26 | 153 | pub fn from_ptr(ptr: Pointer<Tag>, align: Align) -> Self { |
94b46f34 | 154 | Self::from_scalar_ptr(ptr.into(), align) |
ff7c6d11 XL |
155 | } |
156 | ||
60c5eb7d | 157 | /// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space. |
a1dfa0c6 XL |
158 | /// This is the inverse of `ref_to_mplace`. |
159 | #[inline(always)] | |
160 | pub fn to_ref(self) -> Immediate<Tag> { | |
161 | match self.meta { | |
dfeec247 XL |
162 | MemPlaceMeta::None => Immediate::Scalar(self.ptr.into()), |
163 | MemPlaceMeta::Meta(meta) => Immediate::ScalarPair(self.ptr.into(), meta.into()), | |
164 | MemPlaceMeta::Poison => bug!( | |
165 | "MPlaceTy::dangling may never be used to produce a \ | |
166 | place that will have the address of its pointee taken" | |
167 | ), | |
a1dfa0c6 XL |
168 | } |
169 | } | |
170 | ||
5869c6ff | 171 | #[inline] |
a1dfa0c6 XL |
172 | pub fn offset( |
173 | self, | |
174 | offset: Size, | |
dfeec247 | 175 | meta: MemPlaceMeta<Tag>, |
a1dfa0c6 | 176 | cx: &impl HasDataLayout, |
dc9dc135 | 177 | ) -> InterpResult<'tcx, Self> { |
a1dfa0c6 XL |
178 | Ok(MemPlace { |
179 | ptr: self.ptr.ptr_offset(offset, cx)?, | |
180 | align: self.align.restrict_for_offset(offset), | |
181 | meta, | |
182 | }) | |
183 | } | |
0bf4aa26 | 184 | } |
b7449926 | 185 | |
impl<'tcx, Tag: Copy> MPlaceTy<'tcx, Tag> {
    /// Produces a MemPlace that works for ZST but nothing else
    #[inline]
    pub fn dangling(layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
        let align = layout.align.abi;
        // Use the alignment itself as the (dangling but aligned) pointer value.
        let ptr = Scalar::from_machine_usize(align.bytes(), cx);
        // `Poison` this to make sure that the pointer value `ptr` is never observable by the program.
        MPlaceTy { mplace: MemPlace { ptr, align, meta: MemPlaceMeta::Poison }, layout }
    }

    /// Replace ptr tag, maintain vtable tag (if any)
    #[inline]
    pub fn replace_tag(&self, new_tag: Tag) -> Self {
        MPlaceTy { mplace: self.mplace.replace_tag(new_tag), layout: self.layout }
    }

    /// Offset this place by `offset` bytes, giving the result the provided
    /// metadata and layout; delegates the pointer arithmetic to `MemPlace::offset`.
    #[inline]
    pub fn offset(
        &self,
        offset: Size,
        meta: MemPlaceMeta<Tag>,
        layout: TyAndLayout<'tcx>,
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, Self> {
        Ok(MPlaceTy { mplace: self.mplace.offset(offset, meta, cx)?, layout })
    }

    /// Build a place from a pointer, taking the alignment from the layout.
    #[inline]
    fn from_aligned_ptr(ptr: Pointer<Tag>, layout: TyAndLayout<'tcx>) -> Self {
        MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout }
    }

    /// Number of elements of this place: slice/str length from the metadata for
    /// unsized types, array element count from the layout otherwise.
    #[inline]
    pub(super) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
        if self.layout.is_unsized() {
            // We need to consult `meta` metadata
            match self.layout.ty.kind() {
                ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_machine_usize(cx),
                _ => bug!("len not supported on unsized type {:?}", self.layout.ty),
            }
        } else {
            // Go through the layout. There are lots of types that support a length,
            // e.g., SIMD types.
            match self.layout.fields {
                FieldsShape::Array { count, .. } => Ok(count),
                _ => bug!("len not supported on sized type {:?}", self.layout.ty),
            }
        }
    }

    /// The vtable pointer of a trait object place (its unsized metadata);
    /// ICEs on any non-`dyn Trait` type.
    #[inline]
    pub(super) fn vtable(&self) -> Scalar<Tag> {
        match self.layout.ty.kind() {
            ty::Dynamic(..) => self.mplace.meta.unwrap_meta(),
            _ => bug!("vtable not supported on type {:?}", self.layout.ty),
        }
    }
}
244 | ||
416331ca | 245 | // These are defined here because they produce a place. |
// These are defined here because they produce a place.
impl<'tcx, Tag: Debug + Copy> OpTy<'tcx, Tag> {
    #[inline(always)]
    /// Note: do not call `as_ref` on the resulting place. This function should only be used to
    /// read from the resulting mplace, not to get its address back.
    ///
    /// Returns `Ok` for indirect operands (and for ZST immediates, via a dangling
    /// place), `Err` with the immediate otherwise.
    pub fn try_as_mplace(
        &self,
        cx: &impl HasDataLayout,
    ) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
        match **self {
            Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
            // A ZST needs no backing memory; a poisoned dangling place suffices.
            Operand::Immediate(_) if self.layout.is_zst() => {
                Ok(MPlaceTy::dangling(self.layout, cx))
            }
            Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)),
        }
    }

    #[inline(always)]
    /// Note: do not call `as_ref` on the resulting place. This function should only be used to
    /// read from the resulting mplace, not to get its address back.
    ///
    /// Panics if the operand is a (non-ZST) immediate.
    pub fn assert_mem_place(&self, cx: &impl HasDataLayout) -> MPlaceTy<'tcx, Tag> {
        self.try_as_mplace(cx).unwrap()
    }
}
270 | ||
29967ef6 | 271 | impl<Tag: Debug> Place<Tag> { |
b7449926 | 272 | #[inline] |
416331ca | 273 | pub fn assert_mem_place(self) -> MemPlace<Tag> { |
b7449926 XL |
274 | match self { |
275 | Place::Ptr(mplace) => mplace, | |
416331ca | 276 | _ => bug!("assert_mem_place: expected Place::Ptr, got {:?}", self), |
b7449926 XL |
277 | } |
278 | } | |
b7449926 XL |
279 | } |
280 | ||
impl<'tcx, Tag: Debug> PlaceTy<'tcx, Tag> {
    /// Unwraps the `Ptr` variant together with this place's layout; ICEs on a `Local`.
    #[inline]
    pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
        MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout }
    }
}
287 | ||
0bf4aa26 | 288 | // separating the pointer tag for `impl Trait`, see https://github.com/rust-lang/rust/issues/54385 |
ba9703b0 | 289 | impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M> |
0bf4aa26 | 290 | where |
a1dfa0c6 | 291 | // FIXME: Working around https://github.com/rust-lang/rust/issues/54385 |
29967ef6 | 292 | Tag: Debug + Copy + Eq + Hash + 'static, |
dc9dc135 | 293 | M: Machine<'mir, 'tcx, PointerTag = Tag>, |
a1dfa0c6 | 294 | // FIXME: Working around https://github.com/rust-lang/rust/issues/24159 |
ba9703b0 | 295 | M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKind>, Allocation<Tag, M::AllocExtra>)>, |
48663c56 | 296 | M::AllocExtra: AllocationExtra<Tag>, |
0bf4aa26 | 297 | { |
    /// Take a value, which represents a (thin or wide) reference, and make it a place.
    /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
    ///
    /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
    /// want to ever use the place for memory access!
    /// Generally prefer `deref_operand`.
    pub fn ref_to_mplace(
        &self,
        val: &ImmTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        let pointee_type =
            val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
        let layout = self.layout_of(pointee_type)?;
        // A thin pointer is just the address; a wide pointer additionally carries metadata.
        let (ptr, meta) = match **val {
            Immediate::Scalar(ptr) => (ptr.check_init()?, MemPlaceMeta::None),
            Immediate::ScalarPair(ptr, meta) => {
                (ptr.check_init()?, MemPlaceMeta::Meta(meta.check_init()?))
            }
        };

        let mplace = MemPlace {
            ptr,
            // We could use the run-time alignment here. For now, we do not, because
            // the point of tracking the alignment here is to make sure that the *static*
            // alignment information emitted with the loads is correct. The run-time
            // alignment can only be more restrictive.
            align: layout.align.abi,
            meta,
        };
        Ok(MPlaceTy { mplace, layout })
    }
329 | ||
    /// Take an operand, representing a pointer, and dereference it to a place -- that
    /// will always be a MemPlace. Lives in `place.rs` because it creates a place.
    ///
    /// Unlike `ref_to_mplace`, the resulting place has been access-checked
    /// (non-NULL, aligned, live) via `mplace_access_checked`.
    pub fn deref_operand(
        &self,
        src: &OpTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        let val = self.read_immediate(src)?;
        trace!("deref to {} on {:?}", val.layout.ty, *val);
        let place = self.ref_to_mplace(&val)?;
        self.mplace_access_checked(place, None)
    }
341 | ||
    /// Check if the given place is good for memory access with the given
    /// size, falling back to the layout's size if `None` (in the latter case,
    /// this must be a statically sized type).
    ///
    /// On success, returns `None` for zero-sized accesses (where nothing else is
    /// left to do) and a `Pointer` to use for the actual access otherwise.
    #[inline]
    pub(super) fn check_mplace_access(
        &self,
        place: &MPlaceTy<'tcx, M::PointerTag>,
        size: Option<Size>,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        let size = size.unwrap_or_else(|| {
            // The layout-size fallback is only meaningful for sized, metadata-free places.
            assert!(!place.layout.is_unsized());
            assert!(!place.meta.has_meta());
            place.layout.size
        });
        self.memory.check_ptr_access(place.ptr, size, place.align)
    }
361 | ||
    /// Return the "access-checked" version of this `MPlace`, where for non-ZST
    /// this is definitely a `Pointer`.
    ///
    /// `force_align` must only be used when correct alignment does not matter,
    /// like in Stacked Borrows.
    pub fn mplace_access_checked(
        &self,
        mut place: MPlaceTy<'tcx, M::PointerTag>,
        force_align: Option<Align>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // Dynamic size/alignment (e.g. from a vtable); fall back to the static layout.
        let (size, align) = self
            .size_and_align_of_mplace(&place)?
            .unwrap_or((place.layout.size, place.layout.align.abi));
        assert!(place.mplace.align <= align, "dynamic alignment less strict than static one?");
        // Check (stricter) dynamic alignment, unless forced otherwise.
        place.mplace.align = force_align.unwrap_or(align);
        // When dereferencing a pointer, it must be non-NULL, aligned, and live.
        if let Some(ptr) = self.check_mplace_access(&place, Some(size))? {
            place.mplace.ptr = ptr.into();
        }
        Ok(place)
    }
384 | ||
    /// Force `place.ptr` to a `Pointer`.
    /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
    pub(super) fn force_mplace_ptr(
        &self,
        mut place: MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        place.mplace.ptr = self.force_ptr(place.mplace.ptr)?.into();
        Ok(place)
    }
394 | ||
    /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
    /// always possible without allocating, so it can take `&self`. Also return the field's layout.
    /// This supports both struct and array fields.
    ///
    /// This also works for arrays, but then the `usize` index type is restricting.
    /// For indexing into arrays, use `mplace_index`.
    #[inline(always)]
    pub fn mplace_field(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
        field: usize,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        let offset = base.layout.fields.offset(field);
        let field_layout = base.layout.field(self, field)?;

        // Offset may need adjustment for unsized fields.
        let (meta, offset) = if field_layout.is_unsized() {
            // Re-use parent metadata to determine dynamic field layout.
            // With custom DSTS, this *will* execute user-defined code, but the same
            // happens at run-time so that's okay.
            let align = match self.size_and_align_of(&base.meta, &field_layout)? {
                Some((_, align)) => align,
                None if offset == Size::ZERO => {
                    // An extern type at offset 0, we fall back to its static alignment.
                    // FIXME: Once we have made decisions for how to handle size and alignment
                    // of `extern type`, this should be adapted. It is just a temporary hack
                    // to get some code to work that probably ought to work.
                    field_layout.align.abi
                }
                None => span_bug!(
                    self.cur_span(),
                    "cannot compute offset for extern type field at non-0 offset"
                ),
            };
            // Round the offset up to the field's dynamic alignment.
            (base.meta, offset.align_to(align))
        } else {
            // base.meta could be present; we might be accessing a sized field of an unsized
            // struct.
            (MemPlaceMeta::None, offset)
        };

        // We do not look at `base.layout.align` nor `field_layout.align`, unlike
        // codegen -- mostly to see if we can get away with that
        base.offset(offset, meta, field_layout, self)
    }
440 | ||
    /// Index into an array.
    #[inline(always)]
    pub fn mplace_index(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
        index: u64,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // Not using the layout method because we want to compute on u64
        match base.layout.fields {
            FieldsShape::Array { stride, .. } => {
                let len = base.len(self)?;
                if index >= len {
                    // This can only be reached in ConstProp and non-rustc-MIR.
                    throw_ub!(BoundsCheckFailed { len, index });
                }
                let offset = stride * index; // `Size` multiplication
                // All fields have the same layout.
                let field_layout = base.layout.field(self, 0)?;

                assert!(!field_layout.is_unsized());
                base.offset(offset, MemPlaceMeta::None, field_layout, self)
            }
            _ => span_bug!(
                self.cur_span(),
                "`mplace_index` called on non-array type {:?}",
                base.layout.ty
            ),
        }
    }
470 | ||
    // Iterates over all fields of an array. Much more efficient than doing the
    // same by repeatedly calling `mplace_array`.
    // Note: `'a` is an in-band lifetime tying the iterator to `base`.
    pub(super) fn mplace_array_fields(
        &self,
        base: &'a MPlaceTy<'tcx, Tag>,
    ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'a>
    {
        let len = base.len(self)?; // also asserts that we have a type where this makes sense
        let stride = match base.layout.fields {
            FieldsShape::Array { stride, .. } => stride,
            _ => span_bug!(self.cur_span(), "mplace_array_fields: expected an array layout"),
        };
        let layout = base.layout.field(self, 0)?;
        let dl = &self.tcx.data_layout;
        // `Size` multiplication
        Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl)))
    }
488 | ||
    /// Project to the subslice `base[from..to]`; if `from_end` is set, `to` counts
    /// from the end instead (i.e. `base[from..len - to]`).
    fn mplace_subslice(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
        from: u64,
        to: u64,
        from_end: bool,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        let len = base.len(self)?; // also asserts that we have a type where this makes sense
        let actual_to = if from_end {
            if from.checked_add(to).map_or(true, |to| to > len) {
                // This can only be reached in ConstProp and non-rustc-MIR.
                throw_ub!(BoundsCheckFailed { len: len, index: from.saturating_add(to) });
            }
            len.checked_sub(to).unwrap()
        } else {
            to
        };

        // Not using layout method because that works with usize, and does not work with slices
        // (that have count 0 in their layout).
        let from_offset = match base.layout.fields {
            FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
            _ => {
                span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout)
            }
        };

        // Compute meta and new layout
        let inner_len = actual_to.checked_sub(from).unwrap();
        let (meta, ty) = match base.layout.ty.kind() {
            // It is not nice to match on the type, but that seems to be the only way to
            // implement this.
            ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(inner, inner_len)),
            ty::Slice(..) => {
                let len = Scalar::from_machine_usize(inner_len, self);
                (MemPlaceMeta::Meta(len), base.layout.ty)
            }
            _ => {
                span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty)
            }
        };
        let layout = self.layout_of(ty)?;
        base.offset(from_offset, meta, layout, self)
    }
533 | ||
    /// Project to an enum variant: keeps the same memory location, only the
    /// layout changes to that of the chosen variant.
    pub(crate) fn mplace_downcast(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
        variant: VariantIdx,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // Downcasts only change the layout
        assert!(!base.meta.has_meta());
        Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..*base })
    }
543 | ||
    /// Project into an mplace
    ///
    /// Dispatches a single MIR projection element to the corresponding
    /// `mplace_*` helper (or `deref_operand` for `Deref`).
    pub(super) fn mplace_projection(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
        proj_elem: mir::PlaceElem<'tcx>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::ProjectionElem::*;
        Ok(match proj_elem {
            Field(field, _) => self.mplace_field(base, field.index())?,
            Downcast(_, variant) => self.mplace_downcast(base, variant)?,
            Deref => self.deref_operand(&base.into())?,

            Index(local) => {
                // The runtime index lives in a `usize` local of the current frame.
                let layout = self.layout_of(self.tcx.types.usize)?;
                let n = self.access_local(self.frame(), local, Some(layout))?;
                let n = self.read_scalar(&n)?;
                let n = u64::try_from(
                    self.force_bits(n.check_init()?, self.tcx.data_layout.pointer_size)?,
                )
                .unwrap();
                self.mplace_index(base, n)?
            }

            ConstantIndex { offset, min_length, from_end } => {
                let n = base.len(self)?;
                if n < min_length {
                    // This can only be reached in ConstProp and non-rustc-MIR.
                    throw_ub!(BoundsCheckFailed { len: min_length, index: n });
                }

                let index = if from_end {
                    assert!(0 < offset && offset <= min_length);
                    n.checked_sub(offset).unwrap()
                } else {
                    assert!(offset < min_length);
                    offset
                };

                self.mplace_index(base, index)?
            }

            Subslice { from, to, from_end } => self.mplace_subslice(base, from, to, from_end)?,
        })
    }
588 | ||
    /// Gets the place of a field inside the place, and also the field's type.
    /// Just a convenience function, but used quite a bit.
    /// This is the only projection that might have a side-effect: We cannot project
    /// into the field of a local `ScalarPair`, we have to first allocate it.
    pub fn place_field(
        &mut self,
        base: &PlaceTy<'tcx, M::PointerTag>,
        field: usize,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        // FIXME: We could try to be smarter and avoid allocation for fields that span the
        // entire place.
        let mplace = self.force_allocation(base)?;
        Ok(self.mplace_field(&mplace, field)?.into())
    }
603 | ||
    /// Index into an array place. Like `place_field`, this may have to allocate
    /// the base local first (hence `&mut self`).
    pub fn place_index(
        &mut self,
        base: &PlaceTy<'tcx, M::PointerTag>,
        index: u64,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        let mplace = self.force_allocation(base)?;
        Ok(self.mplace_index(&mplace, index)?.into())
    }
    /// Project a place to an enum variant. Never needs to allocate: for both
    /// `Ptr` and `Local` places only the layout is swapped.
    pub fn place_downcast(
        &self,
        base: &PlaceTy<'tcx, M::PointerTag>,
        variant: VariantIdx,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        // Downcast just changes the layout
        Ok(match base.place {
            Place::Ptr(mplace) => {
                self.mplace_downcast(&MPlaceTy { mplace, layout: base.layout }, variant)?.into()
            }
            Place::Local { .. } => {
                let layout = base.layout.for_variant(self, variant);
                PlaceTy { layout, ..*base }
            }
        })
    }
629 | ||
    /// Projects into a place.
    ///
    /// Field and downcast projections are handled directly; the indexing ones
    /// force an allocation and delegate to `mplace_projection`.
    pub fn place_projection(
        &mut self,
        base: &PlaceTy<'tcx, M::PointerTag>,
        &proj_elem: &mir::ProjectionElem<mir::Local, Ty<'tcx>>,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::ProjectionElem::*;
        Ok(match proj_elem {
            Field(field, _) => self.place_field(base, field.index())?,
            Downcast(_, variant) => self.place_downcast(base, variant)?,
            Deref => self.deref_operand(&self.place_to_op(base)?)?.into(),
            // For the other variants, we have to force an allocation.
            // This matches `operand_projection`.
            Subslice { .. } | ConstantIndex { .. } | Index(_) => {
                let mplace = self.force_allocation(base)?;
                self.mplace_projection(&mplace, proj_elem)?.into()
            }
        })
    }
ff7c6d11 | 649 | |
    /// Computes a place. You should only use this if you intend to write into this
    /// place; for reading, a more efficient alternative is `eval_place_for_read`.
    pub fn eval_place(
        &mut self,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        // Start from the base local, then apply each projection element in turn.
        let mut place_ty = PlaceTy {
            // This works even for dead/uninitialized locals; we check further when writing
            place: Place::Local { frame: self.frame_idx(), local: place.local },
            layout: self.layout_of_local(self.frame(), place.local, None)?,
        };

        for elem in place.projection.iter() {
            place_ty = self.place_projection(&place_ty, &elem)?
        }

        trace!("{:?}", self.dump_place(place_ty.place));
        // Sanity-check the type we ended up with.
        debug_assert!(mir_assign_valid_types(
            *self.tcx,
            self.param_env,
            self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
                place.ty(&self.frame().body.local_decls, *self.tcx).ty
            ))?,
            place_ty.layout,
        ));
        Ok(place_ty)
    }
678 | ||
b7449926 | 679 | /// Write a scalar to a place |
60c5eb7d | 680 | #[inline(always)] |
b7449926 | 681 | pub fn write_scalar( |
ff7c6d11 | 682 | &mut self, |
f9f354fc | 683 | val: impl Into<ScalarMaybeUninit<M::PointerTag>>, |
6a06907d | 684 | dest: &PlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 685 | ) -> InterpResult<'tcx> { |
a1dfa0c6 | 686 | self.write_immediate(Immediate::Scalar(val.into()), dest) |
b7449926 | 687 | } |
ff7c6d11 | 688 | |
a1dfa0c6 | 689 | /// Write an immediate to a place |
0bf4aa26 | 690 | #[inline(always)] |
a1dfa0c6 | 691 | pub fn write_immediate( |
b7449926 | 692 | &mut self, |
a1dfa0c6 | 693 | src: Immediate<M::PointerTag>, |
6a06907d | 694 | dest: &PlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 695 | ) -> InterpResult<'tcx> { |
a1dfa0c6 | 696 | self.write_immediate_no_validate(src, dest)?; |
0bf4aa26 XL |
697 | |
698 | if M::enforce_validity(self) { | |
699 | // Data got changed, better make sure it matches the type! | |
6a06907d | 700 | self.validate_operand(&self.place_to_op(dest)?)?; |
dc9dc135 XL |
701 | } |
702 | ||
703 | Ok(()) | |
704 | } | |
705 | ||
706 | /// Write an `Immediate` to memory. | |
707 | #[inline(always)] | |
708 | pub fn write_immediate_to_mplace( | |
709 | &mut self, | |
710 | src: Immediate<M::PointerTag>, | |
6a06907d | 711 | dest: &MPlaceTy<'tcx, M::PointerTag>, |
dc9dc135 XL |
712 | ) -> InterpResult<'tcx> { |
713 | self.write_immediate_to_mplace_no_validate(src, dest)?; | |
714 | ||
715 | if M::enforce_validity(self) { | |
716 | // Data got changed, better make sure it matches the type! | |
6a06907d | 717 | self.validate_operand(&dest.into())?; |
0bf4aa26 XL |
718 | } |
719 | ||
720 | Ok(()) | |
721 | } | |
722 | ||
a1dfa0c6 | 723 | /// Write an immediate to a place. |
0bf4aa26 XL |
724 | /// If you use this you are responsible for validating that things got copied at the |
725 | /// right type. | |
a1dfa0c6 | 726 | fn write_immediate_no_validate( |
0bf4aa26 | 727 | &mut self, |
a1dfa0c6 | 728 | src: Immediate<M::PointerTag>, |
6a06907d | 729 | dest: &PlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 730 | ) -> InterpResult<'tcx> { |
0bf4aa26 XL |
731 | if cfg!(debug_assertions) { |
732 | // This is a very common path, avoid some checks in release mode | |
733 | assert!(!dest.layout.is_unsized(), "Cannot write unsized data"); | |
a1dfa0c6 | 734 | match src { |
f9f354fc | 735 | Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(_))) => assert_eq!( |
dfeec247 XL |
736 | self.pointer_size(), |
737 | dest.layout.size, | |
738 | "Size mismatch when writing pointer" | |
739 | ), | |
29967ef6 XL |
740 | Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Int(int))) => { |
741 | assert_eq!(int.size(), dest.layout.size, "Size mismatch when writing bits") | |
dfeec247 | 742 | } |
3dfed10e | 743 | Immediate::Scalar(ScalarMaybeUninit::Uninit) => {} // uninit can have any size |
a1dfa0c6 | 744 | Immediate::ScalarPair(_, _) => { |
0bf4aa26 XL |
745 | // FIXME: Can we check anything here? |
746 | } | |
747 | } | |
748 | } | |
a1dfa0c6 | 749 | trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty); |
0bf4aa26 | 750 | |
a1dfa0c6 | 751 | // See if we can avoid an allocation. This is the counterpart to `try_read_immediate`, |
b7449926 XL |
752 | // but not factored as a separate function. |
753 | let mplace = match dest.place { | |
ff7c6d11 | 754 | Place::Local { frame, local } => { |
f035d41b | 755 | match M::access_local_mut(self, frame, local)? { |
48663c56 XL |
756 | Ok(local) => { |
757 | // Local can be updated in-place. | |
758 | *local = LocalValue::Live(Operand::Immediate(src)); | |
b7449926 | 759 | return Ok(()); |
48663c56 XL |
760 | } |
761 | Err(mplace) => { | |
762 | // The local is in memory, go on below. | |
763 | mplace | |
764 | } | |
ff7c6d11 | 765 | } |
dfeec247 | 766 | } |
48663c56 | 767 | Place::Ptr(mplace) => mplace, // already referring to memory |
ff7c6d11 | 768 | }; |
0bf4aa26 | 769 | let dest = MPlaceTy { mplace, layout: dest.layout }; |
ff7c6d11 | 770 | |
b7449926 | 771 | // This is already in memory, write there. |
6a06907d | 772 | self.write_immediate_to_mplace_no_validate(src, &dest) |
ff7c6d11 XL |
773 | } |
774 | ||
a1dfa0c6 | 775 | /// Write an immediate to memory. |
dc9dc135 | 776 | /// If you use this you are responsible for validating that things got copied at the |
0bf4aa26 | 777 | /// right type. |
a1dfa0c6 | 778 | fn write_immediate_to_mplace_no_validate( |
b7449926 | 779 | &mut self, |
a1dfa0c6 | 780 | value: Immediate<M::PointerTag>, |
6a06907d | 781 | dest: &MPlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 782 | ) -> InterpResult<'tcx> { |
b7449926 XL |
783 | // Note that it is really important that the type here is the right one, and matches the |
784 | // type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here | |
785 | // to handle padding properly, which is only correct if we never look at this data with the | |
786 | // wrong type. | |
787 | ||
60c5eb7d | 788 | // Invalid places are a thing: the return place of a diverging function |
dfeec247 | 789 | let ptr = match self.check_mplace_access(dest, None)? { |
dc9dc135 XL |
790 | Some(ptr) => ptr, |
791 | None => return Ok(()), // zero-sized access | |
792 | }; | |
b7449926 | 793 | |
f035d41b | 794 | let tcx = *self.tcx; |
0bf4aa26 XL |
795 | // FIXME: We should check that there are dest.layout.size many bytes available in |
796 | // memory. The code below is not sufficient, with enough padding it might not | |
797 | // cover all the bytes! | |
b7449926 | 798 | match value { |
a1dfa0c6 | 799 | Immediate::Scalar(scalar) => { |
0bf4aa26 | 800 | match dest.layout.abi { |
ba9703b0 | 801 | Abi::Scalar(_) => {} // fine |
f035d41b XL |
802 | _ => span_bug!( |
803 | self.cur_span(), | |
804 | "write_immediate_to_mplace: invalid Scalar layout: {:#?}", | |
805 | dest.layout | |
806 | ), | |
0bf4aa26 | 807 | } |
60c5eb7d | 808 | self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar( |
f035d41b | 809 | &tcx, |
dfeec247 XL |
810 | ptr, |
811 | scalar, | |
812 | dest.layout.size, | |
b7449926 | 813 | ) |
ff7c6d11 | 814 | } |
a1dfa0c6 | 815 | Immediate::ScalarPair(a_val, b_val) => { |
dc9dc135 XL |
816 | // We checked `ptr_align` above, so all fields will have the alignment they need. |
817 | // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`, | |
818 | // which `ptr.offset(b_offset)` cannot possibly fail to satisfy. | |
b7449926 | 819 | let (a, b) = match dest.layout.abi { |
ba9703b0 | 820 | Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value), |
f035d41b XL |
821 | _ => span_bug!( |
822 | self.cur_span(), | |
dfeec247 XL |
823 | "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}", |
824 | dest.layout | |
825 | ), | |
b7449926 | 826 | }; |
a1dfa0c6 XL |
827 | let (a_size, b_size) = (a.size(self), b.size(self)); |
828 | let b_offset = a_size.align_to(b.align(self).abi); | |
a1dfa0c6 XL |
829 | let b_ptr = ptr.offset(b_offset, self)?; |
830 | ||
0bf4aa26 XL |
831 | // It is tempting to verify `b_offset` against `layout.fields.offset(1)`, |
832 | // but that does not work: We could be a newtype around a pair, then the | |
833 | // fields do not match the `ScalarPair` components. | |
834 | ||
f035d41b XL |
835 | self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(&tcx, ptr, a_val, a_size)?; |
836 | self.memory.get_raw_mut(b_ptr.alloc_id)?.write_scalar(&tcx, b_ptr, b_val, b_size) | |
ff7c6d11 | 837 | } |
b7449926 | 838 | } |
ff7c6d11 XL |
839 | } |
840 | ||
9fa01778 | 841 | /// Copies the data from an operand to a place. This does not support transmuting! |
0bf4aa26 XL |
842 | /// Use `copy_op_transmute` if the layouts could disagree. |
843 | #[inline(always)] | |
b7449926 | 844 | pub fn copy_op( |
ff7c6d11 | 845 | &mut self, |
6a06907d XL |
846 | src: &OpTy<'tcx, M::PointerTag>, |
847 | dest: &PlaceTy<'tcx, M::PointerTag>, | |
dc9dc135 | 848 | ) -> InterpResult<'tcx> { |
0bf4aa26 XL |
849 | self.copy_op_no_validate(src, dest)?; |
850 | ||
851 | if M::enforce_validity(self) { | |
852 | // Data got changed, better make sure it matches the type! | |
6a06907d | 853 | self.validate_operand(&self.place_to_op(dest)?)?; |
0bf4aa26 XL |
854 | } |
855 | ||
856 | Ok(()) | |
857 | } | |
858 | ||
9fa01778 | 859 | /// Copies the data from an operand to a place. This does not support transmuting! |
0bf4aa26 | 860 | /// Use `copy_op_transmute` if the layouts could disagree. |
dc9dc135 | 861 | /// Also, if you use this you are responsible for validating that things get copied at the |
0bf4aa26 XL |
862 | /// right type. |
863 | fn copy_op_no_validate( | |
864 | &mut self, | |
6a06907d XL |
865 | src: &OpTy<'tcx, M::PointerTag>, |
866 | dest: &PlaceTy<'tcx, M::PointerTag>, | |
dc9dc135 | 867 | ) -> InterpResult<'tcx> { |
0bf4aa26 XL |
868 | // We do NOT compare the types for equality, because well-typed code can |
869 | // actually "transmute" `&mut T` to `&T` in an assignment without a cast. | |
f035d41b | 870 | if !mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) { |
ba9703b0 | 871 | span_bug!( |
f035d41b | 872 | self.cur_span(), |
ba9703b0 XL |
873 | "type mismatch when copying!\nsrc: {:?},\ndest: {:?}", |
874 | src.layout.ty, | |
875 | dest.layout.ty, | |
876 | ); | |
877 | } | |
b7449926 XL |
878 | |
879 | // Let us see if the layout is simple so we take a shortcut, avoid force_allocation. | |
a1dfa0c6 | 880 | let src = match self.try_read_immediate(src)? { |
0bf4aa26 | 881 | Ok(src_val) => { |
48663c56 | 882 | assert!(!src.layout.is_unsized(), "cannot have unsized immediates"); |
0bf4aa26 | 883 | // Yay, we got a value that we can write directly. |
9fa01778 XL |
884 | // FIXME: Add a check to make sure that if `src` is indirect, |
885 | // it does not overlap with `dest`. | |
dc9dc135 | 886 | return self.write_immediate_no_validate(*src_val, dest); |
0bf4aa26 XL |
887 | } |
888 | Err(mplace) => mplace, | |
b7449926 XL |
889 | }; |
890 | // Slow path, this does not fit into an immediate. Just memcpy. | |
0bf4aa26 XL |
891 | trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty); |
892 | ||
48663c56 XL |
893 | // This interprets `src.meta` with the `dest` local's layout, if an unsized local |
894 | // is being initialized! | |
895 | let (dest, size) = self.force_allocation_maybe_sized(dest, src.meta)?; | |
896 | let size = size.unwrap_or_else(|| { | |
dfeec247 XL |
897 | assert!( |
898 | !dest.layout.is_unsized(), | |
899 | "Cannot copy into already initialized unsized place" | |
900 | ); | |
48663c56 XL |
901 | dest.layout.size |
902 | }); | |
903 | assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances"); | |
416331ca | 904 | |
dfeec247 | 905 | let src = self |
6a06907d | 906 | .check_mplace_access(&src, Some(size)) |
e1599b0c | 907 | .expect("places should be checked on creation"); |
dfeec247 | 908 | let dest = self |
6a06907d | 909 | .check_mplace_access(&dest, Some(size)) |
e1599b0c | 910 | .expect("places should be checked on creation"); |
416331ca XL |
911 | let (src_ptr, dest_ptr) = match (src, dest) { |
912 | (Some(src_ptr), Some(dest_ptr)) => (src_ptr, dest_ptr), | |
913 | (None, None) => return Ok(()), // zero-sized copy | |
914 | _ => bug!("The pointers should both be Some or both None"), | |
915 | }; | |
916 | ||
dfeec247 | 917 | self.memory.copy(src_ptr, dest_ptr, size, /*nonoverlapping*/ true) |
0bf4aa26 XL |
918 | } |
919 | ||
9fa01778 | 920 | /// Copies the data from an operand to a place. The layouts may disagree, but they must |
0bf4aa26 XL |
921 | /// have the same size. |
922 | pub fn copy_op_transmute( | |
923 | &mut self, | |
6a06907d XL |
924 | src: &OpTy<'tcx, M::PointerTag>, |
925 | dest: &PlaceTy<'tcx, M::PointerTag>, | |
dc9dc135 | 926 | ) -> InterpResult<'tcx> { |
f035d41b | 927 | if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) { |
0bf4aa26 XL |
928 | // Fast path: Just use normal `copy_op` |
929 | return self.copy_op(src, dest); | |
930 | } | |
48663c56 | 931 | // We still require the sizes to match. |
dfeec247 XL |
932 | if src.layout.size != dest.layout.size { |
933 | // FIXME: This should be an assert instead of an error, but if we transmute within an | |
934 | // array length computation, `typeck` may not have yet been run and errored out. In fact | |
935 | // most likey we *are* running `typeck` right now. Investigate whether we can bail out | |
3dfed10e | 936 | // on `typeck_results().has_errors` at all const eval entry points. |
dfeec247 | 937 | debug!("Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest); |
ba9703b0 | 938 | self.tcx.sess.delay_span_bug( |
f035d41b | 939 | self.cur_span(), |
ba9703b0 XL |
940 | "size-changing transmute, should have been caught by transmute checking", |
941 | ); | |
942 | throw_inval!(TransmuteSizeDiff(src.layout.ty, dest.layout.ty)); | |
dfeec247 | 943 | } |
48663c56 XL |
944 | // Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want |
945 | // to avoid that here. | |
dfeec247 XL |
946 | assert!( |
947 | !src.layout.is_unsized() && !dest.layout.is_unsized(), | |
948 | "Cannot transmute unsized data" | |
949 | ); | |
0bf4aa26 XL |
950 | |
951 | // The hard case is `ScalarPair`. `src` is already read from memory in this case, | |
952 | // using `src.layout` to figure out which bytes to use for the 1st and 2nd field. | |
953 | // We have to write them to `dest` at the offsets they were *read at*, which is | |
954 | // not necessarily the same as the offsets in `dest.layout`! | |
955 | // Hence we do the copy with the source layout on both sides. We also make sure to write | |
956 | // into memory, because if `dest` is a local we would not even have a way to write | |
957 | // at the `src` offsets; the fact that we came from a different layout would | |
958 | // just be lost. | |
959 | let dest = self.force_allocation(dest)?; | |
960 | self.copy_op_no_validate( | |
961 | src, | |
6a06907d | 962 | &PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout }), |
0bf4aa26 XL |
963 | )?; |
964 | ||
965 | if M::enforce_validity(self) { | |
966 | // Data got changed, better make sure it matches the type! | |
6a06907d | 967 | self.validate_operand(&dest.into())?; |
0bf4aa26 XL |
968 | } |
969 | ||
970 | Ok(()) | |
ff7c6d11 XL |
971 | } |
972 | ||
9fa01778 | 973 | /// Ensures that a place is in memory, and returns where it is. |
a1dfa0c6 XL |
974 | /// If the place currently refers to a local that doesn't yet have a matching allocation, |
975 | /// create such an allocation. | |
b7449926 | 976 | /// This is essentially `force_to_memplace`. |
48663c56 XL |
977 | /// |
978 | /// This supports unsized types and returns the computed size to avoid some | |
979 | /// redundant computation when copying; use `force_allocation` for a simpler, sized-only | |
980 | /// version. | |
981 | pub fn force_allocation_maybe_sized( | |
ff7c6d11 | 982 | &mut self, |
6a06907d | 983 | place: &PlaceTy<'tcx, M::PointerTag>, |
dfeec247 | 984 | meta: MemPlaceMeta<M::PointerTag>, |
dc9dc135 | 985 | ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> { |
48663c56 | 986 | let (mplace, size) = match place.place { |
b7449926 | 987 | Place::Local { frame, local } => { |
f035d41b | 988 | match M::access_local_mut(self, frame, local)? { |
dfeec247 | 989 | Ok(&mut local_val) => { |
b7449926 | 990 | // We need to make an allocation. |
48663c56 | 991 | |
b7449926 | 992 | // We need the layout of the local. We can NOT use the layout we got, |
0731742a | 993 | // that might e.g., be an inner field of a struct with `Scalar` layout, |
b7449926 | 994 | // that has different alignment than the outer field. |
ba9703b0 XL |
995 | let local_layout = |
996 | self.layout_of_local(&self.stack()[frame], local, None)?; | |
dfeec247 XL |
997 | // We also need to support unsized types, and hence cannot use `allocate`. |
998 | let (size, align) = self | |
6a06907d | 999 | .size_and_align_of(&meta, &local_layout)? |
48663c56 XL |
1000 | .expect("Cannot allocate for non-dyn-sized type"); |
1001 | let ptr = self.memory.allocate(size, align, MemoryKind::Stack); | |
1002 | let mplace = MemPlace { ptr: ptr.into(), align, meta }; | |
dfeec247 | 1003 | if let LocalValue::Live(Operand::Immediate(value)) = local_val { |
48663c56 XL |
1004 | // Preserve old value. |
1005 | // We don't have to validate as we can assume the local | |
1006 | // was already valid for its type. | |
1007 | let mplace = MPlaceTy { mplace, layout: local_layout }; | |
6a06907d | 1008 | self.write_immediate_to_mplace_no_validate(value, &mplace)?; |
48663c56 XL |
1009 | } |
1010 | // Now we can call `access_mut` again, asserting it goes well, | |
1011 | // and actually overwrite things. | |
f035d41b | 1012 | *M::access_local_mut(self, frame, local).unwrap().unwrap() = |
48663c56 XL |
1013 | LocalValue::Live(Operand::Indirect(mplace)); |
1014 | (mplace, Some(size)) | |
b7449926 | 1015 | } |
48663c56 | 1016 | Err(mplace) => (mplace, None), // this already was an indirect local |
b7449926 XL |
1017 | } |
1018 | } | |
dfeec247 | 1019 | Place::Ptr(mplace) => (mplace, None), |
b7449926 XL |
1020 | }; |
1021 | // Return with the original layout, so that the caller can go on | |
48663c56 XL |
1022 | Ok((MPlaceTy { mplace, layout: place.layout }, size)) |
1023 | } | |
1024 | ||
1025 | #[inline(always)] | |
1026 | pub fn force_allocation( | |
1027 | &mut self, | |
6a06907d | 1028 | place: &PlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 1029 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
dfeec247 | 1030 | Ok(self.force_allocation_maybe_sized(place, MemPlaceMeta::None)?.0) |
ff7c6d11 XL |
1031 | } |
1032 | ||
b7449926 | 1033 | pub fn allocate( |
ff7c6d11 | 1034 | &mut self, |
ba9703b0 XL |
1035 | layout: TyAndLayout<'tcx>, |
1036 | kind: MemoryKind<M::MemoryKind>, | |
0731742a | 1037 | ) -> MPlaceTy<'tcx, M::PointerTag> { |
48663c56 XL |
1038 | let ptr = self.memory.allocate(layout.size, layout.align.abi, kind); |
1039 | MPlaceTy::from_aligned_ptr(ptr, layout) | |
b7449926 | 1040 | } |
ff7c6d11 | 1041 | |
60c5eb7d XL |
1042 | /// Returns a wide MPlace. |
1043 | pub fn allocate_str( | |
1044 | &mut self, | |
1045 | str: &str, | |
ba9703b0 | 1046 | kind: MemoryKind<M::MemoryKind>, |
60c5eb7d | 1047 | ) -> MPlaceTy<'tcx, M::PointerTag> { |
ba9703b0 XL |
1048 | let ptr = self.memory.allocate_bytes(str.as_bytes(), kind); |
1049 | let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self); | |
60c5eb7d XL |
1050 | let mplace = MemPlace { |
1051 | ptr: ptr.into(), | |
1052 | align: Align::from_bytes(1).unwrap(), | |
dfeec247 | 1053 | meta: MemPlaceMeta::Meta(meta), |
60c5eb7d XL |
1054 | }; |
1055 | ||
1056 | let layout = self.layout_of(self.tcx.mk_static_str()).unwrap(); | |
1057 | MPlaceTy { mplace, layout } | |
1058 | } | |
1059 | ||
f035d41b XL |
1060 | /// Writes the discriminant of the given variant. |
1061 | pub fn write_discriminant( | |
b7449926 | 1062 | &mut self, |
a1dfa0c6 | 1063 | variant_index: VariantIdx, |
6a06907d | 1064 | dest: &PlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 1065 | ) -> InterpResult<'tcx> { |
60c5eb7d XL |
1066 | // Layout computation excludes uninhabited variants from consideration |
1067 | // therefore there's no way to represent those variants in the given layout. | |
1068 | if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() { | |
1069 | throw_ub!(Unreachable); | |
1070 | } | |
e74abb32 | 1071 | |
b7449926 | 1072 | match dest.layout.variants { |
ba9703b0 | 1073 | Variants::Single { index } => { |
60c5eb7d | 1074 | assert_eq!(index, variant_index); |
ff7c6d11 | 1075 | } |
ba9703b0 | 1076 | Variants::Multiple { |
f035d41b XL |
1077 | tag_encoding: TagEncoding::Direct, |
1078 | tag: ref tag_layout, | |
1079 | tag_field, | |
532ac7d7 XL |
1080 | .. |
1081 | } => { | |
60c5eb7d | 1082 | // No need to validate that the discriminant here because the |
ba9703b0 | 1083 | // `TyAndLayout::for_variant()` call earlier already checks the variant is valid. |
60c5eb7d | 1084 | |
48663c56 XL |
1085 | let discr_val = |
1086 | dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val; | |
b7449926 XL |
1087 | |
1088 | // raw discriminants for enums are isize or bigger during | |
1089 | // their computation, but the in-memory tag is the smallest possible | |
1090 | // representation | |
f035d41b | 1091 | let size = tag_layout.value.size(self); |
29967ef6 | 1092 | let tag_val = size.truncate(discr_val); |
b7449926 | 1093 | |
f035d41b | 1094 | let tag_dest = self.place_field(dest, tag_field)?; |
6a06907d | 1095 | self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?; |
ff7c6d11 | 1096 | } |
ba9703b0 | 1097 | Variants::Multiple { |
f035d41b XL |
1098 | tag_encoding: |
1099 | TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start }, | |
1100 | tag: ref tag_layout, | |
1101 | tag_field, | |
b7449926 | 1102 | .. |
ff7c6d11 | 1103 | } => { |
60c5eb7d | 1104 | // No need to validate that the discriminant here because the |
ba9703b0 | 1105 | // `TyAndLayout::for_variant()` call earlier already checks the variant is valid. |
60c5eb7d | 1106 | |
b7449926 | 1107 | if variant_index != dataful_variant { |
e1599b0c | 1108 | let variants_start = niche_variants.start().as_u32(); |
dfeec247 XL |
1109 | let variant_index_relative = variant_index |
1110 | .as_u32() | |
e1599b0c XL |
1111 | .checked_sub(variants_start) |
1112 | .expect("overflow computing relative variant idx"); | |
1113 | // We need to use machine arithmetic when taking into account `niche_start`: | |
f035d41b XL |
1114 | // tag_val = variant_index_relative + niche_start_val |
1115 | let tag_layout = self.layout_of(tag_layout.value.to_int_ty(*self.tcx))?; | |
1116 | let niche_start_val = ImmTy::from_uint(niche_start, tag_layout); | |
e1599b0c | 1117 | let variant_index_relative_val = |
f035d41b XL |
1118 | ImmTy::from_uint(variant_index_relative, tag_layout); |
1119 | let tag_val = self.binary_op( | |
e1599b0c | 1120 | mir::BinOp::Add, |
6a06907d XL |
1121 | &variant_index_relative_val, |
1122 | &niche_start_val, | |
b7449926 | 1123 | )?; |
e1599b0c | 1124 | // Write result. |
f035d41b | 1125 | let niche_dest = self.place_field(dest, tag_field)?; |
6a06907d | 1126 | self.write_immediate(*tag_val, &niche_dest)?; |
b7449926 XL |
1127 | } |
1128 | } | |
1129 | } | |
ff7c6d11 | 1130 | |
b7449926 XL |
1131 | Ok(()) |
1132 | } | |
ff7c6d11 | 1133 | |
a1dfa0c6 XL |
1134 | pub fn raw_const_to_mplace( |
1135 | &self, | |
1b1a35ee | 1136 | raw: ConstAlloc<'tcx>, |
dc9dc135 | 1137 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
a1dfa0c6 | 1138 | // This must be an allocation in `tcx` |
f9f354fc | 1139 | let _ = self.tcx.global_alloc(raw.alloc_id); |
3dfed10e | 1140 | let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?; |
a1dfa0c6 | 1141 | let layout = self.layout_of(raw.ty)?; |
dc9dc135 | 1142 | Ok(MPlaceTy::from_aligned_ptr(ptr, layout)) |
a1dfa0c6 XL |
1143 | } |
1144 | ||
b7449926 XL |
1145 | /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type. |
1146 | /// Also return some more information so drop doesn't have to run the same code twice. | |
dfeec247 XL |
1147 | pub(super) fn unpack_dyn_trait( |
1148 | &self, | |
6a06907d | 1149 | mplace: &MPlaceTy<'tcx, M::PointerTag>, |
dfeec247 | 1150 | ) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> { |
dc9dc135 | 1151 | let vtable = mplace.vtable(); // also sanity checks the type |
b7449926 XL |
1152 | let (instance, ty) = self.read_drop_type_from_vtable(vtable)?; |
1153 | let layout = self.layout_of(ty)?; | |
1154 | ||
1155 | // More sanity checks | |
1156 | if cfg!(debug_assertions) { | |
1157 | let (size, align) = self.read_size_and_align_from_vtable(vtable)?; | |
1158 | assert_eq!(size, layout.size); | |
a1dfa0c6 XL |
1159 | // only ABI alignment is preserved |
1160 | assert_eq!(align, layout.align.abi); | |
ff7c6d11 | 1161 | } |
ff7c6d11 | 1162 | |
6a06907d | 1163 | let mplace = MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..**mplace }, layout }; |
b7449926 | 1164 | Ok((instance, mplace)) |
ff7c6d11 XL |
1165 | } |
1166 | } |