]>
Commit | Line | Data |
---|---|---|
b7449926 XL |
1 | //! Computations on places -- field projections, going from mir::Place, and writing |
2 | //! into a place. | |
3 | //! All high-level functions to write to memory work on places as destinations. | |
4 | ||
5 | use std::convert::TryFrom; | |
0bf4aa26 | 6 | use std::hash::Hash; |
b7449926 | 7 | |
60c5eb7d | 8 | use rustc_macros::HashStable; |
ba9703b0 XL |
9 | use rustc_middle::mir; |
10 | use rustc_middle::ty::layout::{PrimitiveExt, TyAndLayout}; | |
11 | use rustc_middle::ty::{self, Ty}; | |
f035d41b | 12 | use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding}; |
ba9703b0 | 13 | use rustc_target::abi::{HasDataLayout, LayoutOf, Size, VariantIdx, Variants}; |
ff7c6d11 | 14 | |
0bf4aa26 | 15 | use super::{ |
1b1a35ee XL |
16 | mir_assign_valid_types, truncate, AllocId, AllocMap, Allocation, AllocationExtra, ConstAlloc, |
17 | ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, Operand, | |
18 | Pointer, PointerArithmetic, Scalar, ScalarMaybeUninit, | |
b7449926 | 19 | }; |
ff7c6d11 | 20 | |
60c5eb7d | 21 | #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)] |
dfeec247 | 22 | /// Information required for the sound usage of a `MemPlace`. |
f9f354fc | 23 | pub enum MemPlaceMeta<Tag = ()> { |
dfeec247 | 24 | /// The unsized payload (e.g. length for slices or vtable pointer for trait objects). |
f9f354fc | 25 | Meta(Scalar<Tag>), |
dfeec247 XL |
26 | /// `Sized` types or unsized `extern type` |
27 | None, | |
28 | /// The address of this place may not be taken. This protects the `MemPlace` from coming from | |
ba9703b0 | 29 | /// a ZST Operand without a backing allocation and being converted to an integer address. This |
dfeec247 XL |
30 | /// should be impossible, because you can't take the address of an operand, but this is a second |
31 | /// protection layer ensuring that we don't mess up. | |
32 | Poison, | |
33 | } | |
34 | ||
f9f354fc XL |
35 | impl<Tag> MemPlaceMeta<Tag> { |
36 | pub fn unwrap_meta(self) -> Scalar<Tag> { | |
dfeec247 XL |
37 | match self { |
38 | Self::Meta(s) => s, | |
39 | Self::None | Self::Poison => { | |
40 | bug!("expected wide pointer extra data (e.g. slice length or trait object vtable)") | |
41 | } | |
42 | } | |
43 | } | |
44 | fn has_meta(self) -> bool { | |
45 | match self { | |
46 | Self::Meta(_) => true, | |
47 | Self::None | Self::Poison => false, | |
48 | } | |
49 | } | |
dfeec247 | 50 | |
dfeec247 XL |
51 | pub fn erase_tag(self) -> MemPlaceMeta<()> { |
52 | match self { | |
53 | Self::Meta(s) => MemPlaceMeta::Meta(s.erase_tag()), | |
54 | Self::None => MemPlaceMeta::None, | |
55 | Self::Poison => MemPlaceMeta::Poison, | |
56 | } | |
57 | } | |
58 | } | |
59 | ||
60 | #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)] | |
f9f354fc | 61 | pub struct MemPlace<Tag = ()> { |
b7449926 XL |
62 | /// A place may have an integral pointer for ZSTs, and since it might |
63 | /// be turned back into a reference before ever being dereferenced. | |
3dfed10e | 64 | /// However, it may never be uninit. |
f9f354fc | 65 | pub ptr: Scalar<Tag>, |
b7449926 | 66 | pub align: Align, |
9fa01778 | 67 | /// Metadata for unsized places. Interpretation is up to the type. |
b7449926 | 68 | /// Must not be present for sized types, but can be missing for unsized types |
0731742a | 69 | /// (e.g., `extern type`). |
f9f354fc | 70 | pub meta: MemPlaceMeta<Tag>, |
b7449926 XL |
71 | } |
72 | ||
60c5eb7d | 73 | #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)] |
f9f354fc | 74 | pub enum Place<Tag = ()> { |
2c00a5a8 | 75 | /// A place referring to a value allocated in the `Memory` system. |
f9f354fc | 76 | Ptr(MemPlace<Tag>), |
b7449926 XL |
77 | |
78 | /// To support alloc-free locals, we are able to write directly to a local. | |
79 | /// (Without that optimization, we'd just always be a `MemPlace`.) | |
dfeec247 | 80 | Local { frame: usize, local: mir::Local }, |
b7449926 XL |
81 | } |
82 | ||
b7449926 | 83 | #[derive(Copy, Clone, Debug)] |
dfeec247 | 84 | pub struct PlaceTy<'tcx, Tag = ()> { |
60c5eb7d | 85 | place: Place<Tag>, // Keep this private; it helps enforce invariants. |
ba9703b0 | 86 | pub layout: TyAndLayout<'tcx>, |
ff7c6d11 XL |
87 | } |
88 | ||
0bf4aa26 XL |
89 | impl<'tcx, Tag> ::std::ops::Deref for PlaceTy<'tcx, Tag> { |
90 | type Target = Place<Tag>; | |
b7449926 | 91 | #[inline(always)] |
0bf4aa26 | 92 | fn deref(&self) -> &Place<Tag> { |
b7449926 XL |
93 | &self.place |
94 | } | |
ff7c6d11 XL |
95 | } |
96 | ||
b7449926 | 97 | /// A MemPlace with its layout. Constructing it is only possible in this module. |
9fa01778 | 98 | #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] |
dfeec247 | 99 | pub struct MPlaceTy<'tcx, Tag = ()> { |
0bf4aa26 | 100 | mplace: MemPlace<Tag>, |
ba9703b0 | 101 | pub layout: TyAndLayout<'tcx>, |
b7449926 XL |
102 | } |
103 | ||
0bf4aa26 XL |
104 | impl<'tcx, Tag> ::std::ops::Deref for MPlaceTy<'tcx, Tag> { |
105 | type Target = MemPlace<Tag>; | |
b7449926 | 106 | #[inline(always)] |
0bf4aa26 | 107 | fn deref(&self) -> &MemPlace<Tag> { |
b7449926 XL |
108 | &self.mplace |
109 | } | |
110 | } | |
111 | ||
0bf4aa26 | 112 | impl<'tcx, Tag> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> { |
b7449926 | 113 | #[inline(always)] |
0bf4aa26 | 114 | fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self { |
dfeec247 | 115 | PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout } |
ff7c6d11 | 116 | } |
b7449926 | 117 | } |
ff7c6d11 | 118 | |
48663c56 XL |
119 | impl<Tag> MemPlace<Tag> { |
120 | /// Replace ptr tag, maintain vtable tag (if any) | |
0bf4aa26 | 121 | #[inline] |
48663c56 | 122 | pub fn replace_tag(self, new_tag: Tag) -> Self { |
dfeec247 | 123 | MemPlace { ptr: self.ptr.erase_tag().with_tag(new_tag), align: self.align, meta: self.meta } |
0bf4aa26 | 124 | } |
0bf4aa26 | 125 | |
0bf4aa26 | 126 | #[inline] |
48663c56 | 127 | pub fn erase_tag(self) -> MemPlace { |
dfeec247 | 128 | MemPlace { ptr: self.ptr.erase_tag(), align: self.align, meta: self.meta.erase_tag() } |
0bf4aa26 XL |
129 | } |
130 | ||
b7449926 | 131 | #[inline(always)] |
dfeec247 XL |
132 | fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self { |
133 | MemPlace { ptr, align, meta: MemPlaceMeta::None } | |
ff7c6d11 XL |
134 | } |
135 | ||
b7449926 | 136 | #[inline(always)] |
0bf4aa26 | 137 | pub fn from_ptr(ptr: Pointer<Tag>, align: Align) -> Self { |
94b46f34 | 138 | Self::from_scalar_ptr(ptr.into(), align) |
ff7c6d11 XL |
139 | } |
140 | ||
60c5eb7d | 141 | /// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space. |
a1dfa0c6 XL |
142 | /// This is the inverse of `ref_to_mplace`. |
143 | #[inline(always)] | |
144 | pub fn to_ref(self) -> Immediate<Tag> { | |
145 | match self.meta { | |
dfeec247 XL |
146 | MemPlaceMeta::None => Immediate::Scalar(self.ptr.into()), |
147 | MemPlaceMeta::Meta(meta) => Immediate::ScalarPair(self.ptr.into(), meta.into()), | |
148 | MemPlaceMeta::Poison => bug!( | |
149 | "MPlaceTy::dangling may never be used to produce a \ | |
150 | place that will have the address of its pointee taken" | |
151 | ), | |
a1dfa0c6 XL |
152 | } |
153 | } | |
154 | ||
155 | pub fn offset( | |
156 | self, | |
157 | offset: Size, | |
dfeec247 | 158 | meta: MemPlaceMeta<Tag>, |
a1dfa0c6 | 159 | cx: &impl HasDataLayout, |
dc9dc135 | 160 | ) -> InterpResult<'tcx, Self> { |
a1dfa0c6 XL |
161 | Ok(MemPlace { |
162 | ptr: self.ptr.ptr_offset(offset, cx)?, | |
163 | align: self.align.restrict_for_offset(offset), | |
164 | meta, | |
165 | }) | |
166 | } | |
0bf4aa26 | 167 | } |
b7449926 | 168 | |
0bf4aa26 XL |
169 | impl<'tcx, Tag> MPlaceTy<'tcx, Tag> { |
170 | /// Produces a MemPlace that works for ZST but nothing else | |
171 | #[inline] | |
ba9703b0 | 172 | pub fn dangling(layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self { |
dfeec247 | 173 | let align = layout.align.abi; |
ba9703b0 | 174 | let ptr = Scalar::from_machine_usize(align.bytes(), cx); |
dfeec247 XL |
175 | // `Poison` this to make sure that the pointer value `ptr` is never observable by the program. |
176 | MPlaceTy { mplace: MemPlace { ptr, align, meta: MemPlaceMeta::Poison }, layout } | |
ff7c6d11 XL |
177 | } |
178 | ||
48663c56 | 179 | /// Replace ptr tag, maintain vtable tag (if any) |
0731742a | 180 | #[inline] |
48663c56 | 181 | pub fn replace_tag(self, new_tag: Tag) -> Self { |
dfeec247 | 182 | MPlaceTy { mplace: self.mplace.replace_tag(new_tag), layout: self.layout } |
0731742a XL |
183 | } |
184 | ||
185 | #[inline] | |
a1dfa0c6 XL |
186 | pub fn offset( |
187 | self, | |
188 | offset: Size, | |
dfeec247 | 189 | meta: MemPlaceMeta<Tag>, |
ba9703b0 | 190 | layout: TyAndLayout<'tcx>, |
a1dfa0c6 | 191 | cx: &impl HasDataLayout, |
dc9dc135 | 192 | ) -> InterpResult<'tcx, Self> { |
dfeec247 | 193 | Ok(MPlaceTy { mplace: self.mplace.offset(offset, meta, cx)?, layout }) |
a1dfa0c6 XL |
194 | } |
195 | ||
b7449926 | 196 | #[inline] |
ba9703b0 | 197 | fn from_aligned_ptr(ptr: Pointer<Tag>, layout: TyAndLayout<'tcx>) -> Self { |
a1dfa0c6 | 198 | MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout } |
ff7c6d11 XL |
199 | } |
200 | ||
b7449926 | 201 | #[inline] |
dc9dc135 | 202 | pub(super) fn len(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> { |
b7449926 | 203 | if self.layout.is_unsized() { |
0bf4aa26 | 204 | // We need to consult `meta` metadata |
1b1a35ee | 205 | match self.layout.ty.kind() { |
ba9703b0 | 206 | ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_machine_usize(cx), |
b7449926 | 207 | _ => bug!("len not supported on unsized type {:?}", self.layout.ty), |
ff7c6d11 | 208 | } |
b7449926 XL |
209 | } else { |
210 | // Go through the layout. There are lots of types that support a length, | |
0731742a | 211 | // e.g., SIMD types. |
b7449926 | 212 | match self.layout.fields { |
ba9703b0 | 213 | FieldsShape::Array { count, .. } => Ok(count), |
b7449926 XL |
214 | _ => bug!("len not supported on sized type {:?}", self.layout.ty), |
215 | } | |
216 | } | |
217 | } | |
ff7c6d11 | 218 | |
b7449926 | 219 | #[inline] |
dc9dc135 | 220 | pub(super) fn vtable(self) -> Scalar<Tag> { |
1b1a35ee | 221 | match self.layout.ty.kind() { |
dfeec247 | 222 | ty::Dynamic(..) => self.mplace.meta.unwrap_meta(), |
b7449926 | 223 | _ => bug!("vtable not supported on type {:?}", self.layout.ty), |
ff7c6d11 XL |
224 | } |
225 | } | |
226 | } | |
227 | ||
416331ca | 228 | // These are defined here because they produce a place. |
9fa01778 | 229 | impl<'tcx, Tag: ::std::fmt::Debug + Copy> OpTy<'tcx, Tag> { |
b7449926 | 230 | #[inline(always)] |
dfeec247 XL |
231 | /// Note: do not call `as_ref` on the resulting place. This function should only be used to |
232 | /// read from the resulting mplace, not to get its address back. | |
233 | pub fn try_as_mplace( | |
234 | self, | |
235 | cx: &impl HasDataLayout, | |
236 | ) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> { | |
9fa01778 | 237 | match *self { |
b7449926 | 238 | Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }), |
dfeec247 XL |
239 | Operand::Immediate(_) if self.layout.is_zst() => { |
240 | Ok(MPlaceTy::dangling(self.layout, cx)) | |
241 | } | |
ba9703b0 | 242 | Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)), |
b7449926 XL |
243 | } |
244 | } | |
245 | ||
246 | #[inline(always)] | |
dfeec247 XL |
247 | /// Note: do not call `as_ref` on the resulting place. This function should only be used to |
248 | /// read from the resulting mplace, not to get its address back. | |
249 | pub fn assert_mem_place(self, cx: &impl HasDataLayout) -> MPlaceTy<'tcx, Tag> { | |
250 | self.try_as_mplace(cx).unwrap() | |
b7449926 XL |
251 | } |
252 | } | |
253 | ||
416331ca | 254 | impl<Tag: ::std::fmt::Debug> Place<Tag> { |
b7449926 | 255 | #[inline] |
416331ca | 256 | pub fn assert_mem_place(self) -> MemPlace<Tag> { |
b7449926 XL |
257 | match self { |
258 | Place::Ptr(mplace) => mplace, | |
416331ca | 259 | _ => bug!("assert_mem_place: expected Place::Ptr, got {:?}", self), |
b7449926 XL |
260 | } |
261 | } | |
b7449926 XL |
262 | } |
263 | ||
0bf4aa26 | 264 | impl<'tcx, Tag: ::std::fmt::Debug> PlaceTy<'tcx, Tag> { |
b7449926 | 265 | #[inline] |
416331ca XL |
266 | pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> { |
267 | MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout } | |
b7449926 XL |
268 | } |
269 | } | |
270 | ||
0bf4aa26 | 271 | // separating the pointer tag for `impl Trait`, see https://github.com/rust-lang/rust/issues/54385 |
ba9703b0 | 272 | impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M> |
0bf4aa26 | 273 | where |
a1dfa0c6 | 274 | // FIXME: Working around https://github.com/rust-lang/rust/issues/54385 |
dc9dc135 XL |
275 | Tag: ::std::fmt::Debug + Copy + Eq + Hash + 'static, |
276 | M: Machine<'mir, 'tcx, PointerTag = Tag>, | |
a1dfa0c6 | 277 | // FIXME: Working around https://github.com/rust-lang/rust/issues/24159 |
ba9703b0 | 278 | M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKind>, Allocation<Tag, M::AllocExtra>)>, |
48663c56 | 279 | M::AllocExtra: AllocationExtra<Tag>, |
0bf4aa26 | 280 | { |
60c5eb7d | 281 | /// Take a value, which represents a (thin or wide) reference, and make it a place. |
a1dfa0c6 | 282 | /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`. |
e1599b0c XL |
283 | /// |
284 | /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not | |
285 | /// want to ever use the place for memory access! | |
286 | /// Generally prefer `deref_operand`. | |
b7449926 | 287 | pub fn ref_to_mplace( |
0bf4aa26 | 288 | &self, |
a1dfa0c6 | 289 | val: ImmTy<'tcx, M::PointerTag>, |
dc9dc135 | 290 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
dfeec247 XL |
291 | let pointee_type = |
292 | val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty; | |
b7449926 | 293 | let layout = self.layout_of(pointee_type)?; |
60c5eb7d | 294 | let (ptr, meta) = match *val { |
3dfed10e | 295 | Immediate::Scalar(ptr) => (ptr.check_init()?, MemPlaceMeta::None), |
dfeec247 | 296 | Immediate::ScalarPair(ptr, meta) => { |
3dfed10e | 297 | (ptr.check_init()?, MemPlaceMeta::Meta(meta.check_init()?)) |
dfeec247 | 298 | } |
60c5eb7d | 299 | }; |
0bf4aa26 | 300 | |
a1dfa0c6 | 301 | let mplace = MemPlace { |
60c5eb7d | 302 | ptr, |
9fa01778 XL |
303 | // We could use the run-time alignment here. For now, we do not, because |
304 | // the point of tracking the alignment here is to make sure that the *static* | |
305 | // alignment information emitted with the loads is correct. The run-time | |
306 | // alignment can only be more restrictive. | |
a1dfa0c6 | 307 | align: layout.align.abi, |
60c5eb7d | 308 | meta, |
b7449926 XL |
309 | }; |
310 | Ok(MPlaceTy { mplace, layout }) | |
311 | } | |
312 | ||
416331ca XL |
313 | /// Take an operand, representing a pointer, and dereference it to a place -- that |
314 | /// will always be a MemPlace. Lives in `place.rs` because it creates a place. | |
a1dfa0c6 XL |
315 | pub fn deref_operand( |
316 | &self, | |
317 | src: OpTy<'tcx, M::PointerTag>, | |
dc9dc135 | 318 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
a1dfa0c6 XL |
319 | let val = self.read_immediate(src)?; |
320 | trace!("deref to {} on {:?}", val.layout.ty, *val); | |
e1599b0c | 321 | let place = self.ref_to_mplace(val)?; |
f9f354fc | 322 | self.mplace_access_checked(place, None) |
0bf4aa26 XL |
323 | } |
324 | ||
416331ca XL |
325 | /// Check if the given place is good for memory access with the given |
326 | /// size, falling back to the layout's size if `None` (in the latter case, | |
327 | /// this must be a statically sized type). | |
328 | /// | |
329 | /// On success, returns `None` for zero-sized accesses (where nothing else is | |
330 | /// left to do) and a `Pointer` to use for the actual access otherwise. | |
331 | #[inline] | |
dfeec247 | 332 | pub(super) fn check_mplace_access( |
416331ca XL |
333 | &self, |
334 | place: MPlaceTy<'tcx, M::PointerTag>, | |
335 | size: Option<Size>, | |
336 | ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> { | |
337 | let size = size.unwrap_or_else(|| { | |
338 | assert!(!place.layout.is_unsized()); | |
dfeec247 | 339 | assert!(!place.meta.has_meta()); |
416331ca XL |
340 | place.layout.size |
341 | }); | |
342 | self.memory.check_ptr_access(place.ptr, size, place.align) | |
343 | } | |
344 | ||
e1599b0c XL |
345 | /// Return the "access-checked" version of this `MPlace`, where for non-ZST |
346 | /// this is definitely a `Pointer`. | |
f9f354fc XL |
347 | /// |
348 | /// `force_align` must only be used when correct alignment does not matter, | |
349 | /// like in Stacked Borrows. | |
e1599b0c XL |
350 | pub fn mplace_access_checked( |
351 | &self, | |
352 | mut place: MPlaceTy<'tcx, M::PointerTag>, | |
f9f354fc | 353 | force_align: Option<Align>, |
e1599b0c | 354 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
dfeec247 XL |
355 | let (size, align) = self |
356 | .size_and_align_of_mplace(place)? | |
e1599b0c XL |
357 | .unwrap_or((place.layout.size, place.layout.align.abi)); |
358 | assert!(place.mplace.align <= align, "dynamic alignment less strict than static one?"); | |
f9f354fc XL |
359 | // Check (stricter) dynamic alignment, unless forced otherwise. |
360 | place.mplace.align = force_align.unwrap_or(align); | |
e1599b0c XL |
361 | // When dereferencing a pointer, it must be non-NULL, aligned, and live. |
362 | if let Some(ptr) = self.check_mplace_access(place, Some(size))? { | |
363 | place.mplace.ptr = ptr.into(); | |
364 | } | |
365 | Ok(place) | |
366 | } | |
367 | ||
416331ca XL |
368 | /// Force `place.ptr` to a `Pointer`. |
369 | /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot. | |
dfeec247 | 370 | pub(super) fn force_mplace_ptr( |
416331ca XL |
371 | &self, |
372 | mut place: MPlaceTy<'tcx, M::PointerTag>, | |
373 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { | |
374 | place.mplace.ptr = self.force_ptr(place.mplace.ptr)?.into(); | |
375 | Ok(place) | |
376 | } | |
377 | ||
ba9703b0 XL |
378 | /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is |
379 | /// always possible without allocating, so it can take `&self`. Also return the field's layout. | |
b7449926 | 380 | /// This supports both struct and array fields. |
ba9703b0 XL |
381 | /// |
382 | /// This also works for arrays, but then the `usize` index type is restricting. | |
383 | /// For indexing into arrays, use `mplace_index`. | |
b7449926 XL |
384 | #[inline(always)] |
385 | pub fn mplace_field( | |
8faf50e0 | 386 | &self, |
0bf4aa26 | 387 | base: MPlaceTy<'tcx, M::PointerTag>, |
ba9703b0 | 388 | field: usize, |
dc9dc135 | 389 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
ba9703b0 XL |
390 | let offset = base.layout.fields.offset(field); |
391 | let field_layout = base.layout.field(self, field)?; | |
b7449926 | 392 | |
9fa01778 | 393 | // Offset may need adjustment for unsized fields. |
0bf4aa26 | 394 | let (meta, offset) = if field_layout.is_unsized() { |
9fa01778 XL |
395 | // Re-use parent metadata to determine dynamic field layout. |
396 | // With custom DSTS, this *will* execute user-defined code, but the same | |
397 | // happens at run-time so that's okay. | |
a1dfa0c6 XL |
398 | let align = match self.size_and_align_of(base.meta, field_layout)? { |
399 | Some((_, align)) => align, | |
dfeec247 | 400 | None if offset == Size::ZERO => { |
a1dfa0c6 XL |
401 | // An extern type at offset 0, we fall back to its static alignment. |
402 | // FIXME: Once we have made decisions for how to handle size and alignment | |
403 | // of `extern type`, this should be adapted. It is just a temporary hack | |
404 | // to get some code to work that probably ought to work. | |
dfeec247 XL |
405 | field_layout.align.abi |
406 | } | |
f035d41b XL |
407 | None => span_bug!( |
408 | self.cur_span(), | |
409 | "cannot compute offset for extern type field at non-0 offset" | |
410 | ), | |
a1dfa0c6 XL |
411 | }; |
412 | (base.meta, offset.align_to(align)) | |
b7449926 | 413 | } else { |
0bf4aa26 | 414 | // base.meta could be present; we might be accessing a sized field of an unsized |
b7449926 | 415 | // struct. |
dfeec247 | 416 | (MemPlaceMeta::None, offset) |
b7449926 XL |
417 | }; |
418 | ||
a1dfa0c6 XL |
419 | // We do not look at `base.layout.align` nor `field_layout.align`, unlike |
420 | // codegen -- mostly to see if we can get away with that | |
421 | base.offset(offset, meta, field_layout, self) | |
ff7c6d11 XL |
422 | } |
423 | ||
ba9703b0 XL |
424 | /// Index into an array. |
425 | #[inline(always)] | |
426 | pub fn mplace_index( | |
427 | &self, | |
428 | base: MPlaceTy<'tcx, M::PointerTag>, | |
429 | index: u64, | |
430 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { | |
431 | // Not using the layout method because we want to compute on u64 | |
432 | match base.layout.fields { | |
433 | FieldsShape::Array { stride, .. } => { | |
434 | let len = base.len(self)?; | |
435 | if index >= len { | |
436 | // This can only be reached in ConstProp and non-rustc-MIR. | |
437 | throw_ub!(BoundsCheckFailed { len, index }); | |
438 | } | |
439 | let offset = stride * index; // `Size` multiplication | |
440 | // All fields have the same layout. | |
441 | let field_layout = base.layout.field(self, 0)?; | |
442 | ||
443 | assert!(!field_layout.is_unsized()); | |
444 | base.offset(offset, MemPlaceMeta::None, field_layout, self) | |
445 | } | |
f035d41b XL |
446 | _ => span_bug!( |
447 | self.cur_span(), | |
448 | "`mplace_index` called on non-array type {:?}", | |
449 | base.layout.ty | |
450 | ), | |
ba9703b0 XL |
451 | } |
452 | } | |
453 | ||
b7449926 XL |
454 | // Iterates over all fields of an array. Much more efficient than doing the |
455 | // same by repeatedly calling `mplace_array`. | |
dfeec247 | 456 | pub(super) fn mplace_array_fields( |
0531ce1d | 457 | &self, |
0bf4aa26 | 458 | base: MPlaceTy<'tcx, Tag>, |
dc9dc135 | 459 | ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'tcx> |
0bf4aa26 | 460 | { |
b7449926 XL |
461 | let len = base.len(self)?; // also asserts that we have a type where this makes sense |
462 | let stride = match base.layout.fields { | |
ba9703b0 | 463 | FieldsShape::Array { stride, .. } => stride, |
f035d41b | 464 | _ => span_bug!(self.cur_span(), "mplace_array_fields: expected an array layout"), |
94b46f34 | 465 | }; |
b7449926 XL |
466 | let layout = base.layout.field(self, 0)?; |
467 | let dl = &self.tcx.data_layout; | |
ba9703b0 XL |
468 | // `Size` multiplication |
469 | Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl))) | |
0531ce1d XL |
470 | } |
471 | ||
dfeec247 | 472 | fn mplace_subslice( |
8faf50e0 | 473 | &self, |
0bf4aa26 | 474 | base: MPlaceTy<'tcx, M::PointerTag>, |
b7449926 XL |
475 | from: u64, |
476 | to: u64, | |
60c5eb7d | 477 | from_end: bool, |
dc9dc135 | 478 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
b7449926 | 479 | let len = base.len(self)?; // also asserts that we have a type where this makes sense |
60c5eb7d | 480 | let actual_to = if from_end { |
ba9703b0 | 481 | if from.checked_add(to).map_or(true, |to| to > len) { |
dfeec247 | 482 | // This can only be reached in ConstProp and non-rustc-MIR. |
ba9703b0 | 483 | throw_ub!(BoundsCheckFailed { len: len, index: from.saturating_add(to) }); |
dfeec247 | 484 | } |
ba9703b0 | 485 | len.checked_sub(to).unwrap() |
60c5eb7d XL |
486 | } else { |
487 | to | |
488 | }; | |
b7449926 XL |
489 | |
490 | // Not using layout method because that works with usize, and does not work with slices | |
491 | // (that have count 0 in their layout). | |
492 | let from_offset = match base.layout.fields { | |
ba9703b0 | 493 | FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked |
f035d41b XL |
494 | _ => { |
495 | span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout) | |
496 | } | |
ff7c6d11 | 497 | }; |
b7449926 | 498 | |
0bf4aa26 | 499 | // Compute meta and new layout |
ba9703b0 | 500 | let inner_len = actual_to.checked_sub(from).unwrap(); |
1b1a35ee | 501 | let (meta, ty) = match base.layout.ty.kind() { |
b7449926 XL |
502 | // It is not nice to match on the type, but that seems to be the only way to |
503 | // implement this. | |
dfeec247 | 504 | ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(inner, inner_len)), |
b7449926 | 505 | ty::Slice(..) => { |
ba9703b0 | 506 | let len = Scalar::from_machine_usize(inner_len, self); |
dfeec247 | 507 | (MemPlaceMeta::Meta(len), base.layout.ty) |
b7449926 | 508 | } |
f035d41b XL |
509 | _ => { |
510 | span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty) | |
511 | } | |
b7449926 XL |
512 | }; |
513 | let layout = self.layout_of(ty)?; | |
a1dfa0c6 | 514 | base.offset(from_offset, meta, layout, self) |
b7449926 XL |
515 | } |
516 | ||
dfeec247 | 517 | pub(super) fn mplace_downcast( |
b7449926 | 518 | &self, |
0bf4aa26 | 519 | base: MPlaceTy<'tcx, M::PointerTag>, |
a1dfa0c6 | 520 | variant: VariantIdx, |
dc9dc135 | 521 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
b7449926 | 522 | // Downcasts only change the layout |
dfeec247 | 523 | assert!(!base.meta.has_meta()); |
b7449926 XL |
524 | Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..base }) |
525 | } | |
526 | ||
527 | /// Project into an mplace | |
dfeec247 | 528 | pub(super) fn mplace_projection( |
b7449926 | 529 | &self, |
0bf4aa26 | 530 | base: MPlaceTy<'tcx, M::PointerTag>, |
f9f354fc | 531 | proj_elem: mir::PlaceElem<'tcx>, |
dc9dc135 | 532 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
ba9703b0 | 533 | use rustc_middle::mir::ProjectionElem::*; |
f9f354fc | 534 | Ok(match proj_elem { |
ba9703b0 | 535 | Field(field, _) => self.mplace_field(base, field.index())?, |
b7449926 XL |
536 | Downcast(_, variant) => self.mplace_downcast(base, variant)?, |
537 | Deref => self.deref_operand(base.into())?, | |
538 | ||
539 | Index(local) => { | |
9fa01778 XL |
540 | let layout = self.layout_of(self.tcx.types.usize)?; |
541 | let n = self.access_local(self.frame(), local, Some(layout))?; | |
542 | let n = self.read_scalar(n)?; | |
ba9703b0 | 543 | let n = u64::try_from( |
3dfed10e | 544 | self.force_bits(n.check_init()?, self.tcx.data_layout.pointer_size)?, |
ba9703b0 XL |
545 | ) |
546 | .unwrap(); | |
547 | self.mplace_index(base, n)? | |
b7449926 XL |
548 | } |
549 | ||
dfeec247 | 550 | ConstantIndex { offset, min_length, from_end } => { |
b7449926 | 551 | let n = base.len(self)?; |
1b1a35ee | 552 | if n < min_length { |
dfeec247 | 553 | // This can only be reached in ConstProp and non-rustc-MIR. |
1b1a35ee | 554 | throw_ub!(BoundsCheckFailed { len: min_length, index: n }); |
dfeec247 | 555 | } |
b7449926 XL |
556 | |
557 | let index = if from_end { | |
ba9703b0 | 558 | assert!(0 < offset && offset <= min_length); |
1b1a35ee | 559 | n.checked_sub(offset).unwrap() |
b7449926 | 560 | } else { |
dfeec247 | 561 | assert!(offset < min_length); |
1b1a35ee | 562 | offset |
b7449926 XL |
563 | }; |
564 | ||
ba9703b0 | 565 | self.mplace_index(base, index)? |
b7449926 XL |
566 | } |
567 | ||
1b1a35ee | 568 | Subslice { from, to, from_end } => self.mplace_subslice(base, from, to, from_end)?, |
b7449926 | 569 | }) |
ff7c6d11 XL |
570 | } |
571 | ||
9fa01778 | 572 | /// Gets the place of a field inside the place, and also the field's type. |
b7449926 | 573 | /// Just a convenience function, but used quite a bit. |
a1dfa0c6 XL |
574 | /// This is the only projection that might have a side-effect: We cannot project |
575 | /// into the field of a local `ScalarPair`, we have to first allocate it. | |
b7449926 | 576 | pub fn place_field( |
ff7c6d11 | 577 | &mut self, |
0bf4aa26 | 578 | base: PlaceTy<'tcx, M::PointerTag>, |
ba9703b0 | 579 | field: usize, |
dc9dc135 | 580 | ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { |
b7449926 XL |
581 | // FIXME: We could try to be smarter and avoid allocation for fields that span the |
582 | // entire place. | |
583 | let mplace = self.force_allocation(base)?; | |
584 | Ok(self.mplace_field(mplace, field)?.into()) | |
ff7c6d11 XL |
585 | } |
586 | ||
ba9703b0 XL |
587 | pub fn place_index( |
588 | &mut self, | |
589 | base: PlaceTy<'tcx, M::PointerTag>, | |
590 | index: u64, | |
591 | ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { | |
592 | let mplace = self.force_allocation(base)?; | |
593 | Ok(self.mplace_index(mplace, index)?.into()) | |
594 | } | |
595 | ||
b7449926 | 596 | pub fn place_downcast( |
a1dfa0c6 | 597 | &self, |
0bf4aa26 | 598 | base: PlaceTy<'tcx, M::PointerTag>, |
a1dfa0c6 | 599 | variant: VariantIdx, |
dc9dc135 | 600 | ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { |
b7449926 XL |
601 | // Downcast just changes the layout |
602 | Ok(match base.place { | |
dfeec247 XL |
603 | Place::Ptr(mplace) => { |
604 | self.mplace_downcast(MPlaceTy { mplace, layout: base.layout }, variant)?.into() | |
605 | } | |
b7449926 | 606 | Place::Local { .. } => { |
a1dfa0c6 | 607 | let layout = base.layout.for_variant(self, variant); |
b7449926 | 608 | PlaceTy { layout, ..base } |
ff7c6d11 | 609 | } |
b7449926 | 610 | }) |
ff7c6d11 XL |
611 | } |
612 | ||
9fa01778 | 613 | /// Projects into a place. |
b7449926 XL |
614 | pub fn place_projection( |
615 | &mut self, | |
0bf4aa26 | 616 | base: PlaceTy<'tcx, M::PointerTag>, |
f9f354fc | 617 | &proj_elem: &mir::ProjectionElem<mir::Local, Ty<'tcx>>, |
dc9dc135 | 618 | ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { |
ba9703b0 | 619 | use rustc_middle::mir::ProjectionElem::*; |
f9f354fc | 620 | Ok(match proj_elem { |
ba9703b0 | 621 | Field(field, _) => self.place_field(base, field.index())?, |
b7449926 XL |
622 | Downcast(_, variant) => self.place_downcast(base, variant)?, |
623 | Deref => self.deref_operand(self.place_to_op(base)?)?.into(), | |
624 | // For the other variants, we have to force an allocation. | |
625 | // This matches `operand_projection`. | |
626 | Subslice { .. } | ConstantIndex { .. } | Index(_) => { | |
627 | let mplace = self.force_allocation(base)?; | |
628 | self.mplace_projection(mplace, proj_elem)?.into() | |
629 | } | |
630 | }) | |
631 | } | |
ff7c6d11 | 632 | |
9fa01778 | 633 | /// Computes a place. You should only use this if you intend to write into this |
b7449926 | 634 | /// place; for reading, a more efficient alternative is `eval_place_for_read`. |
0bf4aa26 XL |
635 | pub fn eval_place( |
636 | &mut self, | |
ba9703b0 | 637 | place: mir::Place<'tcx>, |
dc9dc135 | 638 | ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { |
f9f354fc XL |
639 | let mut place_ty = PlaceTy { |
640 | // This works even for dead/uninitialized locals; we check further when writing | |
641 | place: Place::Local { frame: self.frame_idx(), local: place.local }, | |
642 | layout: self.layout_of_local(self.frame(), place.local, None)?, | |
e1599b0c | 643 | }; |
b7449926 | 644 | |
e1599b0c | 645 | for elem in place.projection.iter() { |
f9f354fc | 646 | place_ty = self.place_projection(place_ty, &elem)? |
e1599b0c | 647 | } |
ff7c6d11 | 648 | |
3dfed10e | 649 | trace!("{:?}", self.dump_place(place_ty.place)); |
f9f354fc XL |
650 | // Sanity-check the type we ended up with. |
651 | debug_assert!(mir_assign_valid_types( | |
652 | *self.tcx, | |
f035d41b | 653 | self.param_env, |
f9f354fc XL |
654 | self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions( |
655 | place.ty(&self.frame().body.local_decls, *self.tcx).ty | |
656 | ))?, | |
657 | place_ty.layout, | |
658 | )); | |
e1599b0c | 659 | Ok(place_ty) |
ff7c6d11 XL |
660 | } |
661 | ||
b7449926 | 662 | /// Write a scalar to a place |
60c5eb7d | 663 | #[inline(always)] |
b7449926 | 664 | pub fn write_scalar( |
ff7c6d11 | 665 | &mut self, |
f9f354fc | 666 | val: impl Into<ScalarMaybeUninit<M::PointerTag>>, |
0bf4aa26 | 667 | dest: PlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 668 | ) -> InterpResult<'tcx> { |
a1dfa0c6 | 669 | self.write_immediate(Immediate::Scalar(val.into()), dest) |
b7449926 | 670 | } |
ff7c6d11 | 671 | |
a1dfa0c6 | 672 | /// Write an immediate to a place |
0bf4aa26 | 673 | #[inline(always)] |
a1dfa0c6 | 674 | pub fn write_immediate( |
b7449926 | 675 | &mut self, |
a1dfa0c6 | 676 | src: Immediate<M::PointerTag>, |
0bf4aa26 | 677 | dest: PlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 678 | ) -> InterpResult<'tcx> { |
a1dfa0c6 | 679 | self.write_immediate_no_validate(src, dest)?; |
0bf4aa26 XL |
680 | |
681 | if M::enforce_validity(self) { | |
682 | // Data got changed, better make sure it matches the type! | |
74b04a01 | 683 | self.validate_operand(self.place_to_op(dest)?)?; |
dc9dc135 XL |
684 | } |
685 | ||
686 | Ok(()) | |
687 | } | |
688 | ||
689 | /// Write an `Immediate` to memory. | |
690 | #[inline(always)] | |
691 | pub fn write_immediate_to_mplace( | |
692 | &mut self, | |
693 | src: Immediate<M::PointerTag>, | |
694 | dest: MPlaceTy<'tcx, M::PointerTag>, | |
695 | ) -> InterpResult<'tcx> { | |
696 | self.write_immediate_to_mplace_no_validate(src, dest)?; | |
697 | ||
698 | if M::enforce_validity(self) { | |
699 | // Data got changed, better make sure it matches the type! | |
74b04a01 | 700 | self.validate_operand(dest.into())?; |
0bf4aa26 XL |
701 | } |
702 | ||
703 | Ok(()) | |
704 | } | |
705 | ||
    /// Write an immediate to a place.
    /// If you use this you are responsible for validating that things got copied at the
    /// right type.
    ///
    /// Fast path: if `dest` is a local that is stored as an immediate, the local is
    /// updated in place with no memory access at all; otherwise the write is routed
    /// to `write_immediate_to_mplace_no_validate`.
    fn write_immediate_no_validate(
        &mut self,
        src: Immediate<M::PointerTag>,
        dest: PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        if cfg!(debug_assertions) {
            // This is a very common path, avoid some checks in release mode
            assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
            match src {
                // Pointers must be written into a pointer-sized destination.
                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(_))) => assert_eq!(
                    self.pointer_size(),
                    dest.layout.size,
                    "Size mismatch when writing pointer"
                ),
                // Raw bits carry their own size; it must match the destination layout.
                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Raw { size, .. })) => {
                    assert_eq!(
                        Size::from_bytes(size),
                        dest.layout.size,
                        "Size mismatch when writing bits"
                    )
                }
                Immediate::Scalar(ScalarMaybeUninit::Uninit) => {} // uninit can have any size
                Immediate::ScalarPair(_, _) => {
                    // FIXME: Can we check anything here?
                }
            }
        }
        trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);

        // See if we can avoid an allocation. This is the counterpart to `try_read_immediate`,
        // but not factored as a separate function.
        let mplace = match dest.place {
            Place::Local { frame, local } => {
                match M::access_local_mut(self, frame, local)? {
                    Ok(local) => {
                        // Local can be updated in-place.
                        *local = LocalValue::Live(Operand::Immediate(src));
                        return Ok(());
                    }
                    Err(mplace) => {
                        // The local is in memory, go on below.
                        mplace
                    }
                }
            }
            Place::Ptr(mplace) => mplace, // already referring to memory
        };
        // Re-wrap the memory place with the destination's layout for the actual write.
        let dest = MPlaceTy { mplace, layout: dest.layout };

        // This is already in memory, write there.
        self.write_immediate_to_mplace_no_validate(src, dest)
    }
761 | ||
    /// Write an immediate to memory.
    /// If you use this you are responsible for validating that things got copied at the
    /// right type.
    ///
    /// The destination layout's ABI must be `Scalar` for a `Scalar` immediate and
    /// `ScalarPair` for a `ScalarPair` immediate; anything else is an interpreter bug.
    fn write_immediate_to_mplace_no_validate(
        &mut self,
        value: Immediate<M::PointerTag>,
        dest: MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // Note that it is really important that the type here is the right one, and matches the
        // type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
        // to handle padding properly, which is only correct if we never look at this data with the
        // wrong type.

        // Invalid places are a thing: the return place of a diverging function
        let ptr = match self.check_mplace_access(dest, None)? {
            Some(ptr) => ptr,
            None => return Ok(()), // zero-sized access
        };

        let tcx = *self.tcx;
        // FIXME: We should check that there are dest.layout.size many bytes available in
        // memory. The code below is not sufficient, with enough padding it might not
        // cover all the bytes!
        match value {
            Immediate::Scalar(scalar) => {
                match dest.layout.abi {
                    Abi::Scalar(_) => {} // fine
                    _ => span_bug!(
                        self.cur_span(),
                        "write_immediate_to_mplace: invalid Scalar layout: {:#?}",
                        dest.layout
                    ),
                }
                self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(
                    &tcx,
                    ptr,
                    scalar,
                    dest.layout.size,
                )
            }
            Immediate::ScalarPair(a_val, b_val) => {
                // We checked `ptr_align` above, so all fields will have the alignment they need.
                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
                let (a, b) = match dest.layout.abi {
                    Abi::ScalarPair(ref a, ref b) => (&a.value, &b.value),
                    _ => span_bug!(
                        self.cur_span(),
                        "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
                        dest.layout
                    ),
                };
                // The second component lives at the first offset after `a` that
                // satisfies `b`'s ABI alignment.
                let (a_size, b_size) = (a.size(self), b.size(self));
                let b_offset = a_size.align_to(b.align(self).abi);
                let b_ptr = ptr.offset(b_offset, self)?;

                // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
                // but that does not work: We could be a newtype around a pair, then the
                // fields do not match the `ScalarPair` components.

                self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(&tcx, ptr, a_val, a_size)?;
                self.memory.get_raw_mut(b_ptr.alloc_id)?.write_scalar(&tcx, b_ptr, b_val, b_size)
            }
        }
    }
827 | ||
9fa01778 | 828 | /// Copies the data from an operand to a place. This does not support transmuting! |
0bf4aa26 XL |
829 | /// Use `copy_op_transmute` if the layouts could disagree. |
830 | #[inline(always)] | |
b7449926 | 831 | pub fn copy_op( |
ff7c6d11 | 832 | &mut self, |
0bf4aa26 XL |
833 | src: OpTy<'tcx, M::PointerTag>, |
834 | dest: PlaceTy<'tcx, M::PointerTag>, | |
dc9dc135 | 835 | ) -> InterpResult<'tcx> { |
0bf4aa26 XL |
836 | self.copy_op_no_validate(src, dest)?; |
837 | ||
838 | if M::enforce_validity(self) { | |
839 | // Data got changed, better make sure it matches the type! | |
74b04a01 | 840 | self.validate_operand(self.place_to_op(dest)?)?; |
0bf4aa26 XL |
841 | } |
842 | ||
843 | Ok(()) | |
844 | } | |
845 | ||
    /// Copies the data from an operand to a place. This does not support transmuting!
    /// Use `copy_op_transmute` if the layouts could disagree.
    /// Also, if you use this you are responsible for validating that things get copied at the
    /// right type.
    ///
    /// Fast path: sources that fit into an immediate are written via
    /// `write_immediate_no_validate`; everything else falls through to a raw
    /// memory-to-memory copy.
    fn copy_op_no_validate(
        &mut self,
        src: OpTy<'tcx, M::PointerTag>,
        dest: PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // We do NOT compare the types for equality, because well-typed code can
        // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
        if !mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
            span_bug!(
                self.cur_span(),
                "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
                src.layout.ty,
                dest.layout.ty,
            );
        }

        // Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
        let src = match self.try_read_immediate(src)? {
            Ok(src_val) => {
                assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
                // Yay, we got a value that we can write directly.
                // FIXME: Add a check to make sure that if `src` is indirect,
                // it does not overlap with `dest`.
                return self.write_immediate_no_validate(*src_val, dest);
            }
            Err(mplace) => mplace,
        };
        // Slow path, this does not fit into an immediate. Just memcpy.
        trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);

        // This interprets `src.meta` with the `dest` local's layout, if an unsized local
        // is being initialized!
        let (dest, size) = self.force_allocation_maybe_sized(dest, src.meta)?;
        // `size` is `None` when `dest` already existed in memory; then the layout
        // must be sized and supplies the copy size.
        let size = size.unwrap_or_else(|| {
            assert!(
                !dest.layout.is_unsized(),
                "Cannot copy into already initialized unsized place"
            );
            dest.layout.size
        });
        assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");

        let src = self
            .check_mplace_access(src, Some(size))
            .expect("places should be checked on creation");
        let dest = self
            .check_mplace_access(dest, Some(size))
            .expect("places should be checked on creation");
        // Both sides must agree on whether the access is zero-sized.
        let (src_ptr, dest_ptr) = match (src, dest) {
            (Some(src_ptr), Some(dest_ptr)) => (src_ptr, dest_ptr),
            (None, None) => return Ok(()), // zero-sized copy
            _ => bug!("The pointers should both be Some or both None"),
        };

        self.memory.copy(src_ptr, dest_ptr, size, /*nonoverlapping*/ true)
    }
906 | ||
    /// Copies the data from an operand to a place. The layouts may disagree, but they must
    /// have the same size.
    pub fn copy_op_transmute(
        &mut self,
        src: OpTy<'tcx, M::PointerTag>,
        dest: PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
            // Fast path: Just use normal `copy_op`
            return self.copy_op(src, dest);
        }
        // We still require the sizes to match.
        if src.layout.size != dest.layout.size {
            // FIXME: This should be an assert instead of an error, but if we transmute within an
            // array length computation, `typeck` may not have yet been run and errored out. In fact
            // most likely we *are* running `typeck` right now. Investigate whether we can bail out
            // on `typeck_results().has_errors` at all const eval entry points.
            debug!("Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest);
            self.tcx.sess.delay_span_bug(
                self.cur_span(),
                "size-changing transmute, should have been caught by transmute checking",
            );
            throw_inval!(TransmuteSizeDiff(src.layout.ty, dest.layout.ty));
        }
        // Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want
        // to avoid that here.
        assert!(
            !src.layout.is_unsized() && !dest.layout.is_unsized(),
            "Cannot transmute unsized data"
        );

        // The hard case is `ScalarPair`. `src` is already read from memory in this case,
        // using `src.layout` to figure out which bytes to use for the 1st and 2nd field.
        // We have to write them to `dest` at the offsets they were *read at*, which is
        // not necessarily the same as the offsets in `dest.layout`!
        // Hence we do the copy with the source layout on both sides. We also make sure to write
        // into memory, because if `dest` is a local we would not even have a way to write
        // at the `src` offsets; the fact that we came from a different layout would
        // just be lost.
        let dest = self.force_allocation(dest)?;
        self.copy_op_no_validate(
            src,
            PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout }),
        )?;

        if M::enforce_validity(self) {
            // Data got changed, better make sure it matches the type!
            // Note: validated against `dest.layout` (the destination type), not `src.layout`.
            self.validate_operand(dest.into())?;
        }

        Ok(())
    }
959 | ||
    /// Ensures that a place is in memory, and returns where it is.
    /// If the place currently refers to a local that doesn't yet have a matching allocation,
    /// create such an allocation.
    /// This is essentially `force_to_memplace`.
    ///
    /// This supports unsized types and returns the computed size to avoid some
    /// redundant computation when copying; use `force_allocation` for a simpler, sized-only
    /// version.
    ///
    /// Returns the memory place together with `Some(size)` if a fresh allocation was
    /// made, and `None` if the place was already backed by memory.
    pub fn force_allocation_maybe_sized(
        &mut self,
        place: PlaceTy<'tcx, M::PointerTag>,
        meta: MemPlaceMeta<M::PointerTag>,
    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
        let (mplace, size) = match place.place {
            Place::Local { frame, local } => {
                match M::access_local_mut(self, frame, local)? {
                    Ok(&mut local_val) => {
                        // We need to make an allocation.

                        // We need the layout of the local. We can NOT use the layout we got,
                        // that might e.g., be an inner field of a struct with `Scalar` layout,
                        // that has different alignment than the outer field.
                        let local_layout =
                            self.layout_of(&self.stack()[frame], local, None)?;
                        // We also need to support unsized types, and hence cannot use `allocate`.
                        let (size, align) = self
                            .size_and_align_of(meta, local_layout)?
                            .expect("Cannot allocate for non-dyn-sized type");
                        let ptr = self.memory.allocate(size, align, MemoryKind::Stack);
                        let mplace = MemPlace { ptr: ptr.into(), align, meta };
                        if let LocalValue::Live(Operand::Immediate(value)) = local_val {
                            // Preserve old value.
                            // We don't have to validate as we can assume the local
                            // was already valid for its type.
                            let mplace = MPlaceTy { mplace, layout: local_layout };
                            self.write_immediate_to_mplace_no_validate(value, mplace)?;
                        }
                        // Now we can call `access_mut` again, asserting it goes well,
                        // and actually overwrite things.
                        *M::access_local_mut(self, frame, local).unwrap().unwrap() =
                            LocalValue::Live(Operand::Indirect(mplace));
                        (mplace, Some(size))
                    }
                    Err(mplace) => (mplace, None), // this already was an indirect local
                }
            }
            Place::Ptr(mplace) => (mplace, None),
        };
        // Return with the original layout, so that the caller can go on
        Ok((MPlaceTy { mplace, layout: place.layout }, size))
    }
1011 | ||
1012 | #[inline(always)] | |
1013 | pub fn force_allocation( | |
1014 | &mut self, | |
1015 | place: PlaceTy<'tcx, M::PointerTag>, | |
dc9dc135 | 1016 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
dfeec247 | 1017 | Ok(self.force_allocation_maybe_sized(place, MemPlaceMeta::None)?.0) |
ff7c6d11 XL |
1018 | } |
1019 | ||
b7449926 | 1020 | pub fn allocate( |
ff7c6d11 | 1021 | &mut self, |
ba9703b0 XL |
1022 | layout: TyAndLayout<'tcx>, |
1023 | kind: MemoryKind<M::MemoryKind>, | |
0731742a | 1024 | ) -> MPlaceTy<'tcx, M::PointerTag> { |
48663c56 XL |
1025 | let ptr = self.memory.allocate(layout.size, layout.align.abi, kind); |
1026 | MPlaceTy::from_aligned_ptr(ptr, layout) | |
b7449926 | 1027 | } |
ff7c6d11 | 1028 | |
60c5eb7d XL |
1029 | /// Returns a wide MPlace. |
1030 | pub fn allocate_str( | |
1031 | &mut self, | |
1032 | str: &str, | |
ba9703b0 | 1033 | kind: MemoryKind<M::MemoryKind>, |
60c5eb7d | 1034 | ) -> MPlaceTy<'tcx, M::PointerTag> { |
ba9703b0 XL |
1035 | let ptr = self.memory.allocate_bytes(str.as_bytes(), kind); |
1036 | let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self); | |
60c5eb7d XL |
1037 | let mplace = MemPlace { |
1038 | ptr: ptr.into(), | |
1039 | align: Align::from_bytes(1).unwrap(), | |
dfeec247 | 1040 | meta: MemPlaceMeta::Meta(meta), |
60c5eb7d XL |
1041 | }; |
1042 | ||
1043 | let layout = self.layout_of(self.tcx.mk_static_str()).unwrap(); | |
1044 | MPlaceTy { mplace, layout } | |
1045 | } | |
1046 | ||
    /// Writes the discriminant of the given variant.
    ///
    /// Handles all three layout encodings: `Single` (nothing to write, just assert),
    /// `Direct` tag (truncate the discriminant into the tag field), and `Niche`
    /// (encode the variant index into the niche of the dataful variant's layout).
    pub fn write_discriminant(
        &mut self,
        variant_index: VariantIdx,
        dest: PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // Layout computation excludes uninhabited variants from consideration
        // therefore there's no way to represent those variants in the given layout.
        if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() {
            throw_ub!(Unreachable);
        }

        match dest.layout.variants {
            Variants::Single { index } => {
                // Univariant layout: there is no tag in memory; just check consistency.
                assert_eq!(index, variant_index);
            }
            Variants::Multiple {
                tag_encoding: TagEncoding::Direct,
                tag: ref tag_layout,
                tag_field,
                ..
            } => {
                // No need to validate that the discriminant here because the
                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.

                let discr_val =
                    dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val;

                // raw discriminants for enums are isize or bigger during
                // their computation, but the in-memory tag is the smallest possible
                // representation
                let size = tag_layout.value.size(self);
                let tag_val = truncate(discr_val, size);

                let tag_dest = self.place_field(dest, tag_field)?;
                self.write_scalar(Scalar::from_uint(tag_val, size), tag_dest)?;
            }
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start },
                tag: ref tag_layout,
                tag_field,
                ..
            } => {
                // No need to validate that the discriminant here because the
                // `TyAndLayout::for_variant()` call earlier already checks the variant is valid.

                // The dataful variant is encoded implicitly: writing its fields is
                // enough, so only non-dataful variants need a tag write.
                if variant_index != dataful_variant {
                    let variants_start = niche_variants.start().as_u32();
                    let variant_index_relative = variant_index
                        .as_u32()
                        .checked_sub(variants_start)
                        .expect("overflow computing relative variant idx");
                    // We need to use machine arithmetic when taking into account `niche_start`:
                    // tag_val = variant_index_relative + niche_start_val
                    let tag_layout = self.layout_of(tag_layout.value.to_int_ty(*self.tcx))?;
                    let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
                    let variant_index_relative_val =
                        ImmTy::from_uint(variant_index_relative, tag_layout);
                    let tag_val = self.binary_op(
                        mir::BinOp::Add,
                        variant_index_relative_val,
                        niche_start_val,
                    )?;
                    // Write result.
                    let niche_dest = self.place_field(dest, tag_field)?;
                    self.write_immediate(*tag_val, niche_dest)?;
                }
            }
        }

        Ok(())
    }
ff7c6d11 | 1120 | |
a1dfa0c6 XL |
1121 | pub fn raw_const_to_mplace( |
1122 | &self, | |
1b1a35ee | 1123 | raw: ConstAlloc<'tcx>, |
dc9dc135 | 1124 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
a1dfa0c6 | 1125 | // This must be an allocation in `tcx` |
f9f354fc | 1126 | let _ = self.tcx.global_alloc(raw.alloc_id); |
3dfed10e | 1127 | let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?; |
a1dfa0c6 | 1128 | let layout = self.layout_of(raw.ty)?; |
dc9dc135 | 1129 | Ok(MPlaceTy::from_aligned_ptr(ptr, layout)) |
a1dfa0c6 XL |
1130 | } |
1131 | ||
    /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
    /// Also return some more information so drop doesn't have to run the same code twice.
    ///
    /// The dynamic type (and the drop instance) is read from the place's vtable;
    /// the returned place has the concrete layout and no metadata.
    pub(super) fn unpack_dyn_trait(
        &self,
        mplace: MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
        let vtable = mplace.vtable(); // also sanity checks the type
        let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
        let layout = self.layout_of(ty)?;

        // More sanity checks
        if cfg!(debug_assertions) {
            // Size/align stored in the vtable must agree with the computed layout.
            let (size, align) = self.read_size_and_align_from_vtable(vtable)?;
            assert_eq!(size, layout.size);
            // only ABI alignment is preserved
            assert_eq!(align, layout.align.abi);
        }

        // Keep the data pointer, drop the vtable metadata, adopt the concrete layout.
        let mplace = MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..*mplace }, layout };
        Ok((instance, mplace))
    }
1153 | } |