]>
Commit | Line | Data |
---|---|---|
b7449926 XL |
1 | //! Computations on places -- field projections, going from mir::Place, and writing |
2 | //! into a place. | |
3 | //! All high-level functions to write to memory work on places as destinations. | |
4 | ||
5 | use std::convert::TryFrom; | |
0bf4aa26 | 6 | use std::hash::Hash; |
b7449926 | 7 | |
17df50a5 | 8 | use rustc_ast::Mutability; |
60c5eb7d | 9 | use rustc_macros::HashStable; |
ba9703b0 | 10 | use rustc_middle::mir; |
c295e0f8 | 11 | use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout}; |
ba9703b0 | 12 | use rustc_middle::ty::{self, Ty}; |
f035d41b | 13 | use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding}; |
c295e0f8 | 14 | use rustc_target::abi::{HasDataLayout, Size, VariantIdx, Variants}; |
ff7c6d11 | 15 | |
0bf4aa26 | 16 | use super::{ |
136023e0 XL |
17 | alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg, |
18 | ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, | |
19 | Operand, Pointer, PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit, | |
b7449926 | 20 | }; |
ff7c6d11 | 21 | |
#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
/// Information required for the sound usage of a `MemPlace`.
/// Accessed via `unwrap_meta`/`has_meta` below.
pub enum MemPlaceMeta<Tag: Provenance = AllocId> {
    /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
    Meta(Scalar<Tag>),
    /// `Sized` types or unsized `extern type`
    None,
    /// The address of this place may not be taken. This protects the `MemPlace` from coming from
    /// a ZST Operand without a backing allocation and being converted to an integer address. This
    /// should be impossible, because you can't take the address of an operand, but this is a second
    /// protection layer ensuring that we don't mess up.
    Poison,
}
35 | ||
6a06907d XL |
// Guard against accidental size regressions of this type (checked on 64-bit x86 only).
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MemPlaceMeta, 24);
38 | ||
136023e0 | 39 | impl<Tag: Provenance> MemPlaceMeta<Tag> { |
f9f354fc | 40 | pub fn unwrap_meta(self) -> Scalar<Tag> { |
dfeec247 XL |
41 | match self { |
42 | Self::Meta(s) => s, | |
43 | Self::None | Self::Poison => { | |
44 | bug!("expected wide pointer extra data (e.g. slice length or trait object vtable)") | |
45 | } | |
46 | } | |
47 | } | |
48 | fn has_meta(self) -> bool { | |
49 | match self { | |
50 | Self::Meta(_) => true, | |
51 | Self::None | Self::Poison => false, | |
52 | } | |
53 | } | |
dfeec247 XL |
54 | } |
55 | ||
136023e0 XL |
#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
pub struct MemPlace<Tag: Provenance = AllocId> {
    /// The pointer can be a pure integer, with the `None` tag.
    pub ptr: Pointer<Option<Tag>>,
    /// Alignment to assume when accessing this place.
    pub align: Align,
    /// Metadata for unsized places. Interpretation is up to the type.
    /// Must not be present for sized types, but can be missing for unsized types
    /// (e.g., `extern type`).
    pub meta: MemPlaceMeta<Tag>,
}
66 | ||
// Guard against accidental size regressions of this type (checked on 64-bit x86 only).
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MemPlace, 48);
6a06907d | 69 | |
136023e0 XL |
#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
pub enum Place<Tag: Provenance = AllocId> {
    /// A place referring to a value allocated in the `Memory` system.
    Ptr(MemPlace<Tag>),

    /// To support alloc-free locals, we are able to write directly to a local.
    /// (Without that optimization, we'd just always be a `MemPlace`.)
    /// `frame` is the index of the stack frame, `local` the local within it.
    Local { frame: usize, local: mir::Local },
}
79 | ||
// Guard against accidental size regressions of this type (checked on 64-bit x86 only).
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Place, 56);
6a06907d | 82 | |
/// A `Place` together with the layout of its type.
#[derive(Copy, Clone, Debug)]
pub struct PlaceTy<'tcx, Tag: Provenance = AllocId> {
    place: Place<Tag>, // Keep this private; it helps enforce invariants.
    pub layout: TyAndLayout<'tcx>,
}
88 | ||
// Guard against accidental size regressions of this type (checked on 64-bit x86 only).
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(PlaceTy<'_>, 72);
6a06907d | 91 | |
136023e0 | 92 | impl<'tcx, Tag: Provenance> std::ops::Deref for PlaceTy<'tcx, Tag> { |
0bf4aa26 | 93 | type Target = Place<Tag>; |
b7449926 | 94 | #[inline(always)] |
0bf4aa26 | 95 | fn deref(&self) -> &Place<Tag> { |
b7449926 XL |
96 | &self.place |
97 | } | |
ff7c6d11 XL |
98 | } |
99 | ||
/// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
pub struct MPlaceTy<'tcx, Tag: Provenance = AllocId> {
    // The in-memory place this refers to; exposed read-only via `Deref`.
    mplace: MemPlace<Tag>,
    pub layout: TyAndLayout<'tcx>,
}
106 | ||
// Guard against accidental size regressions of this type (checked on 64-bit x86 only).
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 64);
6a06907d | 109 | |
136023e0 | 110 | impl<'tcx, Tag: Provenance> std::ops::Deref for MPlaceTy<'tcx, Tag> { |
0bf4aa26 | 111 | type Target = MemPlace<Tag>; |
b7449926 | 112 | #[inline(always)] |
0bf4aa26 | 113 | fn deref(&self) -> &MemPlace<Tag> { |
b7449926 XL |
114 | &self.mplace |
115 | } | |
116 | } | |
117 | ||
136023e0 | 118 | impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> { |
b7449926 | 119 | #[inline(always)] |
0bf4aa26 | 120 | fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self { |
dfeec247 | 121 | PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout } |
ff7c6d11 | 122 | } |
b7449926 | 123 | } |
ff7c6d11 | 124 | |
136023e0 | 125 | impl<Tag: Provenance> MemPlace<Tag> { |
b7449926 | 126 | #[inline(always)] |
136023e0 | 127 | pub fn from_ptr(ptr: Pointer<Option<Tag>>, align: Align) -> Self { |
dfeec247 | 128 | MemPlace { ptr, align, meta: MemPlaceMeta::None } |
ff7c6d11 XL |
129 | } |
130 | ||
136023e0 XL |
131 | /// Adjust the provenance of the main pointer (metadata is unaffected). |
132 | pub fn map_provenance(self, f: impl FnOnce(Option<Tag>) -> Option<Tag>) -> Self { | |
133 | MemPlace { ptr: self.ptr.map_provenance(f), ..self } | |
ff7c6d11 XL |
134 | } |
135 | ||
60c5eb7d | 136 | /// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space. |
a1dfa0c6 XL |
137 | /// This is the inverse of `ref_to_mplace`. |
138 | #[inline(always)] | |
136023e0 | 139 | pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Tag> { |
a1dfa0c6 | 140 | match self.meta { |
136023e0 XL |
141 | MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)), |
142 | MemPlaceMeta::Meta(meta) => { | |
143 | Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx).into(), meta.into()) | |
144 | } | |
dfeec247 XL |
145 | MemPlaceMeta::Poison => bug!( |
146 | "MPlaceTy::dangling may never be used to produce a \ | |
147 | place that will have the address of its pointee taken" | |
148 | ), | |
a1dfa0c6 XL |
149 | } |
150 | } | |
151 | ||
5869c6ff | 152 | #[inline] |
a2a8927a | 153 | pub fn offset<'tcx>( |
a1dfa0c6 XL |
154 | self, |
155 | offset: Size, | |
dfeec247 | 156 | meta: MemPlaceMeta<Tag>, |
a1dfa0c6 | 157 | cx: &impl HasDataLayout, |
dc9dc135 | 158 | ) -> InterpResult<'tcx, Self> { |
a1dfa0c6 | 159 | Ok(MemPlace { |
136023e0 | 160 | ptr: self.ptr.offset(offset, cx)?, |
a1dfa0c6 XL |
161 | align: self.align.restrict_for_offset(offset), |
162 | meta, | |
163 | }) | |
164 | } | |
0bf4aa26 | 165 | } |
b7449926 | 166 | |
136023e0 | 167 | impl<'tcx, Tag: Provenance> MPlaceTy<'tcx, Tag> { |
0bf4aa26 XL |
168 | /// Produces a MemPlace that works for ZST but nothing else |
169 | #[inline] | |
136023e0 | 170 | pub fn dangling(layout: TyAndLayout<'tcx>) -> Self { |
dfeec247 | 171 | let align = layout.align.abi; |
136023e0 | 172 | let ptr = Pointer::new(None, Size::from_bytes(align.bytes())); // no provenance, absolute address |
dfeec247 XL |
173 | // `Poison` this to make sure that the pointer value `ptr` is never observable by the program. |
174 | MPlaceTy { mplace: MemPlace { ptr, align, meta: MemPlaceMeta::Poison }, layout } | |
ff7c6d11 XL |
175 | } |
176 | ||
0731742a | 177 | #[inline] |
a1dfa0c6 | 178 | pub fn offset( |
6a06907d | 179 | &self, |
a1dfa0c6 | 180 | offset: Size, |
dfeec247 | 181 | meta: MemPlaceMeta<Tag>, |
ba9703b0 | 182 | layout: TyAndLayout<'tcx>, |
a1dfa0c6 | 183 | cx: &impl HasDataLayout, |
dc9dc135 | 184 | ) -> InterpResult<'tcx, Self> { |
dfeec247 | 185 | Ok(MPlaceTy { mplace: self.mplace.offset(offset, meta, cx)?, layout }) |
a1dfa0c6 XL |
186 | } |
187 | ||
b7449926 | 188 | #[inline] |
136023e0 | 189 | pub fn from_aligned_ptr(ptr: Pointer<Option<Tag>>, layout: TyAndLayout<'tcx>) -> Self { |
a1dfa0c6 | 190 | MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout } |
ff7c6d11 XL |
191 | } |
192 | ||
b7449926 | 193 | #[inline] |
6a06907d | 194 | pub(super) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> { |
b7449926 | 195 | if self.layout.is_unsized() { |
0bf4aa26 | 196 | // We need to consult `meta` metadata |
1b1a35ee | 197 | match self.layout.ty.kind() { |
ba9703b0 | 198 | ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_machine_usize(cx), |
b7449926 | 199 | _ => bug!("len not supported on unsized type {:?}", self.layout.ty), |
ff7c6d11 | 200 | } |
b7449926 XL |
201 | } else { |
202 | // Go through the layout. There are lots of types that support a length, | |
3c0e092e | 203 | // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!) |
b7449926 | 204 | match self.layout.fields { |
ba9703b0 | 205 | FieldsShape::Array { count, .. } => Ok(count), |
b7449926 XL |
206 | _ => bug!("len not supported on sized type {:?}", self.layout.ty), |
207 | } | |
208 | } | |
209 | } | |
ff7c6d11 | 210 | |
b7449926 | 211 | #[inline] |
6a06907d | 212 | pub(super) fn vtable(&self) -> Scalar<Tag> { |
1b1a35ee | 213 | match self.layout.ty.kind() { |
dfeec247 | 214 | ty::Dynamic(..) => self.mplace.meta.unwrap_meta(), |
b7449926 | 215 | _ => bug!("vtable not supported on type {:?}", self.layout.ty), |
ff7c6d11 XL |
216 | } |
217 | } | |
218 | } | |
219 | ||
416331ca | 220 | // These are defined here because they produce a place. |
136023e0 | 221 | impl<'tcx, Tag: Provenance> OpTy<'tcx, Tag> { |
b7449926 | 222 | #[inline(always)] |
dfeec247 XL |
223 | /// Note: do not call `as_ref` on the resulting place. This function should only be used to |
224 | /// read from the resulting mplace, not to get its address back. | |
136023e0 | 225 | pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> { |
6a06907d | 226 | match **self { |
b7449926 | 227 | Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }), |
136023e0 | 228 | Operand::Immediate(_) if self.layout.is_zst() => Ok(MPlaceTy::dangling(self.layout)), |
ba9703b0 | 229 | Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)), |
b7449926 XL |
230 | } |
231 | } | |
232 | ||
233 | #[inline(always)] | |
dfeec247 XL |
234 | /// Note: do not call `as_ref` on the resulting place. This function should only be used to |
235 | /// read from the resulting mplace, not to get its address back. | |
136023e0 XL |
236 | pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Tag> { |
237 | self.try_as_mplace().unwrap() | |
b7449926 XL |
238 | } |
239 | } | |
240 | ||
136023e0 | 241 | impl<Tag: Provenance> Place<Tag> { |
b7449926 | 242 | #[inline] |
416331ca | 243 | pub fn assert_mem_place(self) -> MemPlace<Tag> { |
b7449926 XL |
244 | match self { |
245 | Place::Ptr(mplace) => mplace, | |
416331ca | 246 | _ => bug!("assert_mem_place: expected Place::Ptr, got {:?}", self), |
b7449926 XL |
247 | } |
248 | } | |
b7449926 XL |
249 | } |
250 | ||
136023e0 | 251 | impl<'tcx, Tag: Provenance> PlaceTy<'tcx, Tag> { |
b7449926 | 252 | #[inline] |
416331ca XL |
253 | pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> { |
254 | MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout } | |
b7449926 XL |
255 | } |
256 | } | |
257 | ||
0bf4aa26 | 258 | // separating the pointer tag for `impl Trait`, see https://github.com/rust-lang/rust/issues/54385 |
ba9703b0 | 259 | impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M> |
0bf4aa26 | 260 | where |
a1dfa0c6 | 261 | // FIXME: Working around https://github.com/rust-lang/rust/issues/54385 |
136023e0 | 262 | Tag: Provenance + Eq + Hash + 'static, |
dc9dc135 | 263 | M: Machine<'mir, 'tcx, PointerTag = Tag>, |
0bf4aa26 | 264 | { |
    /// Take a value, which represents a (thin or wide) reference, and make it a place.
    /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
    ///
    /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
    /// want to ever use the place for memory access!
    /// Generally prefer `deref_operand`.
    pub fn ref_to_mplace(
        &self,
        val: &ImmTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        let pointee_type =
            val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
        let layout = self.layout_of(pointee_type)?;
        // Thin pointers are a single scalar; wide pointers carry their metadata alongside.
        let (ptr, meta) = match **val {
            Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None),
            Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta.check_init()?)),
        };

        let mplace = MemPlace {
            ptr: self.scalar_to_ptr(ptr.check_init()?),
            // We could use the run-time alignment here. For now, we do not, because
            // the point of tracking the alignment here is to make sure that the *static*
            // alignment information emitted with the loads is correct. The run-time
            // alignment can only be more restrictive.
            align: layout.align.abi,
            meta,
        };
        Ok(MPlaceTy { mplace, layout })
    }
294 | ||
416331ca XL |
295 | /// Take an operand, representing a pointer, and dereference it to a place -- that |
296 | /// will always be a MemPlace. Lives in `place.rs` because it creates a place. | |
a1dfa0c6 XL |
297 | pub fn deref_operand( |
298 | &self, | |
6a06907d | 299 | src: &OpTy<'tcx, M::PointerTag>, |
dc9dc135 | 300 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
a1dfa0c6 XL |
301 | let val = self.read_immediate(src)?; |
302 | trace!("deref to {} on {:?}", val.layout.ty, *val); | |
136023e0 XL |
303 | let mplace = self.ref_to_mplace(&val)?; |
304 | self.check_mplace_access(mplace, CheckInAllocMsg::DerefTest)?; | |
305 | Ok(mplace) | |
0bf4aa26 XL |
306 | } |
307 | ||
416331ca | 308 | #[inline] |
17df50a5 | 309 | pub(super) fn get_alloc( |
416331ca | 310 | &self, |
6a06907d | 311 | place: &MPlaceTy<'tcx, M::PointerTag>, |
17df50a5 XL |
312 | ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::PointerTag, M::AllocExtra>>> { |
313 | assert!(!place.layout.is_unsized()); | |
314 | assert!(!place.meta.has_meta()); | |
315 | let size = place.layout.size; | |
316 | self.memory.get(place.ptr, size, place.align) | |
317 | } | |
318 | ||
319 | #[inline] | |
320 | pub(super) fn get_alloc_mut( | |
321 | &mut self, | |
322 | place: &MPlaceTy<'tcx, M::PointerTag>, | |
323 | ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::PointerTag, M::AllocExtra>>> { | |
324 | assert!(!place.layout.is_unsized()); | |
325 | assert!(!place.meta.has_meta()); | |
326 | let size = place.layout.size; | |
327 | self.memory.get_mut(place.ptr, size, place.align) | |
416331ca XL |
328 | } |
329 | ||
a2a8927a | 330 | /// Check if this mplace is dereferenceable and sufficiently aligned. |
136023e0 | 331 | fn check_mplace_access( |
e1599b0c | 332 | &self, |
136023e0 XL |
333 | mplace: MPlaceTy<'tcx, M::PointerTag>, |
334 | msg: CheckInAllocMsg, | |
335 | ) -> InterpResult<'tcx> { | |
dfeec247 | 336 | let (size, align) = self |
136023e0 XL |
337 | .size_and_align_of_mplace(&mplace)? |
338 | .unwrap_or((mplace.layout.size, mplace.layout.align.abi)); | |
339 | assert!(mplace.mplace.align <= align, "dynamic alignment less strict than static one?"); | |
340 | let align = M::enforce_alignment(&self.memory.extra).then_some(align); | |
341 | self.memory.check_ptr_access_align(mplace.ptr, size, align.unwrap_or(Align::ONE), msg)?; | |
342 | Ok(()) | |
416331ca XL |
343 | } |
344 | ||
ba9703b0 XL |
    /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
    /// always possible without allocating, so it can take `&self`. Also return the field's layout.
    /// This supports both struct and array fields.
    ///
    /// This also works for arrays, but then the `usize` index type is restricting.
    /// For indexing into arrays, use `mplace_index`.
    #[inline(always)]
    pub fn mplace_field(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
        field: usize,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // Static offset of the field according to the layout.
        let offset = base.layout.fields.offset(field);
        let field_layout = base.layout.field(self, field);

        // Offset may need adjustment for unsized fields.
        let (meta, offset) = if field_layout.is_unsized() {
            // Re-use parent metadata to determine dynamic field layout.
            // With custom DSTS, this *will* execute user-defined code, but the same
            // happens at run-time so that's okay.
            match self.size_and_align_of(&base.meta, &field_layout)? {
                // Align the statically-computed offset up to the field's dynamic alignment.
                Some((_, align)) => (base.meta, offset.align_to(align)),
                None => {
                    // For unsized types with an extern type tail we perform no adjustments.
                    // NOTE: keep this in sync with `PlaceRef::project_field` in the codegen backend.
                    assert!(matches!(base.meta, MemPlaceMeta::None));
                    (base.meta, offset)
                }
            }
        } else {
            // base.meta could be present; we might be accessing a sized field of an unsized
            // struct.
            (MemPlaceMeta::None, offset)
        };

        // We do not look at `base.layout.align` nor `field_layout.align`, unlike
        // codegen -- mostly to see if we can get away with that
        base.offset(offset, meta, field_layout, self)
    }
384 | ||
ba9703b0 XL |
385 | /// Index into an array. |
386 | #[inline(always)] | |
387 | pub fn mplace_index( | |
388 | &self, | |
6a06907d | 389 | base: &MPlaceTy<'tcx, M::PointerTag>, |
ba9703b0 XL |
390 | index: u64, |
391 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { | |
392 | // Not using the layout method because we want to compute on u64 | |
393 | match base.layout.fields { | |
394 | FieldsShape::Array { stride, .. } => { | |
395 | let len = base.len(self)?; | |
396 | if index >= len { | |
397 | // This can only be reached in ConstProp and non-rustc-MIR. | |
398 | throw_ub!(BoundsCheckFailed { len, index }); | |
399 | } | |
400 | let offset = stride * index; // `Size` multiplication | |
401 | // All fields have the same layout. | |
94222f64 | 402 | let field_layout = base.layout.field(self, 0); |
ba9703b0 XL |
403 | |
404 | assert!(!field_layout.is_unsized()); | |
405 | base.offset(offset, MemPlaceMeta::None, field_layout, self) | |
406 | } | |
f035d41b XL |
407 | _ => span_bug!( |
408 | self.cur_span(), | |
409 | "`mplace_index` called on non-array type {:?}", | |
410 | base.layout.ty | |
411 | ), | |
ba9703b0 XL |
412 | } |
413 | } | |
414 | ||
b7449926 XL |
415 | // Iterates over all fields of an array. Much more efficient than doing the |
416 | // same by repeatedly calling `mplace_array`. | |
a2a8927a | 417 | pub(super) fn mplace_array_fields<'a>( |
0531ce1d | 418 | &self, |
6a06907d XL |
419 | base: &'a MPlaceTy<'tcx, Tag>, |
420 | ) -> InterpResult<'tcx, impl Iterator<Item = InterpResult<'tcx, MPlaceTy<'tcx, Tag>>> + 'a> | |
0bf4aa26 | 421 | { |
b7449926 | 422 | let len = base.len(self)?; // also asserts that we have a type where this makes sense |
ee023bcb FG |
423 | let FieldsShape::Array { stride, .. } = base.layout.fields else { |
424 | span_bug!(self.cur_span(), "mplace_array_fields: expected an array layout"); | |
94b46f34 | 425 | }; |
94222f64 | 426 | let layout = base.layout.field(self, 0); |
b7449926 | 427 | let dl = &self.tcx.data_layout; |
ba9703b0 XL |
428 | // `Size` multiplication |
429 | Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl))) | |
0531ce1d XL |
430 | } |
431 | ||
    /// Project to the subslice `base[from..to]` (or `base[from..len-to]` if `from_end`).
    fn mplace_subslice(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
        from: u64,
        to: u64,
        from_end: bool,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        let len = base.len(self)?; // also asserts that we have a type where this makes sense
        // When `from_end` is set, `to` counts backwards from the length.
        let actual_to = if from_end {
            if from.checked_add(to).map_or(true, |to| to > len) {
                // This can only be reached in ConstProp and non-rustc-MIR.
                throw_ub!(BoundsCheckFailed { len: len, index: from.saturating_add(to) });
            }
            len.checked_sub(to).unwrap()
        } else {
            to
        };

        // Not using layout method because that works with usize, and does not work with slices
        // (that have count 0 in their layout).
        let from_offset = match base.layout.fields {
            FieldsShape::Array { stride, .. } => stride * from, // `Size` multiplication is checked
            _ => {
                span_bug!(self.cur_span(), "unexpected layout of index access: {:#?}", base.layout)
            }
        };

        // Compute meta and new layout
        let inner_len = actual_to.checked_sub(from).unwrap();
        let (meta, ty) = match base.layout.ty.kind() {
            // It is not nice to match on the type, but that seems to be the only way to
            // implement this.
            ty::Array(inner, _) => (MemPlaceMeta::None, self.tcx.mk_array(*inner, inner_len)),
            ty::Slice(..) => {
                let len = Scalar::from_machine_usize(inner_len, self);
                (MemPlaceMeta::Meta(len), base.layout.ty)
            }
            _ => {
                span_bug!(self.cur_span(), "cannot subslice non-array type: `{:?}`", base.layout.ty)
            }
        };
        let layout = self.layout_of(ty)?;
        base.offset(from_offset, meta, layout, self)
    }
476 | ||
6a06907d | 477 | pub(crate) fn mplace_downcast( |
b7449926 | 478 | &self, |
6a06907d | 479 | base: &MPlaceTy<'tcx, M::PointerTag>, |
a1dfa0c6 | 480 | variant: VariantIdx, |
dc9dc135 | 481 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
ee023bcb FG |
482 | // Downcasts only change the layout. |
483 | // (In particular, no check about whether this is even the active variant -- that's by design, | |
484 | // see https://github.com/rust-lang/rust/issues/93688#issuecomment-1032929496.) | |
dfeec247 | 485 | assert!(!base.meta.has_meta()); |
6a06907d | 486 | Ok(MPlaceTy { layout: base.layout.for_variant(self, variant), ..*base }) |
b7449926 XL |
487 | } |
488 | ||
    /// Project into an mplace
    pub(super) fn mplace_projection(
        &self,
        base: &MPlaceTy<'tcx, M::PointerTag>,
        proj_elem: mir::PlaceElem<'tcx>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        use rustc_middle::mir::ProjectionElem::*;
        Ok(match proj_elem {
            Field(field, _) => self.mplace_field(base, field.index())?,
            Downcast(_, variant) => self.mplace_downcast(base, variant)?,
            Deref => self.deref_operand(&base.into())?,

            Index(local) => {
                // The runtime index lives in a `usize`-typed local; read its current value.
                let layout = self.layout_of(self.tcx.types.usize)?;
                let n = self.access_local(self.frame(), local, Some(layout))?;
                let n = self.read_scalar(&n)?;
                let n = n.to_machine_usize(self)?;
                self.mplace_index(base, n)?
            }

            ConstantIndex { offset, min_length, from_end } => {
                let n = base.len(self)?;
                if n < min_length {
                    // This can only be reached in ConstProp and non-rustc-MIR.
                    throw_ub!(BoundsCheckFailed { len: min_length, index: n });
                }

                let index = if from_end {
                    // `offset` counts backwards from the end of the array.
                    assert!(0 < offset && offset <= min_length);
                    n.checked_sub(offset).unwrap()
                } else {
                    assert!(offset < min_length);
                    offset
                };

                self.mplace_index(base, index)?
            }

            Subslice { from, to, from_end } => self.mplace_subslice(base, from, to, from_end)?,
        })
    }
530 | ||
3c0e092e XL |
531 | /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements. |
532 | /// Also returns the number of elements. | |
533 | pub fn mplace_to_simd( | |
534 | &self, | |
535 | base: &MPlaceTy<'tcx, M::PointerTag>, | |
536 | ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> { | |
537 | // Basically we just transmute this place into an array following simd_size_and_type. | |
538 | // (Transmuting is okay since this is an in-memory place. We also double-check the size | |
539 | // stays the same.) | |
540 | let (len, e_ty) = base.layout.ty.simd_size_and_type(*self.tcx); | |
541 | let array = self.tcx.mk_array(e_ty, len); | |
542 | let layout = self.layout_of(array)?; | |
543 | assert_eq!(layout.size, base.layout.size); | |
544 | Ok((MPlaceTy { layout, ..*base }, len)) | |
545 | } | |
546 | ||
9fa01778 | 547 | /// Gets the place of a field inside the place, and also the field's type. |
b7449926 | 548 | /// Just a convenience function, but used quite a bit. |
a1dfa0c6 XL |
549 | /// This is the only projection that might have a side-effect: We cannot project |
550 | /// into the field of a local `ScalarPair`, we have to first allocate it. | |
b7449926 | 551 | pub fn place_field( |
ff7c6d11 | 552 | &mut self, |
6a06907d | 553 | base: &PlaceTy<'tcx, M::PointerTag>, |
ba9703b0 | 554 | field: usize, |
dc9dc135 | 555 | ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { |
b7449926 XL |
556 | // FIXME: We could try to be smarter and avoid allocation for fields that span the |
557 | // entire place. | |
558 | let mplace = self.force_allocation(base)?; | |
6a06907d | 559 | Ok(self.mplace_field(&mplace, field)?.into()) |
ff7c6d11 XL |
560 | } |
561 | ||
ba9703b0 XL |
562 | pub fn place_index( |
563 | &mut self, | |
6a06907d | 564 | base: &PlaceTy<'tcx, M::PointerTag>, |
ba9703b0 XL |
565 | index: u64, |
566 | ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { | |
567 | let mplace = self.force_allocation(base)?; | |
6a06907d | 568 | Ok(self.mplace_index(&mplace, index)?.into()) |
ba9703b0 XL |
569 | } |
570 | ||
b7449926 | 571 | pub fn place_downcast( |
a1dfa0c6 | 572 | &self, |
6a06907d | 573 | base: &PlaceTy<'tcx, M::PointerTag>, |
a1dfa0c6 | 574 | variant: VariantIdx, |
dc9dc135 | 575 | ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { |
b7449926 XL |
576 | // Downcast just changes the layout |
577 | Ok(match base.place { | |
dfeec247 | 578 | Place::Ptr(mplace) => { |
6a06907d | 579 | self.mplace_downcast(&MPlaceTy { mplace, layout: base.layout }, variant)?.into() |
dfeec247 | 580 | } |
b7449926 | 581 | Place::Local { .. } => { |
a1dfa0c6 | 582 | let layout = base.layout.for_variant(self, variant); |
6a06907d | 583 | PlaceTy { layout, ..*base } |
ff7c6d11 | 584 | } |
b7449926 | 585 | }) |
ff7c6d11 XL |
586 | } |
587 | ||
9fa01778 | 588 | /// Projects into a place. |
b7449926 XL |
589 | pub fn place_projection( |
590 | &mut self, | |
6a06907d | 591 | base: &PlaceTy<'tcx, M::PointerTag>, |
f9f354fc | 592 | &proj_elem: &mir::ProjectionElem<mir::Local, Ty<'tcx>>, |
dc9dc135 | 593 | ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { |
ba9703b0 | 594 | use rustc_middle::mir::ProjectionElem::*; |
f9f354fc | 595 | Ok(match proj_elem { |
ba9703b0 | 596 | Field(field, _) => self.place_field(base, field.index())?, |
b7449926 | 597 | Downcast(_, variant) => self.place_downcast(base, variant)?, |
6a06907d | 598 | Deref => self.deref_operand(&self.place_to_op(base)?)?.into(), |
b7449926 XL |
599 | // For the other variants, we have to force an allocation. |
600 | // This matches `operand_projection`. | |
601 | Subslice { .. } | ConstantIndex { .. } | Index(_) => { | |
602 | let mplace = self.force_allocation(base)?; | |
6a06907d | 603 | self.mplace_projection(&mplace, proj_elem)?.into() |
b7449926 XL |
604 | } |
605 | }) | |
606 | } | |
ff7c6d11 | 607 | |
3c0e092e XL |
608 | /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements. |
609 | /// Also returns the number of elements. | |
610 | pub fn place_to_simd( | |
611 | &mut self, | |
612 | base: &PlaceTy<'tcx, M::PointerTag>, | |
613 | ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, u64)> { | |
614 | let mplace = self.force_allocation(base)?; | |
615 | self.mplace_to_simd(&mplace) | |
616 | } | |
617 | ||
    /// Computes a place. You should only use this if you intend to write into this
    /// place; for reading, a more efficient alternative is `eval_place_for_read`.
    pub fn eval_place(
        &mut self,
        place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
        let mut place_ty = PlaceTy {
            // This works even for dead/uninitialized locals; we check further when writing
            place: Place::Local { frame: self.frame_idx(), local: place.local },
            layout: self.layout_of_local(self.frame(), place.local, None)?,
        };

        // Apply the projections (field access, deref, indexing, ...) left to right.
        for elem in place.projection.iter() {
            place_ty = self.place_projection(&place_ty, &elem)?
        }

        trace!("{:?}", self.dump_place(place_ty.place));
        // Sanity-check the type we ended up with.
        debug_assert!(mir_assign_valid_types(
            *self.tcx,
            self.param_env,
            self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
                place.ty(&self.frame().body.local_decls, *self.tcx).ty
            )?)?,
            place_ty.layout,
        ));
        Ok(place_ty)
    }
646 | ||
a1dfa0c6 | 647 | /// Write an immediate to a place |
0bf4aa26 | 648 | #[inline(always)] |
a1dfa0c6 | 649 | pub fn write_immediate( |
b7449926 | 650 | &mut self, |
a1dfa0c6 | 651 | src: Immediate<M::PointerTag>, |
6a06907d | 652 | dest: &PlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 653 | ) -> InterpResult<'tcx> { |
a1dfa0c6 | 654 | self.write_immediate_no_validate(src, dest)?; |
0bf4aa26 XL |
655 | |
656 | if M::enforce_validity(self) { | |
657 | // Data got changed, better make sure it matches the type! | |
6a06907d | 658 | self.validate_operand(&self.place_to_op(dest)?)?; |
dc9dc135 XL |
659 | } |
660 | ||
661 | Ok(()) | |
662 | } | |
663 | ||
136023e0 | 664 | /// Write a scalar to a place |
dc9dc135 | 665 | #[inline(always)] |
136023e0 | 666 | pub fn write_scalar( |
dc9dc135 | 667 | &mut self, |
136023e0 XL |
668 | val: impl Into<ScalarMaybeUninit<M::PointerTag>>, |
669 | dest: &PlaceTy<'tcx, M::PointerTag>, | |
dc9dc135 | 670 | ) -> InterpResult<'tcx> { |
136023e0 XL |
671 | self.write_immediate(Immediate::Scalar(val.into()), dest) |
672 | } | |
0bf4aa26 | 673 | |
136023e0 XL |
674 | /// Write a pointer to a place |
675 | #[inline(always)] | |
676 | pub fn write_pointer( | |
677 | &mut self, | |
678 | ptr: impl Into<Pointer<Option<M::PointerTag>>>, | |
679 | dest: &PlaceTy<'tcx, M::PointerTag>, | |
680 | ) -> InterpResult<'tcx> { | |
681 | self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest) | |
0bf4aa26 XL |
682 | } |
683 | ||
a1dfa0c6 | 684 | /// Write an immediate to a place. |
0bf4aa26 XL |
685 | /// If you use this you are responsible for validating that things got copied at the |
686 | /// right type. | |
a1dfa0c6 | 687 | fn write_immediate_no_validate( |
0bf4aa26 | 688 | &mut self, |
a1dfa0c6 | 689 | src: Immediate<M::PointerTag>, |
6a06907d | 690 | dest: &PlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 691 | ) -> InterpResult<'tcx> { |
0bf4aa26 XL |
692 | if cfg!(debug_assertions) { |
693 | // This is a very common path, avoid some checks in release mode | |
694 | assert!(!dest.layout.is_unsized(), "Cannot write unsized data"); | |
a1dfa0c6 | 695 | match src { |
136023e0 | 696 | Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(..))) => assert_eq!( |
dfeec247 XL |
697 | self.pointer_size(), |
698 | dest.layout.size, | |
699 | "Size mismatch when writing pointer" | |
700 | ), | |
29967ef6 XL |
701 | Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Int(int))) => { |
702 | assert_eq!(int.size(), dest.layout.size, "Size mismatch when writing bits") | |
dfeec247 | 703 | } |
3dfed10e | 704 | Immediate::Scalar(ScalarMaybeUninit::Uninit) => {} // uninit can have any size |
a1dfa0c6 | 705 | Immediate::ScalarPair(_, _) => { |
0bf4aa26 XL |
706 | // FIXME: Can we check anything here? |
707 | } | |
708 | } | |
709 | } | |
a1dfa0c6 | 710 | trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty); |
0bf4aa26 | 711 | |
a1dfa0c6 | 712 | // See if we can avoid an allocation. This is the counterpart to `try_read_immediate`, |
b7449926 XL |
713 | // but not factored as a separate function. |
714 | let mplace = match dest.place { | |
ff7c6d11 | 715 | Place::Local { frame, local } => { |
f035d41b | 716 | match M::access_local_mut(self, frame, local)? { |
48663c56 XL |
717 | Ok(local) => { |
718 | // Local can be updated in-place. | |
719 | *local = LocalValue::Live(Operand::Immediate(src)); | |
b7449926 | 720 | return Ok(()); |
48663c56 XL |
721 | } |
722 | Err(mplace) => { | |
723 | // The local is in memory, go on below. | |
724 | mplace | |
725 | } | |
ff7c6d11 | 726 | } |
dfeec247 | 727 | } |
48663c56 | 728 | Place::Ptr(mplace) => mplace, // already referring to memory |
ff7c6d11 | 729 | }; |
0bf4aa26 | 730 | let dest = MPlaceTy { mplace, layout: dest.layout }; |
ff7c6d11 | 731 | |
b7449926 | 732 | // This is already in memory, write there. |
6a06907d | 733 | self.write_immediate_to_mplace_no_validate(src, &dest) |
ff7c6d11 XL |
734 | } |
735 | ||
a1dfa0c6 | 736 | /// Write an immediate to memory. |
dc9dc135 | 737 | /// If you use this you are responsible for validating that things got copied at the |
0bf4aa26 | 738 | /// right type. |
a1dfa0c6 | 739 | fn write_immediate_to_mplace_no_validate( |
b7449926 | 740 | &mut self, |
a1dfa0c6 | 741 | value: Immediate<M::PointerTag>, |
6a06907d | 742 | dest: &MPlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 743 | ) -> InterpResult<'tcx> { |
b7449926 XL |
744 | // Note that it is really important that the type here is the right one, and matches the |
745 | // type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here | |
746 | // to handle padding properly, which is only correct if we never look at this data with the | |
747 | // wrong type. | |
748 | ||
60c5eb7d | 749 | // Invalid places are a thing: the return place of a diverging function |
17df50a5 | 750 | let tcx = *self.tcx; |
ee023bcb FG |
751 | let Some(mut alloc) = self.get_alloc_mut(dest)? else { |
752 | // zero-sized access | |
753 | return Ok(()); | |
dc9dc135 | 754 | }; |
b7449926 | 755 | |
0bf4aa26 XL |
756 | // FIXME: We should check that there are dest.layout.size many bytes available in |
757 | // memory. The code below is not sufficient, with enough padding it might not | |
758 | // cover all the bytes! | |
b7449926 | 759 | match value { |
a1dfa0c6 | 760 | Immediate::Scalar(scalar) => { |
0bf4aa26 | 761 | match dest.layout.abi { |
ba9703b0 | 762 | Abi::Scalar(_) => {} // fine |
f035d41b XL |
763 | _ => span_bug!( |
764 | self.cur_span(), | |
765 | "write_immediate_to_mplace: invalid Scalar layout: {:#?}", | |
766 | dest.layout | |
767 | ), | |
0bf4aa26 | 768 | } |
17df50a5 | 769 | alloc.write_scalar(alloc_range(Size::ZERO, dest.layout.size), scalar) |
ff7c6d11 | 770 | } |
a1dfa0c6 | 771 | Immediate::ScalarPair(a_val, b_val) => { |
dc9dc135 XL |
772 | // We checked `ptr_align` above, so all fields will have the alignment they need. |
773 | // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`, | |
774 | // which `ptr.offset(b_offset)` cannot possibly fail to satisfy. | |
b7449926 | 775 | let (a, b) = match dest.layout.abi { |
c295e0f8 | 776 | Abi::ScalarPair(a, b) => (a.value, b.value), |
f035d41b XL |
777 | _ => span_bug!( |
778 | self.cur_span(), | |
dfeec247 XL |
779 | "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}", |
780 | dest.layout | |
781 | ), | |
b7449926 | 782 | }; |
17df50a5 XL |
783 | let (a_size, b_size) = (a.size(&tcx), b.size(&tcx)); |
784 | let b_offset = a_size.align_to(b.align(&tcx).abi); | |
a1dfa0c6 | 785 | |
0bf4aa26 XL |
786 | // It is tempting to verify `b_offset` against `layout.fields.offset(1)`, |
787 | // but that does not work: We could be a newtype around a pair, then the | |
788 | // fields do not match the `ScalarPair` components. | |
789 | ||
17df50a5 XL |
790 | alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?; |
791 | alloc.write_scalar(alloc_range(b_offset, b_size), b_val) | |
ff7c6d11 | 792 | } |
b7449926 | 793 | } |
ff7c6d11 XL |
794 | } |
795 | ||
9fa01778 | 796 | /// Copies the data from an operand to a place. This does not support transmuting! |
0bf4aa26 XL |
797 | /// Use `copy_op_transmute` if the layouts could disagree. |
798 | #[inline(always)] | |
b7449926 | 799 | pub fn copy_op( |
ff7c6d11 | 800 | &mut self, |
6a06907d XL |
801 | src: &OpTy<'tcx, M::PointerTag>, |
802 | dest: &PlaceTy<'tcx, M::PointerTag>, | |
dc9dc135 | 803 | ) -> InterpResult<'tcx> { |
0bf4aa26 XL |
804 | self.copy_op_no_validate(src, dest)?; |
805 | ||
806 | if M::enforce_validity(self) { | |
807 | // Data got changed, better make sure it matches the type! | |
6a06907d | 808 | self.validate_operand(&self.place_to_op(dest)?)?; |
0bf4aa26 XL |
809 | } |
810 | ||
811 | Ok(()) | |
812 | } | |
813 | ||
9fa01778 | 814 | /// Copies the data from an operand to a place. This does not support transmuting! |
0bf4aa26 | 815 | /// Use `copy_op_transmute` if the layouts could disagree. |
dc9dc135 | 816 | /// Also, if you use this you are responsible for validating that things get copied at the |
0bf4aa26 XL |
817 | /// right type. |
818 | fn copy_op_no_validate( | |
819 | &mut self, | |
6a06907d XL |
820 | src: &OpTy<'tcx, M::PointerTag>, |
821 | dest: &PlaceTy<'tcx, M::PointerTag>, | |
dc9dc135 | 822 | ) -> InterpResult<'tcx> { |
0bf4aa26 XL |
823 | // We do NOT compare the types for equality, because well-typed code can |
824 | // actually "transmute" `&mut T` to `&T` in an assignment without a cast. | |
f035d41b | 825 | if !mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) { |
ba9703b0 | 826 | span_bug!( |
f035d41b | 827 | self.cur_span(), |
ba9703b0 XL |
828 | "type mismatch when copying!\nsrc: {:?},\ndest: {:?}", |
829 | src.layout.ty, | |
830 | dest.layout.ty, | |
831 | ); | |
832 | } | |
b7449926 XL |
833 | |
834 | // Let us see if the layout is simple so we take a shortcut, avoid force_allocation. | |
a1dfa0c6 | 835 | let src = match self.try_read_immediate(src)? { |
0bf4aa26 | 836 | Ok(src_val) => { |
48663c56 | 837 | assert!(!src.layout.is_unsized(), "cannot have unsized immediates"); |
0bf4aa26 | 838 | // Yay, we got a value that we can write directly. |
9fa01778 XL |
839 | // FIXME: Add a check to make sure that if `src` is indirect, |
840 | // it does not overlap with `dest`. | |
dc9dc135 | 841 | return self.write_immediate_no_validate(*src_val, dest); |
0bf4aa26 XL |
842 | } |
843 | Err(mplace) => mplace, | |
b7449926 XL |
844 | }; |
845 | // Slow path, this does not fit into an immediate. Just memcpy. | |
0bf4aa26 XL |
846 | trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty); |
847 | ||
48663c56 XL |
848 | // This interprets `src.meta` with the `dest` local's layout, if an unsized local |
849 | // is being initialized! | |
850 | let (dest, size) = self.force_allocation_maybe_sized(dest, src.meta)?; | |
851 | let size = size.unwrap_or_else(|| { | |
dfeec247 XL |
852 | assert!( |
853 | !dest.layout.is_unsized(), | |
854 | "Cannot copy into already initialized unsized place" | |
855 | ); | |
48663c56 XL |
856 | dest.layout.size |
857 | }); | |
858 | assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances"); | |
416331ca | 859 | |
17df50a5 XL |
860 | self.memory |
861 | .copy(src.ptr, src.align, dest.ptr, dest.align, size, /*nonoverlapping*/ true) | |
0bf4aa26 XL |
862 | } |
863 | ||
9fa01778 | 864 | /// Copies the data from an operand to a place. The layouts may disagree, but they must |
0bf4aa26 XL |
865 | /// have the same size. |
866 | pub fn copy_op_transmute( | |
867 | &mut self, | |
6a06907d XL |
868 | src: &OpTy<'tcx, M::PointerTag>, |
869 | dest: &PlaceTy<'tcx, M::PointerTag>, | |
dc9dc135 | 870 | ) -> InterpResult<'tcx> { |
f035d41b | 871 | if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) { |
0bf4aa26 XL |
872 | // Fast path: Just use normal `copy_op` |
873 | return self.copy_op(src, dest); | |
874 | } | |
48663c56 | 875 | // We still require the sizes to match. |
dfeec247 XL |
876 | if src.layout.size != dest.layout.size { |
877 | // FIXME: This should be an assert instead of an error, but if we transmute within an | |
878 | // array length computation, `typeck` may not have yet been run and errored out. In fact | |
ee023bcb | 879 | // most likely we *are* running `typeck` right now. Investigate whether we can bail out |
3dfed10e | 880 | // on `typeck_results().has_errors` at all const eval entry points. |
dfeec247 | 881 | debug!("Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest); |
ba9703b0 | 882 | self.tcx.sess.delay_span_bug( |
f035d41b | 883 | self.cur_span(), |
ba9703b0 XL |
884 | "size-changing transmute, should have been caught by transmute checking", |
885 | ); | |
886 | throw_inval!(TransmuteSizeDiff(src.layout.ty, dest.layout.ty)); | |
dfeec247 | 887 | } |
48663c56 XL |
888 | // Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want |
889 | // to avoid that here. | |
dfeec247 XL |
890 | assert!( |
891 | !src.layout.is_unsized() && !dest.layout.is_unsized(), | |
892 | "Cannot transmute unsized data" | |
893 | ); | |
0bf4aa26 XL |
894 | |
895 | // The hard case is `ScalarPair`. `src` is already read from memory in this case, | |
896 | // using `src.layout` to figure out which bytes to use for the 1st and 2nd field. | |
897 | // We have to write them to `dest` at the offsets they were *read at*, which is | |
898 | // not necessarily the same as the offsets in `dest.layout`! | |
899 | // Hence we do the copy with the source layout on both sides. We also make sure to write | |
900 | // into memory, because if `dest` is a local we would not even have a way to write | |
901 | // at the `src` offsets; the fact that we came from a different layout would | |
902 | // just be lost. | |
903 | let dest = self.force_allocation(dest)?; | |
904 | self.copy_op_no_validate( | |
905 | src, | |
6a06907d | 906 | &PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout }), |
0bf4aa26 XL |
907 | )?; |
908 | ||
909 | if M::enforce_validity(self) { | |
910 | // Data got changed, better make sure it matches the type! | |
6a06907d | 911 | self.validate_operand(&dest.into())?; |
0bf4aa26 XL |
912 | } |
913 | ||
914 | Ok(()) | |
ff7c6d11 XL |
915 | } |
916 | ||
9fa01778 | 917 | /// Ensures that a place is in memory, and returns where it is. |
a1dfa0c6 XL |
918 | /// If the place currently refers to a local that doesn't yet have a matching allocation, |
919 | /// create such an allocation. | |
b7449926 | 920 | /// This is essentially `force_to_memplace`. |
48663c56 XL |
921 | /// |
922 | /// This supports unsized types and returns the computed size to avoid some | |
923 | /// redundant computation when copying; use `force_allocation` for a simpler, sized-only | |
924 | /// version. | |
925 | pub fn force_allocation_maybe_sized( | |
ff7c6d11 | 926 | &mut self, |
6a06907d | 927 | place: &PlaceTy<'tcx, M::PointerTag>, |
dfeec247 | 928 | meta: MemPlaceMeta<M::PointerTag>, |
dc9dc135 | 929 | ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> { |
48663c56 | 930 | let (mplace, size) = match place.place { |
b7449926 | 931 | Place::Local { frame, local } => { |
f035d41b | 932 | match M::access_local_mut(self, frame, local)? { |
dfeec247 | 933 | Ok(&mut local_val) => { |
b7449926 | 934 | // We need to make an allocation. |
48663c56 | 935 | |
b7449926 | 936 | // We need the layout of the local. We can NOT use the layout we got, |
0731742a | 937 | // that might e.g., be an inner field of a struct with `Scalar` layout, |
b7449926 | 938 | // that has different alignment than the outer field. |
ba9703b0 XL |
939 | let local_layout = |
940 | self.layout_of_local(&self.stack()[frame], local, None)?; | |
dfeec247 XL |
941 | // We also need to support unsized types, and hence cannot use `allocate`. |
942 | let (size, align) = self | |
6a06907d | 943 | .size_and_align_of(&meta, &local_layout)? |
48663c56 | 944 | .expect("Cannot allocate for non-dyn-sized type"); |
136023e0 | 945 | let ptr = self.memory.allocate(size, align, MemoryKind::Stack)?; |
48663c56 | 946 | let mplace = MemPlace { ptr: ptr.into(), align, meta }; |
dfeec247 | 947 | if let LocalValue::Live(Operand::Immediate(value)) = local_val { |
48663c56 XL |
948 | // Preserve old value. |
949 | // We don't have to validate as we can assume the local | |
950 | // was already valid for its type. | |
951 | let mplace = MPlaceTy { mplace, layout: local_layout }; | |
6a06907d | 952 | self.write_immediate_to_mplace_no_validate(value, &mplace)?; |
48663c56 XL |
953 | } |
954 | // Now we can call `access_mut` again, asserting it goes well, | |
955 | // and actually overwrite things. | |
f035d41b | 956 | *M::access_local_mut(self, frame, local).unwrap().unwrap() = |
48663c56 XL |
957 | LocalValue::Live(Operand::Indirect(mplace)); |
958 | (mplace, Some(size)) | |
b7449926 | 959 | } |
48663c56 | 960 | Err(mplace) => (mplace, None), // this already was an indirect local |
b7449926 XL |
961 | } |
962 | } | |
dfeec247 | 963 | Place::Ptr(mplace) => (mplace, None), |
b7449926 XL |
964 | }; |
965 | // Return with the original layout, so that the caller can go on | |
48663c56 XL |
966 | Ok((MPlaceTy { mplace, layout: place.layout }, size)) |
967 | } | |
968 | ||
969 | #[inline(always)] | |
970 | pub fn force_allocation( | |
971 | &mut self, | |
6a06907d | 972 | place: &PlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 973 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
dfeec247 | 974 | Ok(self.force_allocation_maybe_sized(place, MemPlaceMeta::None)?.0) |
ff7c6d11 XL |
975 | } |
976 | ||
b7449926 | 977 | pub fn allocate( |
ff7c6d11 | 978 | &mut self, |
ba9703b0 XL |
979 | layout: TyAndLayout<'tcx>, |
980 | kind: MemoryKind<M::MemoryKind>, | |
136023e0 XL |
981 | ) -> InterpResult<'static, MPlaceTy<'tcx, M::PointerTag>> { |
982 | let ptr = self.memory.allocate(layout.size, layout.align.abi, kind)?; | |
983 | Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout)) | |
b7449926 | 984 | } |
ff7c6d11 | 985 | |
17df50a5 | 986 | /// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation. |
60c5eb7d XL |
987 | pub fn allocate_str( |
988 | &mut self, | |
989 | str: &str, | |
ba9703b0 | 990 | kind: MemoryKind<M::MemoryKind>, |
17df50a5 | 991 | mutbl: Mutability, |
60c5eb7d | 992 | ) -> MPlaceTy<'tcx, M::PointerTag> { |
17df50a5 | 993 | let ptr = self.memory.allocate_bytes(str.as_bytes(), Align::ONE, kind, mutbl); |
ba9703b0 | 994 | let meta = Scalar::from_machine_usize(u64::try_from(str.len()).unwrap(), self); |
17df50a5 XL |
995 | let mplace = |
996 | MemPlace { ptr: ptr.into(), align: Align::ONE, meta: MemPlaceMeta::Meta(meta) }; | |
60c5eb7d | 997 | |
17df50a5 XL |
998 | let ty = self.tcx.mk_ref( |
999 | self.tcx.lifetimes.re_static, | |
1000 | ty::TypeAndMut { ty: self.tcx.types.str_, mutbl }, | |
1001 | ); | |
1002 | let layout = self.layout_of(ty).unwrap(); | |
60c5eb7d XL |
1003 | MPlaceTy { mplace, layout } |
1004 | } | |
1005 | ||
f035d41b XL |
1006 | /// Writes the discriminant of the given variant. |
1007 | pub fn write_discriminant( | |
b7449926 | 1008 | &mut self, |
a1dfa0c6 | 1009 | variant_index: VariantIdx, |
6a06907d | 1010 | dest: &PlaceTy<'tcx, M::PointerTag>, |
dc9dc135 | 1011 | ) -> InterpResult<'tcx> { |
c295e0f8 XL |
1012 | // This must be an enum or generator. |
1013 | match dest.layout.ty.kind() { | |
1014 | ty::Adt(adt, _) => assert!(adt.is_enum()), | |
1015 | ty::Generator(..) => {} | |
1016 | _ => span_bug!( | |
1017 | self.cur_span(), | |
1018 | "write_discriminant called on non-variant-type (neither enum nor generator)" | |
1019 | ), | |
1020 | } | |
60c5eb7d XL |
1021 | // Layout computation excludes uninhabited variants from consideration |
1022 | // therefore there's no way to represent those variants in the given layout. | |
c295e0f8 XL |
1023 | // Essentially, uninhabited variants do not have a tag that corresponds to their |
1024 | // discriminant, so we cannot do anything here. | |
1025 | // When evaluating we will always error before even getting here, but ConstProp 'executes' | |
1026 | // dead code, so we cannot ICE here. | |
60c5eb7d | 1027 | if dest.layout.for_variant(self, variant_index).abi.is_uninhabited() { |
c295e0f8 | 1028 | throw_ub!(UninhabitedEnumVariantWritten) |
60c5eb7d | 1029 | } |
e74abb32 | 1030 | |
b7449926 | 1031 | match dest.layout.variants { |
ba9703b0 | 1032 | Variants::Single { index } => { |
60c5eb7d | 1033 | assert_eq!(index, variant_index); |
ff7c6d11 | 1034 | } |
ba9703b0 | 1035 | Variants::Multiple { |
f035d41b | 1036 | tag_encoding: TagEncoding::Direct, |
c295e0f8 | 1037 | tag: tag_layout, |
f035d41b | 1038 | tag_field, |
532ac7d7 XL |
1039 | .. |
1040 | } => { | |
60c5eb7d | 1041 | // No need to validate that the discriminant here because the |
ba9703b0 | 1042 | // `TyAndLayout::for_variant()` call earlier already checks the variant is valid. |
60c5eb7d | 1043 | |
48663c56 XL |
1044 | let discr_val = |
1045 | dest.layout.ty.discriminant_for_variant(*self.tcx, variant_index).unwrap().val; | |
b7449926 XL |
1046 | |
1047 | // raw discriminants for enums are isize or bigger during | |
1048 | // their computation, but the in-memory tag is the smallest possible | |
1049 | // representation | |
f035d41b | 1050 | let size = tag_layout.value.size(self); |
29967ef6 | 1051 | let tag_val = size.truncate(discr_val); |
b7449926 | 1052 | |
f035d41b | 1053 | let tag_dest = self.place_field(dest, tag_field)?; |
6a06907d | 1054 | self.write_scalar(Scalar::from_uint(tag_val, size), &tag_dest)?; |
ff7c6d11 | 1055 | } |
ba9703b0 | 1056 | Variants::Multiple { |
f035d41b XL |
1057 | tag_encoding: |
1058 | TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start }, | |
c295e0f8 | 1059 | tag: tag_layout, |
f035d41b | 1060 | tag_field, |
b7449926 | 1061 | .. |
ff7c6d11 | 1062 | } => { |
60c5eb7d | 1063 | // No need to validate that the discriminant here because the |
ba9703b0 | 1064 | // `TyAndLayout::for_variant()` call earlier already checks the variant is valid. |
60c5eb7d | 1065 | |
b7449926 | 1066 | if variant_index != dataful_variant { |
e1599b0c | 1067 | let variants_start = niche_variants.start().as_u32(); |
dfeec247 XL |
1068 | let variant_index_relative = variant_index |
1069 | .as_u32() | |
e1599b0c XL |
1070 | .checked_sub(variants_start) |
1071 | .expect("overflow computing relative variant idx"); | |
1072 | // We need to use machine arithmetic when taking into account `niche_start`: | |
f035d41b XL |
1073 | // tag_val = variant_index_relative + niche_start_val |
1074 | let tag_layout = self.layout_of(tag_layout.value.to_int_ty(*self.tcx))?; | |
1075 | let niche_start_val = ImmTy::from_uint(niche_start, tag_layout); | |
e1599b0c | 1076 | let variant_index_relative_val = |
f035d41b XL |
1077 | ImmTy::from_uint(variant_index_relative, tag_layout); |
1078 | let tag_val = self.binary_op( | |
e1599b0c | 1079 | mir::BinOp::Add, |
6a06907d XL |
1080 | &variant_index_relative_val, |
1081 | &niche_start_val, | |
b7449926 | 1082 | )?; |
e1599b0c | 1083 | // Write result. |
f035d41b | 1084 | let niche_dest = self.place_field(dest, tag_field)?; |
6a06907d | 1085 | self.write_immediate(*tag_val, &niche_dest)?; |
b7449926 XL |
1086 | } |
1087 | } | |
1088 | } | |
ff7c6d11 | 1089 | |
b7449926 XL |
1090 | Ok(()) |
1091 | } | |
ff7c6d11 | 1092 | |
a1dfa0c6 XL |
1093 | pub fn raw_const_to_mplace( |
1094 | &self, | |
1b1a35ee | 1095 | raw: ConstAlloc<'tcx>, |
dc9dc135 | 1096 | ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { |
a1dfa0c6 | 1097 | // This must be an allocation in `tcx` |
f9f354fc | 1098 | let _ = self.tcx.global_alloc(raw.alloc_id); |
3dfed10e | 1099 | let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?; |
a1dfa0c6 | 1100 | let layout = self.layout_of(raw.ty)?; |
136023e0 | 1101 | Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout)) |
a1dfa0c6 XL |
1102 | } |
1103 | ||
b7449926 XL |
1104 | /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type. |
1105 | /// Also return some more information so drop doesn't have to run the same code twice. | |
dfeec247 XL |
1106 | pub(super) fn unpack_dyn_trait( |
1107 | &self, | |
6a06907d | 1108 | mplace: &MPlaceTy<'tcx, M::PointerTag>, |
dfeec247 | 1109 | ) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> { |
136023e0 | 1110 | let vtable = self.scalar_to_ptr(mplace.vtable()); // also sanity checks the type |
b7449926 XL |
1111 | let (instance, ty) = self.read_drop_type_from_vtable(vtable)?; |
1112 | let layout = self.layout_of(ty)?; | |
1113 | ||
1114 | // More sanity checks | |
1115 | if cfg!(debug_assertions) { | |
1116 | let (size, align) = self.read_size_and_align_from_vtable(vtable)?; | |
1117 | assert_eq!(size, layout.size); | |
a1dfa0c6 XL |
1118 | // only ABI alignment is preserved |
1119 | assert_eq!(align, layout.align.abi); | |
ff7c6d11 | 1120 | } |
ff7c6d11 | 1121 | |
6a06907d | 1122 | let mplace = MPlaceTy { mplace: MemPlace { meta: MemPlaceMeta::None, ..**mplace }, layout }; |
b7449926 | 1123 | Ok((instance, mplace)) |
ff7c6d11 XL |
1124 | } |
1125 | } |