// compiler/rustc_middle/src/mir/interpret/allocation.rs (upstream rustc 1.68.2)
//! The virtual memory representation of the MIR interpreter.

mod init_mask;
mod provenance_map;
#[cfg(test)]
mod tests;

use std::borrow::Cow;
use std::fmt;
use std::hash;
use std::ops::Range;
use std::ptr;

use either::{Left, Right};

use rustc_ast::Mutability;
use rustc_data_structures::intern::Interned;
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Align, HasDataLayout, Size};

use super::{
    read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
    ResourceExhaustionInfo, Scalar, ScalarSizeMismatch, UndefinedBehaviorInfo, UninitBytesAccess,
    UnsupportedOpInfo,
};
use crate::ty;
use init_mask::*;
use provenance_map::*;

pub use init_mask::{InitChunk, InitChunkIter};

/// This type represents an Allocation in the Miri/CTFE core engine.
///
/// Its public API is rather low-level, working directly with allocation offsets and a custom error
/// type to account for the lack of an AllocId on this level. The Miri/CTFE core engine `memory`
/// module provides higher-level access.
// Note: for performance reasons when interning, some of the `Allocation` fields can be partially
// hashed (see the `Hash` impl below for more details), so the impl is not derived.
#[derive(Clone, Eq, PartialEq, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct Allocation<Prov: Provenance = AllocId, Extra = ()> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    bytes: Box<[u8]>,
    /// Maps from byte addresses to extra provenance data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    provenance: ProvenanceMap<Prov>,
    /// Denotes which part of this allocation is initialized.
    init_mask: InitMask,
    /// The alignment of the allocation to detect unaligned reads.
    /// (`Align` guarantees that this is a power of two.)
    pub align: Align,
    /// `true` if the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}

/// This is the maximum size we will hash at a time, when interning an `Allocation` and its
/// `InitMask`. Note that we hash that many bytes twice: at the start and at the end of a buffer.
/// Used when these two structures are large: we only partially hash the larger fields in that
/// situation. See the comment at the top of their respective `Hash` impl for more details.
const MAX_BYTES_TO_HASH: usize = 64;

/// This is the maximum size (in bytes) for which a buffer will be fully hashed, when interning.
/// Otherwise, it will be partially hashed in 2 slices, requiring at least 2 `MAX_BYTES_TO_HASH`
/// bytes.
const MAX_HASHED_BUFFER_LEN: usize = 2 * MAX_BYTES_TO_HASH;

// Const allocations are only hashed for interning. However, they can be large, making the hashing
// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
// big buffers like the actual bytes of an allocation. We can partially hash some fields when
// they're large.
impl hash::Hash for Allocation {
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        // Partially hash the `bytes` buffer when it is large. To limit collisions with common
        // prefixes and suffixes, we hash the length and some slices of the buffer.
        let byte_count = self.bytes.len();
        if byte_count > MAX_HASHED_BUFFER_LEN {
            // Hash the buffer's length.
            byte_count.hash(state);

            // And its head and tail.
            self.bytes[..MAX_BYTES_TO_HASH].hash(state);
            self.bytes[byte_count - MAX_BYTES_TO_HASH..].hash(state);
        } else {
            self.bytes.hash(state);
        }

        // Hash the other fields as usual.
        self.provenance.hash(state);
        self.init_mask.hash(state);
        self.align.hash(state);
        self.mutability.hash(state);
        self.extra.hash(state);
    }
}
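
// Worked example of the partial-hash thresholds above (illustrative sizes): with
// `MAX_BYTES_TO_HASH = 64` and `MAX_HASHED_BUFFER_LEN = 128`, a 4096-byte allocation feeds only
// its length, `bytes[..64]`, and `bytes[4096 - 64..]` into the hasher, while a 100-byte
// allocation is hashed in full.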

/// Interned types generally have an `Outer` type and an `Inner` type, where
/// `Outer` is a newtype around `Interned<Inner>`, and all the operations are
/// done on `Outer`, because all occurrences are interned. E.g. `Ty` is an
/// outer type and `TyKind` is its inner type.
///
/// Here things are different because only const allocations are interned. This
/// means that both the inner type (`Allocation`) and the outer type
/// (`ConstAllocation`) are used quite a bit.
#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable)]
#[rustc_pass_by_value]
pub struct ConstAllocation<'tcx>(pub Interned<'tcx, Allocation>);

impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The debug representation of this is very verbose and basically useless,
        // so don't print it.
        write!(f, "ConstAllocation {{ .. }}")
    }
}

impl<'tcx> ConstAllocation<'tcx> {
    pub fn inner(self) -> &'tcx Allocation {
        self.0.0
    }
}

/// We have our own error type that does not know about the `AllocId`; that information
/// is added when converting to `InterpError`.
#[derive(Debug)]
pub enum AllocError {
    /// A scalar had the wrong size.
    ScalarSizeMismatch(ScalarSizeMismatch),
    /// Encountered a pointer where we needed raw bytes.
    ReadPointerAsBytes,
    /// Partially overwriting a pointer.
    PartialPointerOverwrite(Size),
    /// Partially copying a pointer.
    PartialPointerCopy(Size),
    /// Using uninitialized data where it is not allowed.
    InvalidUninitBytes(Option<UninitBytesAccess>),
}
pub type AllocResult<T = ()> = Result<T, AllocError>;

impl From<ScalarSizeMismatch> for AllocError {
    fn from(s: ScalarSizeMismatch) -> Self {
        AllocError::ScalarSizeMismatch(s)
    }
}

impl AllocError {
    pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
        use AllocError::*;
        match self {
            ScalarSizeMismatch(s) => {
                InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ScalarSizeMismatch(s))
            }
            ReadPointerAsBytes => InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes),
            PartialPointerOverwrite(offset) => InterpError::Unsupported(
                UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
            ),
            PartialPointerCopy(offset) => InterpError::Unsupported(
                UnsupportedOpInfo::PartialPointerCopy(Pointer::new(alloc_id, offset)),
            ),
            InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
                UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
            ),
        }
    }
}
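
// Conversion sketch (hypothetical `alloc_id`; for illustration only): the allocation-local error
// only learns its `AllocId` at this boundary, e.g.
//
//     let err = AllocError::PartialPointerCopy(Size::from_bytes(4));
//     let interp_err: InterpError<'_> = err.to_interp_error(alloc_id);
//
// which yields `Unsupported(PartialPointerCopy(ptr))` with `ptr` pointing at offset 4 into
// `alloc_id`.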

/// The information that makes up a memory access: offset and size.
#[derive(Copy, Clone)]
pub struct AllocRange {
    pub start: Size,
    pub size: Size,
}

impl fmt::Debug for AllocRange {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[{:#x}..{:#x}]", self.start.bytes(), self.end().bytes())
    }
}

/// Free-starting constructor for less syntactic overhead.
#[inline(always)]
pub fn alloc_range(start: Size, size: Size) -> AllocRange {
    AllocRange { start, size }
}

impl From<Range<Size>> for AllocRange {
    #[inline]
    fn from(r: Range<Size>) -> Self {
        alloc_range(r.start, r.end - r.start) // `Size` subtraction (overflow-checked)
    }
}

impl From<Range<usize>> for AllocRange {
    #[inline]
    fn from(r: Range<usize>) -> Self {
        AllocRange::from(Size::from_bytes(r.start)..Size::from_bytes(r.end))
    }
}

impl AllocRange {
    #[inline(always)]
    pub fn end(self) -> Size {
        self.start + self.size // This does overflow checking.
    }

    /// Returns the `subrange` within this range; panics if it is not a subrange.
    #[inline]
    pub fn subrange(self, subrange: AllocRange) -> AllocRange {
        let sub_start = self.start + subrange.start;
        let range = alloc_range(sub_start, subrange.size);
        assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
        range
    }
}
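
// Worked example of the range arithmetic above (hypothetical values): for
// `r = alloc_range(Size::from_bytes(8), Size::from_bytes(16))`, `r.end()` is 24 bytes. Subranges
// are relative to the parent range, so
//
//     r.subrange(alloc_range(Size::from_bytes(4), Size::from_bytes(4)))
//
// denotes absolute offsets 12..16; a subrange reaching past offset 24 would trip the assertion.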

// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Prov: Provenance> Allocation<Prov> {
    /// Creates an allocation initialized by the given bytes.
    pub fn from_bytes<'a>(
        slice: impl Into<Cow<'a, [u8]>>,
        align: Align,
        mutability: Mutability,
    ) -> Self {
        let bytes = Box::<[u8]>::from(slice.into());
        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            provenance: ProvenanceMap::new(),
            init_mask: InitMask::new(size, true),
            align,
            mutability,
            extra: (),
        }
    }

    pub fn from_bytes_byte_aligned_immutable<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::ONE, Mutability::Not)
    }
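
    // A minimal usage sketch for the two constructors above (hypothetical caller; anything
    // convertible into `Cow<[u8]>` works as the data source):
    //
    //     let alloc = Allocation::<AllocId>::from_bytes(&b"hello"[..], Align::ONE, Mutability::Not);
    //     assert_eq!(alloc.len(), 5); // fully initialized, no provenance
    //
    // `from_bytes_byte_aligned_immutable` is the same call with `Align::ONE` and `Mutability::Not`
    // filled in.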

    /// Try to create an Allocation of `size` bytes, failing if there is not enough memory
    /// available to the compiler to do so.
    ///
    /// If `panic_on_fail` is true, this will never return `Err`.
    pub fn uninit<'tcx>(size: Size, align: Align, panic_on_fail: bool) -> InterpResult<'tcx, Self> {
        let bytes = Box::<[u8]>::try_new_zeroed_slice(size.bytes_usize()).map_err(|_| {
            // This results in an error that can happen non-deterministically, since the memory
            // available to the compiler can change between runs. Normally queries are always
            // deterministic. However, we can be non-deterministic here because all uses of const
            // evaluation (including ConstProp!) will make compilation fail (via hard error
            // or ICE) upon encountering a `MemoryExhausted` error.
            if panic_on_fail {
                panic!("Allocation::uninit called with panic_on_fail had allocation failure")
            }
            ty::tls::with(|tcx| {
                tcx.sess.delay_span_bug(DUMMY_SP, "exhausted memory during interpretation")
            });
            InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
        })?;
        // SAFETY: the box was zero-allocated, which is a valid initial value for Box<[u8]>
        let bytes = unsafe { bytes.assume_init() };
        Ok(Allocation {
            bytes,
            provenance: ProvenanceMap::new(),
            init_mask: InitMask::new(size, false),
            align,
            mutability: Mutability::Mut,
            extra: (),
        })
    }
}

impl Allocation {
    /// Adjusts an allocation from the tcx to a custom Machine instance
    /// with a different `Provenance` and `Extra` type.
    pub fn adjust_from_tcx<Prov: Provenance, Extra, Err>(
        self,
        cx: &impl HasDataLayout,
        extra: Extra,
        mut adjust_ptr: impl FnMut(Pointer<AllocId>) -> Result<Pointer<Prov>, Err>,
    ) -> Result<Allocation<Prov, Extra>, Err> {
        // Compute new pointer provenance, which also adjusts the bytes.
        let mut bytes = self.bytes;
        let mut new_provenance = Vec::with_capacity(self.provenance.ptrs().len());
        let ptr_size = cx.data_layout().pointer_size.bytes_usize();
        let endian = cx.data_layout().endian;
        for &(offset, alloc_id) in self.provenance.ptrs().iter() {
            let idx = offset.bytes_usize();
            let ptr_bytes = &mut bytes[idx..idx + ptr_size];
            let bits = read_target_uint(endian, ptr_bytes).unwrap();
            let (ptr_prov, ptr_offset) =
                adjust_ptr(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_parts();
            write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
            new_provenance.push((offset, ptr_prov));
        }
        // Create allocation.
        Ok(Allocation {
            bytes,
            provenance: ProvenanceMap::from_presorted_ptrs(new_provenance),
            init_mask: self.init_mask,
            align: self.align,
            mutability: self.mutability,
            extra,
        })
    }
}
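
// Call-shape sketch for `adjust_from_tcx` above (hypothetical machine that keeps `AllocId`
// provenance and has no extra state; real callers are the interpreter's `Machine` impls):
//
//     let adjusted: Allocation<AllocId, ()> =
//         alloc.adjust_from_tcx(&tcx, (), |ptr| Result::<_, ()>::Ok(ptr))?;
//
// Every pointer recorded in the provenance map is passed through `adjust_ptr`; its (possibly
// changed) offset is written back into `bytes` and its new provenance is re-recorded.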

/// Raw accessors. Provide access to otherwise private bytes.
impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
    pub fn len(&self) -> usize {
        self.bytes.len()
    }

    pub fn size(&self) -> Size {
        Size::from_bytes(self.len())
    }

    /// Looks at a slice which may contain uninitialized bytes or provenance. This differs
    /// from `get_bytes_with_uninit_and_ptr` in that it does no provenance checks (even on the
    /// edges) at all.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
        &self.bytes[range]
    }

    /// Returns the mask indicating which bytes are initialized.
    pub fn init_mask(&self) -> &InitMask {
        &self.init_mask
    }

    /// Returns the provenance map.
    pub fn provenance(&self) -> &ProvenanceMap<Prov> {
        &self.provenance
    }
}

/// Byte accessors.
impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
    /// This is the entirely abstraction-violating way to just grab the raw bytes without
    /// caring about provenance or initialization.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
    /// on that.
    #[inline]
    pub fn get_bytes_unchecked(&self, range: AllocRange) -> &[u8] {
        &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
    }

    /// Checks that these bytes are initialized, then strips provenance (if possible) and returns
    /// them.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    #[inline]
    pub fn get_bytes_strip_provenance(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&[u8]> {
        self.init_mask.is_range_initialized(range).map_err(|uninit_range| {
            AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
                access: range,
                uninit: uninit_range,
            }))
        })?;
        if !Prov::OFFSET_IS_ADDR {
            if !self.provenance.range_empty(range, cx) {
                return Err(AllocError::ReadPointerAsBytes);
            }
        }
        Ok(self.get_bytes_unchecked(range))
    }

    /// Just calling this already marks everything as defined and removes provenance,
    /// so be sure to actually put data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes_mut(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&mut [u8]> {
        self.mark_init(range, true);
        self.provenance.clear(range, cx)?;

        Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
    }

    /// A raw pointer variant of `get_bytes_mut` that avoids invalidating existing aliases into this memory.
    pub fn get_bytes_mut_ptr(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<*mut [u8]> {
        self.mark_init(range, true);
        self.provenance.clear(range, cx)?;

        assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
        let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
        let len = range.end().bytes_usize() - range.start.bytes_usize();
        Ok(ptr::slice_from_raw_parts_mut(begin_ptr, len))
    }
}
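
// Quick contrast of the byte accessors above (hypothetical 4-byte read; `alloc` and `cx` are a
// pre-existing allocation and any `HasDataLayout`):
//
//     let range = alloc_range(Size::ZERO, Size::from_bytes(4));
//     let raw = alloc.get_bytes_unchecked(range); // no checks at all
//     let checked = alloc.get_bytes_strip_provenance(cx, range)?; // errors on uninit bytes, and on
//                                                                 // pointer bytes when provenance
//                                                                 // cannot be stripped
//
// The `_mut` variants additionally mark the range as initialized and clear its provenance.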

/// Reading and writing.
impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
    /// Sets the init bit for the given range.
    fn mark_init(&mut self, range: AllocRange, is_init: bool) {
        if range.size.bytes() == 0 {
            return;
        }
        assert!(self.mutability == Mutability::Mut);
        self.init_mask.set_range(range, is_init);
    }

    /// Reads a *non-ZST* scalar.
    ///
    /// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine
    /// supports that) provenance is entirely ignored.
    ///
    /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        read_provenance: bool,
    ) -> AllocResult<Scalar<Prov>> {
        // First and foremost, if anything is uninit, bail.
        if self.init_mask.is_range_initialized(range).is_err() {
            return Err(AllocError::InvalidUninitBytes(None));
        }

        // Get the integer part of the result. We HAVE TO check provenance before returning this!
        let bytes = self.get_bytes_unchecked(range);
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();

        if read_provenance {
            assert_eq!(range.size, cx.data_layout().pointer_size);

            // When reading data with provenance, the easy case is finding provenance exactly where we
            // are reading, then we can put data and provenance back together and return that.
            if let Some(prov) = self.provenance.get_ptr(range.start) {
                // Now we can return the bits, with their appropriate provenance.
                let ptr = Pointer::new(prov, Size::from_bytes(bits));
                return Ok(Scalar::from_pointer(ptr, cx));
            }

            // If we can work on pointers byte-wise, join the byte-wise provenances.
            if Prov::OFFSET_IS_ADDR {
                let mut prov = self.provenance.get(range.start, cx);
                for offset in Size::from_bytes(1)..range.size {
                    let this_prov = self.provenance.get(range.start + offset, cx);
                    prov = Prov::join(prov, this_prov);
                }
                // Now use this provenance.
                let ptr = Pointer::new(prov, Size::from_bytes(bits));
                return Ok(Scalar::from_maybe_pointer(ptr, cx));
            }
        } else {
            // We are *not* reading a pointer.
            // If we can just ignore provenance, do exactly that.
            if Prov::OFFSET_IS_ADDR {
                // We just strip provenance.
                return Ok(Scalar::from_uint(bits, range.size));
            }
        }

        // Fallback path for when we cannot treat provenance bytewise or ignore it.
        assert!(!Prov::OFFSET_IS_ADDR);
        if !self.provenance.range_empty(range, cx) {
            return Err(AllocError::ReadPointerAsBytes);
        }
        // There is no provenance, we can just return the bits.
        Ok(Scalar::from_uint(bits, range.size))
    }

    /// Writes a *non-ZST* scalar.
    ///
    /// ZSTs can't be written because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        val: Scalar<Prov>,
    ) -> AllocResult {
        assert!(self.mutability == Mutability::Mut);

        // `to_bits_or_ptr_internal` is the right method because we just want to store this data
        // as-is into memory.
        let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
            Right(ptr) => {
                let (provenance, offset) = ptr.into_parts();
                (u128::from(offset.bytes()), Some(provenance))
            }
            Left(data) => (data, None),
        };

        let endian = cx.data_layout().endian;
        let dst = self.get_bytes_mut(cx, range)?;
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also store some provenance.
        if let Some(provenance) = provenance {
            assert_eq!(range.size, cx.data_layout().pointer_size);
            self.provenance.insert_ptr(range.start, provenance, cx);
        }

        Ok(())
    }
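
    // Round-trip sketch for `write_scalar`/`read_scalar` above (hypothetical mutable `alloc`,
    // pointer-sized range, `cx: &impl HasDataLayout`, some `ptr: Pointer<Prov>`): a scalar
    // written with provenance reads back intact when `read_provenance` is `true`.
    //
    //     let range = alloc_range(Size::ZERO, cx.data_layout().pointer_size);
    //     alloc.write_scalar(cx, range, Scalar::from_pointer(ptr, cx))?;
    //     let val = alloc.read_scalar(cx, range, /* read_provenance */ true)?;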

    /// Write "uninit" to the given memory range.
    pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        self.mark_init(range, false);
        self.provenance.clear(range, cx)?;
        return Ok(());
    }

    /// Applies a previously prepared provenance copy.
    /// The affected range, as defined in the parameters to `provenance().prepare_copy`, is
    /// expected to be clear of provenance.
    ///
    /// This is dangerous to use as it can violate internal `Allocation` invariants!
    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
    pub fn provenance_apply_copy(&mut self, copy: ProvenanceCopy<Prov>) {
        self.provenance.apply_copy(copy)
    }

    /// Applies a previously prepared copy of the init mask.
    ///
    /// This is dangerous to use as it can violate internal `Allocation` invariants!
    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
    pub fn init_mask_apply_copy(&mut self, copy: InitCopy, range: AllocRange, repeat: u64) {
        self.init_mask.apply_copy(copy, range, repeat)
    }
}