//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
//! integer. It is crucial that these operations call `check_ptr_access` *before*
//! short-circuiting the empty case!

use std::borrow::Cow;
use std::collections::VecDeque;
use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::ptr;

use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::ty::{Instance, ParamEnv, TyCtxt};
use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};

use super::{
    AllocId, AllocMap, Allocation, AllocationExtra, CheckInAllocMsg, GlobalAlloc, InterpResult,
    Machine, MayLeak, Pointer, PointerArithmetic, Scalar,
};
use crate::util::pretty;

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory backing vtables. Error if ever deallocated.
    Vtable,
    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::Vtable => true,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
            MemoryKind::Vtable => write!(f, "vtable"),
            MemoryKind::CallerLocation => write!(f, "caller location"),
            MemoryKind::Machine(m) => write!(f, "{}", m),
        }
    }
}

/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
#[derive(Debug, Copy, Clone)]
pub enum AllocCheck {
    /// Allocation must be live and not a function pointer.
    Dereferenceable,
    /// Allocation needs to be live, but may be a function pointer.
    Live,
    /// Allocation may be dead.
    MaybeDead,
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => Ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get_raw`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// global and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a global (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a global, doing so will
    /// create a copy of the global allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with NULL, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,

    /// Extra data added by the machine.
    pub extra: M::MemoryExtra,

    /// Lets us implement `HasDataLayout`, which is awfully convenient.
    pub tcx: TyCtxt<'tcx>,
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn new(tcx: TyCtxt<'tcx>, extra: M::MemoryExtra) -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxHashMap::default(),
            dead_alloc_map: FxHashMap::default(),
            extra,
            tcx,
        }
    }

    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation. Must never be used
    /// for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
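    ///
    /// For example (an illustrative sketch, not part of the original source; assumes
    /// `mem: &Memory<'mir, 'tcx, M>` and an `alloc_id` obtained from the `tcx`):
    ///
    /// ```ignore (illustrative)
    /// let ptr = mem.global_base_pointer(Pointer::from(alloc_id))?;
    /// ```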
    #[inline]
    pub fn global_base_pointer(
        &self,
        mut ptr: Pointer,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        // We need to handle `extern static`.
        let ptr = match self.tcx.get_global_alloc(ptr.alloc_id) {
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
                bug!("global memory cannot point to thread-local static")
            }
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
                ptr.alloc_id = M::extern_static_alloc_id(self, def_id)?;
                ptr
            }
            _ => {
                // No need to change the `AllocId`.
                ptr
            }
        };
        // And we need to get the tag.
        let tag = M::tag_global_base_pointer(&self.extra, ptr.alloc_id);
        Ok(ptr.with_tag(tag))
    }

    pub fn create_fn_alloc(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
    ) -> Pointer<M::PointerTag> {
        let id = match fn_val {
            FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.reserve_alloc_id();
                let old = self.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        // Functions are global allocations, so make sure we get the right base pointer.
        // We know this is not an `extern static` so this cannot fail.
        self.global_base_pointer(Pointer::from(id)).unwrap()
    }

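    /// Allocates a new, uninitialized allocation of the given size and alignment and
    /// returns a (tagged) pointer to its start.
    ///
    /// For example (an illustrative sketch, not part of the original source; assumes
    /// `mem: &mut Memory<'mir, 'tcx, M>`):
    ///
    /// ```ignore (illustrative)
    /// let ptr = mem.allocate(Size::from_bytes(4), Align::from_bytes(4).unwrap(), MemoryKind::Stack);
    /// mem.write_bytes(ptr.into(), vec![0u8; 4])?;
    /// ```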
    pub fn allocate(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::uninit(size, align);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_bytes(
        &mut self,
        bytes: &[u8],
        kind: MemoryKind<M::MemoryKind>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::from_byte_aligned_bytes(bytes);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_with(
        &mut self,
        alloc: Allocation,
        kind: MemoryKind<M::MemoryKind>,
    ) -> Pointer<M::PointerTag> {
        let id = self.tcx.reserve_alloc_id();
        debug_assert_ne!(
            Some(kind),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
        // This is a new allocation, not a new global one, so no `global_base_ptr`.
        let (alloc, tag) = M::init_allocation_extra(&self.extra, id, Cow::Owned(alloc), Some(kind));
        self.alloc_map.insert(id, (kind, alloc.into_owned()));
        Pointer::from(id).with_tag(tag)
    }

    pub fn reallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        if ptr.offset.bytes() != 0 {
            throw_ub_format!(
                "reallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate(new_size, new_align, kind);
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_raw(ptr.alloc_id)?.size,
        };
        self.copy(ptr, new_ptr, old_size.min(new_size), /*nonoverlapping*/ true)?;
        self.deallocate(ptr, old_size_and_align, kind)?;

        Ok(new_ptr)
    }

    /// Deallocate a local, or do nothing if that local has been made into a global.
    pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx> {
        // The allocation might already have been removed by global interning.
        // This can only really happen in the CTFE instance, not in miri.
        if self.alloc_map.contains_key(&ptr.alloc_id) {
            self.deallocate(ptr, None, MemoryKind::Stack)
        } else {
            Ok(())
        }
    }

    pub fn deallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
        trace!("deallocating: {}", ptr.alloc_id);

        if ptr.offset.bytes() != 0 {
            throw_ub_format!(
                "deallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        M::before_deallocation(&mut self.extra, ptr.alloc_id)?;

        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
            Some(alloc) => alloc,
            None => {
                // Deallocating global memory -- always an error
                return Err(match self.tcx.get_global_alloc(ptr.alloc_id) {
                    Some(GlobalAlloc::Function(..)) => {
                        err_ub_format!("deallocating {}, which is a function", ptr.alloc_id)
                    }
                    Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                        err_ub_format!("deallocating {}, which is static memory", ptr.alloc_id)
                    }
                    None => err_ub!(PointerUseAfterFree(ptr.alloc_id)),
                }
                .into());
            }
        };

        if alloc_kind != kind {
            throw_ub_format!(
                "deallocating {}, which is {} memory, using {} deallocation operation",
                ptr.alloc_id,
                alloc_kind,
                kind
            );
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size || align != alloc.align {
                throw_ub_format!(
                    "incorrect layout on deallocation: {} has size {} and alignment {}, but gave size {} and alignment {}",
                    ptr.alloc_id,
                    alloc.size.bytes(),
                    alloc.align.bytes(),
                    size.bytes(),
                    align.bytes(),
                )
            }
        }

        // Let the machine take some extra action
        let size = alloc.size;
        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.dead_alloc_map.insert(ptr.alloc_id, (alloc.size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }

    /// Check if the given scalar is allowed to do a memory access of given `size`
    /// and `align`. On success, returns `None` for zero-sized accesses (where
    /// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
    /// Crucially, if the input is a `Pointer`, we will test it for liveness
    /// *even if* the size is 0.
    ///
    /// Everyone accessing memory based on a `Scalar` should use this method to get the
    /// `Pointer` they need. And even if you already have a `Pointer`, call this method
    /// to make sure it is sufficiently aligned and not dangling. Not doing that may
    /// cause ICEs.
    ///
    /// Most of the time you should use `check_mplace_access`, but when you just have a pointer,
    /// this method is still appropriate.
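    ///
    /// For example (an illustrative sketch, not part of the original source), the typical
    /// call pattern used by the access methods in this file:
    ///
    /// ```ignore (illustrative)
    /// let ptr = match self.check_ptr_access(sptr, size, Align::from_bytes(1).unwrap())? {
    ///     Some(ptr) => ptr,
    ///     None => return Ok(()), // zero-sized access, nothing to do
    /// };
    /// ```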
    #[inline(always)]
    pub fn check_ptr_access(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        let align = M::enforce_alignment(&self.extra).then_some(align);
        self.check_ptr_access_align(sptr, size, align, CheckInAllocMsg::MemoryAccessTest)
    }

    /// Like `check_ptr_access`, but *definitely* checks alignment when `align`
    /// is `Some` (overriding `M::enforce_alignment`). Also lets the caller control
    /// the error message for the out-of-bounds case.
    pub fn check_ptr_access_align(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Option<Align>,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
            if offset % align.bytes() == 0 {
                Ok(())
            } else {
                // The largest power of two that divides `offset`.
                let offset_pow2 = 1 << offset.trailing_zeros();
                throw_ub!(AlignmentCheckFailed {
                    has: Align::from_bytes(offset_pow2).unwrap(),
                    required: align,
                })
            }
        }
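        // For example (illustrative): for `offset == 12` (binary 1100), `trailing_zeros()`
        // is 2, so `offset_pow2 == 4`; an 8-byte alignment requirement would then fail
        // with `has: 4, required: 8`.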

        // Normalize to a `Pointer` if we definitely need one.
        let normalized = if size.bytes() == 0 {
            // Can be an integer, just take what we got. We do NOT `force_bits` here;
            // if this is already a `Pointer` we want to do the bounds checks!
            sptr
        } else {
            // A "real" access, we must get a pointer to be able to check the bounds.
            Scalar::from(self.force_ptr(sptr)?)
        };
        Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
            Ok(bits) => {
                let bits = u64::try_from(bits).unwrap(); // it's ptr-sized
                assert!(size.bytes() == 0);
                // Must be non-NULL.
                if bits == 0 {
                    throw_ub!(DanglingIntPointer(0, msg))
                }
                // Must be aligned.
                if let Some(align) = align {
                    check_offset_align(bits, align)?;
                }
                None
            }
            Err(ptr) => {
                let (allocation_size, alloc_align) =
                    self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
                // Test bounds. This also ensures non-NULL.
                // It is sufficient to check this for the end pointer. The addition
                // checks for overflow.
                let end_ptr = ptr.offset(size, self)?;
                if end_ptr.offset > allocation_size {
                    // equal is okay!
                    throw_ub!(PointerOutOfBounds { ptr: end_ptr.erase_tag(), msg, allocation_size })
                }
                // Test align. Check this last; if both bounds and alignment are violated
                // we want the error to be about the bounds.
                if let Some(align) = align {
                    if M::force_int_for_alignment_check(&self.extra) {
                        let bits = self
                            .force_bits(ptr.into(), self.pointer_size())
                            .expect("ptr-to-int cast for align check should never fail");
                        check_offset_align(bits.try_into().unwrap(), align)?;
                    } else {
                        // Check allocation alignment and offset alignment.
                        if alloc_align.bytes() < align.bytes() {
                            throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
                        }
                        check_offset_align(ptr.offset.bytes(), align)?;
                    }
                }

                // We can still be zero-sized in this branch, in which case we have to
                // return `None`.
                if size.bytes() == 0 { None } else { Some(ptr) }
            }
        })
    }

    /// Test if the pointer might be NULL.
    pub fn ptr_may_be_null(&self, ptr: Pointer<M::PointerTag>) -> bool {
        let (size, _align) = self
            .get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
            .expect("alloc info with MaybeDead cannot fail");
        // If the pointer is out-of-bounds, it may be null.
        // Note that one-past-the-end (offset == size) is still inbounds, and never null.
        ptr.offset > size
    }
}

/// Allocation accessors
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Helper function to obtain a global (tcx) allocation.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer tag, so it is indirected through
    /// `M::init_allocation_extra`.
    fn get_global_alloc(
        memory_extra: &M::MemoryExtra,
        tcx: TyCtxt<'tcx>,
        id: AllocId,
        is_write: bool,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
        let (alloc, def_id) = match tcx.get_global_alloc(id) {
            Some(GlobalAlloc::Memory(mem)) => {
                // Memory of a constant or promoted or anonymous memory referenced by a static.
                (mem, None)
            }
            Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
            None => throw_ub!(PointerUseAfterFree(id)),
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(tcx.is_static(def_id));
                assert!(!tcx.is_thread_local_static(def_id));
                // Notice that every static has two `AllocId` that will resolve to the same
                // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                // and the other one maps to `GlobalAlloc::Memory`, this is returned by
                // `eval_static_initializer` and it is the "resolved" ID.
                // The resolved ID is never used by the interpreted program, it is hidden.
                // This is relied upon for soundness of const-patterns; a pointer to the resolved
                // ID would "sidestep" the checks that make sure consts do not point to statics!
                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                // contains a reference to memory that was created during its evaluation (i.e., not
                // to another static), those inner references only exist in "resolved" form.
                if tcx.is_foreign_item(def_id) {
                    throw_unsup!(ReadExternStatic(def_id));
                }

                (tcx.eval_static_initializer(def_id)?, Some(def_id))
            }
        };
        M::before_access_global(memory_extra, id, alloc, def_id, is_write)?;
        let alloc = Cow::Borrowed(alloc);
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        let (alloc, tag) = M::init_allocation_extra(
            memory_extra,
            id, // always use the ID we got as input, not the "hidden" one.
            alloc,
            M::GLOBAL_KIND.map(MemoryKind::Machine),
        );
        // Sanity check that this is the same pointer we would have gotten via `global_base_pointer`.
        debug_assert_eq!(tag, M::tag_global_base_pointer(memory_extra, id));
        Ok(alloc)
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
    pub fn get_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
        let a = self.alloc_map.get_or(id, || {
            let alloc = Self::get_global_alloc(&self.extra, self.tcx, id, /*is_write*/ false)
                .map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::GLOBAL_KIND.expect(
                        "I got a global allocation that I have to copy but the machine does \
                            not expect that to happen",
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
    pub fn get_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
        let tcx = self.tcx;
        let memory_extra = &self.extra;
        let a = self.alloc_map.get_mut_or(id, || {
            // Need to make a copy, even if `get_global_alloc` is able
            // to give us a cheap reference.
            let alloc = Self::get_global_alloc(memory_extra, tcx, id, /*is_write*/ true)?;
            if alloc.mutability == Mutability::Not {
                throw_ub!(WriteToReadOnly(id))
            }
            let kind = M::GLOBAL_KIND.expect(
                "I got a global allocation that I have to copy but the machine does \
                    not expect that to happen",
            );
            Ok((MemoryKind::Machine(kind), alloc.into_owned()))
        });
        // Unpack the error type manually because type inference doesn't
        // work otherwise (and we cannot help it because of `impl Trait`)
        match a {
            Err(e) => Err(e),
            Ok(a) => {
                let a = &mut a.1;
                if a.mutability == Mutability::Not {
                    throw_ub!(WriteToReadOnly(id))
                }
                Ok(a)
            }
        }
    }

    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    ///
    /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
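    ///
    /// For example (an illustrative sketch, not part of the original source), querying the
    /// extent of the allocation behind a pointer that must currently be dereferenceable:
    ///
    /// ```ignore (illustrative)
    /// let (size, align) = mem.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
    /// ```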
    pub fn get_size_and_align(
        &self,
        id: AllocId,
        liveness: AllocCheck,
    ) -> InterpResult<'static, (Size, Align)> {
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a global's allocation in miri
        if let Some((_, alloc)) = self.alloc_map.get(id) {
            return Ok((alloc.size, alloc.align));
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if self.get_fn_alloc(id).is_some() {
            return if let AllocCheck::Dereferenceable = liveness {
                // The caller requested no function pointers.
                throw_ub!(DerefFunctionPointer(id))
            } else {
                Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
            };
        }

        // # Statics
        // Can't do this in the match argument, we may get cycle errors since the lock would
        // be held throughout the match.
        match self.tcx.get_global_alloc(id) {
            Some(GlobalAlloc::Static(did)) => {
                assert!(!self.tcx.is_thread_local_static(did));
                // Use size and align of the type.
                let ty = self.tcx.type_of(did);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                Ok((layout.size, layout.align.abi))
            }
            Some(GlobalAlloc::Memory(alloc)) => {
                // Need to duplicate the logic here, because the global allocations have
                // different associated types than the interpreter-local ones.
                Ok((alloc.size, alloc.align))
            }
            Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
            // The rest must be dead.
            None => {
                if let AllocCheck::MaybeDead = liveness {
                    // Deallocated pointers are allowed, we should be able to find
                    // them in the map.
                    Ok(*self
                        .dead_alloc_map
                        .get(&id)
                        .expect("deallocated pointers should all be recorded in `dead_alloc_map`"))
                } else {
                    throw_ub!(PointerUseAfterFree(id))
                }
            }
        }
    }

    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
        trace!("reading fn ptr: {}", id);
        if let Some(extra) = self.extra_fn_ptr_map.get(&id) {
            Some(FnVal::Other(*extra))
        } else {
            match self.tcx.get_global_alloc(id) {
                Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
                _ => None,
            }
        }
    }

    pub fn get_fn(
        &self,
        ptr: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        let ptr = self.force_ptr(ptr)?; // We definitely need a pointer value.
        if ptr.offset.bytes() != 0 {
            throw_ub!(InvalidFunctionPointer(ptr.erase_tag()))
        }
        self.get_fn_alloc(ptr.alloc_id)
            .ok_or_else(|| err_ub!(InvalidFunctionPointer(ptr.erase_tag())).into())
    }

    pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_raw_mut(id)?.mutability = Mutability::Not;
        Ok(())
    }

    /// Create a lazy debug printer that prints the given allocation and all allocations it points
    /// to, recursively.
    #[must_use]
    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> {
        self.dump_allocs(vec![id])
    }

    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
    /// recursively.
    #[must_use]
    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> {
        allocs.sort();
        allocs.dedup();
        DumpAllocs { mem: self, allocs }
    }

    /// Print leaked memory. Allocations reachable from `static_roots` or a `Global` allocation
    /// are not considered leaked. Allocations whose kind's `may_leak()` returns true are not reported either.
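    ///
    /// Returns the number of leaked allocations. For example (an illustrative sketch, not
    /// part of the original source), a machine might check for leaks at the end of execution:
    ///
    /// ```ignore (illustrative)
    /// let leak_count = mem.leak_report(&[]);
    /// assert_eq!(leak_count, 0);
    /// ```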
    pub fn leak_report(&self, static_roots: &[AllocId]) -> usize {
        // Collect the set of allocations that are *reachable* from `Global` allocations.
        let reachable = {
            let mut reachable = FxHashSet::default();
            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
            let mut todo: Vec<_> = self.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
                if Some(kind) == global_kind { Some(id) } else { None }
            });
            todo.extend(static_roots);
            while let Some(id) = todo.pop() {
                if reachable.insert(id) {
                    // This is a new allocation, add its relocations to `todo`.
                    if let Some((_, alloc)) = self.alloc_map.get(id) {
                        todo.extend(alloc.relocations().values().map(|&(_, target_id)| target_id));
                    }
                }
            }
            reachable
        };

        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
        let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
        });
        let n = leaks.len();
        if n > 0 {
            eprintln!("The following memory was leaked: {:?}", self.dump_allocs(leaks));
        }
        n
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

#[doc(hidden)]
/// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    mem: &'a Memory<'mir, 'tcx, M>,
    allocs: Vec<AllocId>,
}

impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Cannot be a closure because it is generic in `Tag`, `Extra`.
        fn write_allocation_track_relocs<'tcx, Tag: Copy + fmt::Debug, Extra>(
            fmt: &mut std::fmt::Formatter<'_>,
            tcx: TyCtxt<'tcx>,
            allocs_to_print: &mut VecDeque<AllocId>,
            alloc: &Allocation<Tag, Extra>,
        ) -> std::fmt::Result {
            for &(_, target_id) in alloc.relocations().values() {
                allocs_to_print.push_back(target_id);
            }
            write!(fmt, "{}", pretty::display_allocation(tcx, alloc))
        }

        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
        // `allocs_printed` contains all allocations that we have already printed.
        let mut allocs_printed = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            if !allocs_printed.insert(id) {
                // Already printed, so skip this.
                continue;
            }

            write!(fmt, "{}", id)?;
            match self.mem.alloc_map.get(id) {
                Some(&(kind, ref alloc)) => {
                    // normal alloc
                    write!(fmt, " ({}, ", kind)?;
                    write_allocation_track_relocs(
                        &mut *fmt,
                        self.mem.tcx,
                        &mut allocs_to_print,
                        alloc,
                    )?;
                }
                None => {
                    // global alloc
                    match self.mem.tcx.get_global_alloc(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            write!(fmt, " (unchanged global, ")?;
                            write_allocation_track_relocs(
                                &mut *fmt,
                                self.mem.tcx,
                                &mut allocs_to_print,
                                alloc,
                            )?;
                        }
                        Some(GlobalAlloc::Function(func)) => {
                            write!(fmt, " (fn: {})", func)?;
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            write!(fmt, " (static: {})", self.mem.tcx.def_path_str(did))?;
                        }
                        None => {
                            write!(fmt, " (deallocated)")?;
                        }
                    }
                }
            }
            writeln!(fmt)?;
        }
        Ok(())
    }
}

/// Reading and writing.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Reads the given number of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> {
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => return Ok(&[]), // zero-sized access
        };
        self.get_raw(ptr.alloc_id)?.get_bytes(self, ptr, size)
    }

    /// Reads a 0-terminated sequence of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_c_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, &[u8]> {
        let ptr = self.force_ptr(ptr)?; // We need to read at least 1 byte, so we *need* a ptr.
        self.get_raw(ptr.alloc_id)?.read_c_str(self, ptr)
    }

    /// Reads a 0x0000-terminated u16-sequence from memory. Returns them as a `Vec<u16>`.
    /// Terminator 0x0000 is not included in the returned `Vec<u16>`.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_wide_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, Vec<u16>> {
        let size_2bytes = Size::from_bytes(2);
        let align_2bytes = Align::from_bytes(2).unwrap();
        // We need to read at least 2 bytes, so we *need* a ptr.
        let mut ptr = self.force_ptr(ptr)?;
        let allocation = self.get_raw(ptr.alloc_id)?;
        let mut u16_seq = Vec::new();

        loop {
            ptr = self
                .check_ptr_access(ptr.into(), size_2bytes, align_2bytes)?
                .expect("cannot be a ZST");
            let single_u16 = allocation.read_scalar(self, ptr, size_2bytes)?.to_u16()?;
            if single_u16 != 0x0000 {
                u16_seq.push(single_u16);
                ptr = ptr.offset(size_2bytes, self)?;
            } else {
                break;
            }
        }
        Ok(u16_seq)
    }

    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
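    ///
    /// For example (an illustrative sketch, not part of the original source; the iterator must
    /// report an exact `size_hint`):
    ///
    /// ```ignore (illustrative)
    /// mem.write_bytes(ptr, b"hello".iter().copied())?;
    /// ```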
    pub fn write_bytes(
        &mut self,
        ptr: Scalar<M::PointerTag>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let size = Size::from_bytes(src.size_hint().0);
        // `write_bytes` checks that this lower bound `size` matches the upper bound and reality.
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => {
                // zero-sized access
                assert!(src.next().is_none(), "iterator said it was empty but returned an element");
                return Ok(());
            }
        };
        let tcx = self.tcx;
        self.get_raw_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src)
    }

    /// Writes the given stream of u16s into memory.
    ///
    /// Performs appropriate bounds checks.
    pub fn write_u16s(
        &mut self,
        ptr: Scalar<M::PointerTag>,
        src: impl IntoIterator<Item = u16>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");

        let size = Size::from_bytes(lower);
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(2).unwrap())? {
            Some(ptr) => ptr,
            None => {
                // zero-sized access
                assert!(src.next().is_none(), "iterator said it was empty but returned an element");
                return Ok(());
            }
        };
        let tcx = self.tcx;
        let allocation = self.get_raw_mut(ptr.alloc_id)?;

        for idx in 0..len {
            let val = Scalar::from_u16(
                src.next().expect("iterator was shorter than it said it would be"),
            );
            let offset_ptr = ptr.offset(Size::from_bytes(idx) * 2, &tcx)?; // `Size` multiplication
            allocation.write_scalar(&tcx, offset_ptr, val.into(), Size::from_bytes(2))?;
        }
        assert!(src.next().is_none(), "iterator was longer than it said it would be");
        Ok(())
    }

    /// Expects the caller to have checked bounds and alignment.
    pub fn copy(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.copy_repeatedly(src, dest, size, 1, nonoverlapping)
    }

    /// Expects the caller to have checked bounds and alignment.
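    ///
    /// Copies `size` bytes from `src` to `dest`, repeating the copy `length` times so that
    /// `dest` is filled with `length` consecutive copies. For example (an illustrative sketch,
    /// not part of the original source):
    ///
    /// ```ignore (illustrative)
    /// // `dest` must have room for `3 * size` bytes.
    /// mem.copy_repeatedly(src, dest, size, 3, /*nonoverlapping*/ true)?;
    /// ```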
    pub fn copy_repeatedly(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        length: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        // first copy the relocations to a temporary buffer, because
        // `get_bytes_mut` will clear the relocations, which is correct,
        // since we don't want to keep any relocations at the target.
        // (`get_bytes_with_uninit_and_ptr` below checks that there are no
        // relocations overlapping the edges; those would not be handled correctly).
        let relocations =
            self.get_raw(src.alloc_id)?.prepare_relocation_copy(self, src, size, dest, length);

        let tcx = self.tcx;

        // This checks relocation edges on the src.
        let src_bytes =
            self.get_raw(src.alloc_id)?.get_bytes_with_uninit_and_ptr(&tcx, src, size)?.as_ptr();
        let dest_bytes =
            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; // `Size` multiplication

        // If `dest_bytes` is empty we just optimize to not run anything for zsts.
        // See #67539
        if dest_bytes.is_empty() {
            return Ok(());
        }

        let dest_bytes = dest_bytes.as_mut_ptr();

        // Prepare a copy of the initialization mask.
        let compressed = self.get_raw(src.alloc_id)?.compress_uninit_range(src, size);

        if compressed.no_bytes_init() {
            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
            // is marked as uninitialized but we otherwise omit changing the byte representation which may
            // be arbitrary for uninitialized bytes.
            // This also avoids writing to the target bytes so that the backing allocation is never
            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
            // operating systems this can avoid physically allocating the page.
            let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
            dest_alloc.mark_init(dest, size * length, false); // `Size` multiplication
            dest_alloc.mark_relocation_range(relocations);
            return Ok(());
        }

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            if src.alloc_id == dest.alloc_id {
                if nonoverlapping {
                    // `Size` additions
                    if (src.offset <= dest.offset && src.offset + size > dest.offset)
                        || (dest.offset <= src.offset && dest.offset + size > src.offset)
                    {
                        throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
                    }
                }

                for i in 0..length {
                    ptr::copy(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            } else {
                for i in 0..length {
                    ptr::copy_nonoverlapping(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            }
        }

        // now fill in all the data
        self.get_raw_mut(dest.alloc_id)?.mark_compressed_init_range(
            &compressed,
            dest,
            size,
            length,
        );

        // copy the relocations to the destination
        self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations);

        Ok(())
    }
}

/// Machine pointer introspection.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn force_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        match scalar {
            Scalar::Ptr(ptr) => Ok(ptr),
            _ => M::int_to_ptr(&self, scalar.to_machine_usize(self)?),
        }
    }

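    /// The inverse direction of `force_ptr`: returns the integer value of `scalar`,
    /// asking the machine to cast a pointer to an integer if necessary.
    ///
    /// For example (an illustrative sketch, not part of the original source):
    ///
    /// ```ignore (illustrative)
    /// let bits = mem.force_bits(scalar, mem.pointer_size())?;
    /// ```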
    pub fn force_bits(
        &self,
        scalar: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, u128> {
        match scalar.to_bits_or_ptr(size, self) {
            Ok(bits) => Ok(bits),
            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)?.into()),
        }
    }
}