//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
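//!
//! For example, a zero-sized access through a dangling but properly aligned,
//! non-NULL integer address is allowed. A minimal sketch of that contract,
//! assuming a `memory: Memory<'mir, 'tcx, M>` value is at hand (illustrative,
//! not a doctest):
//!
//! ```ignore (illustrative)
//! // Address 8 is fine for a zero-sized, 8-aligned "access": the alignment
//! // check runs first, and then there is no memory left to touch.
//! let sptr = Scalar::from_machine_usize(8, &memory);
//! let access = memory.check_ptr_access(sptr, Size::ZERO, Align::from_bytes(8).unwrap())?;
//! assert!(access.is_none());
//! ```
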
use std::borrow::Cow;
use std::collections::VecDeque;
use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::ptr;

use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::ty::{Instance, ParamEnv, TyCtxt};
use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};

use super::{
    AllocId, AllocMap, Allocation, AllocationExtra, CheckInAllocMsg, GlobalAlloc, InterpResult,
    Machine, MayLeak, Pointer, PointerArithmetic, Scalar,
};
use crate::util::pretty;

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory backing vtables. Error if ever deallocated.
    Vtable,
    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::Vtable => true,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
            MemoryKind::Vtable => write!(f, "vtable"),
            MemoryKind::CallerLocation => write!(f, "caller location"),
            MemoryKind::Machine(m) => write!(f, "{}", m),
        }
    }
}

/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
#[derive(Debug, Copy, Clone)]
pub enum AllocCheck {
    /// Allocation must be live and not a function pointer.
    Dereferenceable,
    /// Allocation needs to be live, but may be a function pointer.
    Live,
    /// Allocation may be dead.
    MaybeDead,
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => Ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// global and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a global (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a global, doing so will
    /// create a copy of the global allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with NULL, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,

    /// Extra data added by the machine.
    pub extra: M::MemoryExtra,

    /// Lets us implement `HasDataLayout`, which is awfully convenient.
    pub tcx: TyCtxt<'tcx>,
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M> {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn new(tcx: TyCtxt<'tcx>, extra: M::MemoryExtra) -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxHashMap::default(),
            dead_alloc_map: FxHashMap::default(),
            extra,
            tcx,
        }
    }

    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation. Must never be used
    /// for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
    #[inline]
    pub fn global_base_pointer(
        &self,
        mut ptr: Pointer,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        // We need to handle `extern static`.
        let ptr = match self.tcx.get_global_alloc(ptr.alloc_id) {
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
                bug!("global memory cannot point to thread-local static")
            }
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
                ptr.alloc_id = M::extern_static_alloc_id(self, def_id)?;
                ptr
            }
            _ => {
                // No need to change the `AllocId`.
                ptr
            }
        };
        // And we need to get the tag.
        let tag = M::tag_global_base_pointer(&self.extra, ptr.alloc_id);
        Ok(ptr.with_tag(tag))
    }

    pub fn create_fn_alloc(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
    ) -> Pointer<M::PointerTag> {
        let id = match fn_val {
            FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.reserve_alloc_id();
                let old = self.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        // Functions are global allocations, so make sure we get the right base pointer.
        // We know this is not an `extern static` so this cannot fail.
        self.global_base_pointer(Pointer::from(id)).unwrap()
    }

    pub fn allocate(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::uninit(size, align);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_bytes(
        &mut self,
        bytes: &[u8],
        kind: MemoryKind<M::MemoryKind>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::from_byte_aligned_bytes(bytes);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_with(
        &mut self,
        alloc: Allocation,
        kind: MemoryKind<M::MemoryKind>,
    ) -> Pointer<M::PointerTag> {
        let id = self.tcx.reserve_alloc_id();
        debug_assert_ne!(
            Some(kind),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
        // This is a new allocation, not a new global one, so no `global_base_ptr`.
        let (alloc, tag) = M::init_allocation_extra(&self.extra, id, Cow::Owned(alloc), Some(kind));
        self.alloc_map.insert(id, (kind, alloc.into_owned()));
        Pointer::from(id).with_tag(tag)
    }

    pub fn reallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        if ptr.offset.bytes() != 0 {
            throw_ub_format!(
                "reallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate(new_size, new_align, kind);
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_raw(ptr.alloc_id)?.size,
        };
        self.copy(ptr, new_ptr, old_size.min(new_size), /*nonoverlapping*/ true)?;
        self.deallocate(ptr, old_size_and_align, kind)?;

        Ok(new_ptr)
    }

    /// Deallocate a local, or do nothing if that local has been made into a global.
    pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx> {
        // The allocation might be already removed by global interning.
        // This can only really happen in the CTFE instance, not in miri.
        if self.alloc_map.contains_key(&ptr.alloc_id) {
            self.deallocate(ptr, None, MemoryKind::Stack)
        } else {
            Ok(())
        }
    }

    pub fn deallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
        trace!("deallocating: {}", ptr.alloc_id);

        if ptr.offset.bytes() != 0 {
            throw_ub_format!(
                "deallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        M::before_deallocation(&mut self.extra, ptr.alloc_id)?;

        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
            Some(alloc) => alloc,
            None => {
                // Deallocating global memory -- always an error
                return Err(match self.tcx.get_global_alloc(ptr.alloc_id) {
                    Some(GlobalAlloc::Function(..)) => err_ub_format!("deallocating a function"),
                    Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                        err_ub_format!("deallocating static memory")
                    }
                    None => err_ub!(PointerUseAfterFree(ptr.alloc_id)),
                }
                .into());
            }
        };

        if alloc_kind != kind {
            throw_ub_format!(
                "deallocating {} memory using {} deallocation operation",
                alloc_kind,
                kind
            );
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size || align != alloc.align {
                throw_ub_format!(
                    "incorrect layout on deallocation: allocation has size {} and alignment {}, but gave size {} and alignment {}",
                    alloc.size.bytes(),
                    alloc.align.bytes(),
                    size.bytes(),
                    align.bytes(),
                )
            }
        }

        // Let the machine take some extra action
        let size = alloc.size;
        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.dead_alloc_map.insert(ptr.alloc_id, (alloc.size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }

    /// Check if the given scalar is allowed to do a memory access of given `size`
    /// and `align`. On success, returns `None` for zero-sized accesses (where
    /// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
    /// Crucially, if the input is a `Pointer`, we will test it for liveness
    /// *even if* the size is 0.
    ///
    /// Everyone accessing memory based on a `Scalar` should use this method to get the
    /// `Pointer` they need. And even if you already have a `Pointer`, call this method
    /// to make sure it is sufficiently aligned and not dangling. Not doing that may
    /// cause ICEs.
    ///
    /// Most of the time you should use `check_mplace_access`, but when you just have a pointer,
    /// this method is still appropriate.
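    ///
    /// A sketch of the two success cases (illustrative, not a doctest; assumes
    /// `ptr` is a `Pointer` into a live, 4-aligned allocation of at least 4 bytes):
    ///
    /// ```ignore (illustrative)
    /// // Non-zero size: we get back `Some(ptr)` to use for the actual access.
    /// let access = memory.check_ptr_access(ptr.into(), Size::from_bytes(4), Align::from_bytes(4).unwrap())?;
    /// assert!(access.is_some());
    /// // Zero size: liveness and alignment are still checked, but there is
    /// // nothing left to do, so we get `None`.
    /// let access = memory.check_ptr_access(ptr.into(), Size::ZERO, Align::from_bytes(4).unwrap())?;
    /// assert!(access.is_none());
    /// ```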
    #[inline(always)]
    pub fn check_ptr_access(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        let align = M::enforce_alignment(&self.extra).then_some(align);
        self.check_ptr_access_align(sptr, size, align, CheckInAllocMsg::MemoryAccessTest)
    }

    /// Like `check_ptr_access`, but *definitely* checks alignment when `align`
    /// is `Some` (overriding `M::enforce_alignment`). Also lets the caller control
    /// the error message for the out-of-bounds case.
    pub fn check_ptr_access_align(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Option<Align>,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
            if offset % align.bytes() == 0 {
                Ok(())
            } else {
                // The biggest power of two through which `offset` is divisible.
                let offset_pow2 = 1 << offset.trailing_zeros();
                throw_ub!(AlignmentCheckFailed {
                    has: Align::from_bytes(offset_pow2).unwrap(),
                    required: align,
                })
            }
        }

        // Normalize to a `Pointer` if we definitely need one.
        let normalized = if size.bytes() == 0 {
            // Can be an integer, just take what we got. We do NOT `force_bits` here;
            // if this is already a `Pointer` we want to do the bounds checks!
            sptr
        } else {
            // A "real" access, we must get a pointer to be able to check the bounds.
            Scalar::from(self.force_ptr(sptr)?)
        };
        Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
            Ok(bits) => {
                let bits = u64::try_from(bits).unwrap(); // it's ptr-sized
                assert!(size.bytes() == 0);
                // Must be non-NULL.
                if bits == 0 {
                    throw_ub!(DanglingIntPointer(0, msg))
                }
                // Must be aligned.
                if let Some(align) = align {
                    check_offset_align(bits, align)?;
                }
                None
            }
            Err(ptr) => {
                let (allocation_size, alloc_align) =
                    self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
                // Test bounds. This also ensures non-NULL.
                // It is sufficient to check this for the end pointer. The addition
                // checks for overflow.
                let end_ptr = ptr.offset(size, self)?;
                if end_ptr.offset > allocation_size {
                    // equal is okay!
                    throw_ub!(PointerOutOfBounds { ptr: end_ptr.erase_tag(), msg, allocation_size })
                }
                // Test align. Check this last; if both bounds and alignment are violated
                // we want the error to be about the bounds.
                if let Some(align) = align {
                    if M::force_int_for_alignment_check(&self.extra) {
                        let bits = self
                            .force_bits(ptr.into(), self.pointer_size())
                            .expect("ptr-to-int cast for align check should never fail");
                        check_offset_align(bits.try_into().unwrap(), align)?;
                    } else {
                        // Check allocation alignment and offset alignment.
                        if alloc_align.bytes() < align.bytes() {
                            throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
                        }
                        check_offset_align(ptr.offset.bytes(), align)?;
                    }
                }

                // We can still be zero-sized in this branch, in which case we have to
                // return `None`.
                if size.bytes() == 0 { None } else { Some(ptr) }
            }
        })
    }

    /// Test if the pointer might be NULL.
    pub fn ptr_may_be_null(&self, ptr: Pointer<M::PointerTag>) -> bool {
        let (size, _align) = self
            .get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
            .expect("alloc info with MaybeDead cannot fail");
        // If the pointer is out-of-bounds, it may be null.
        // Note that one-past-the-end (offset == size) is still inbounds, and never null.
        ptr.offset > size
    }
}

/// Allocation accessors
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Helper function to obtain a global (tcx) allocation.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer tag, so it is indirected through
    /// `M::tag_allocation`.
    fn get_global_alloc(
        memory_extra: &M::MemoryExtra,
        tcx: TyCtxt<'tcx>,
        id: AllocId,
        is_write: bool,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
        let (alloc, def_id) = match tcx.get_global_alloc(id) {
            Some(GlobalAlloc::Memory(mem)) => {
                // Memory of a constant or promoted or anonymous memory referenced by a static.
                (mem, None)
            }
            Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
            None => throw_ub!(PointerUseAfterFree(id)),
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(tcx.is_static(def_id));
                assert!(!tcx.is_thread_local_static(def_id));
                // Notice that every static has two `AllocId` that will resolve to the same
                // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                // and the other one maps to `GlobalAlloc::Memory`, this is returned by
                // `const_eval_raw` and it is the "resolved" ID.
                // The resolved ID is never used by the interpreted program, it is hidden.
                // This is relied upon for soundness of const-patterns; a pointer to the resolved
                // ID would "sidestep" the checks that make sure consts do not point to statics!
                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                // contains a reference to memory that was created during its evaluation (i.e., not
                // to another static), those inner references only exist in "resolved" form.
                if tcx.is_foreign_item(def_id) {
                    throw_unsup!(ReadExternStatic(def_id));
                }

                (tcx.eval_static_initializer(def_id)?, Some(def_id))
            }
        };
        M::before_access_global(memory_extra, id, alloc, def_id, is_write)?;
        let alloc = Cow::Borrowed(alloc);
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        let (alloc, tag) = M::init_allocation_extra(
            memory_extra,
            id, // always use the ID we got as input, not the "hidden" one.
            alloc,
            M::GLOBAL_KIND.map(MemoryKind::Machine),
        );
        // Sanity check that this is the same pointer we would have gotten via `global_base_pointer`.
        debug_assert_eq!(tag, M::tag_global_base_pointer(memory_extra, id));
        Ok(alloc)
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
    pub fn get_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
        let a = self.alloc_map.get_or(id, || {
            let alloc = Self::get_global_alloc(&self.extra, self.tcx, id, /*is_write*/ false)
                .map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::GLOBAL_KIND.expect(
                        "I got a global allocation that I have to copy but the machine does \
                         not expect that to happen",
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
    pub fn get_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
        let tcx = self.tcx;
        let memory_extra = &self.extra;
        let a = self.alloc_map.get_mut_or(id, || {
            // Need to make a copy, even if `get_global_alloc` is able
            // to give us a cheap reference.
            let alloc = Self::get_global_alloc(memory_extra, tcx, id, /*is_write*/ true)?;
            if alloc.mutability == Mutability::Not {
                throw_ub!(WriteToReadOnly(id))
            }
            let kind = M::GLOBAL_KIND.expect(
                "I got a global allocation that I have to copy but the machine does \
                 not expect that to happen",
            );
            Ok((MemoryKind::Machine(kind), alloc.into_owned()))
        });
        // Unpack the error type manually because type inference doesn't
        // work otherwise (and we cannot help it because `impl Trait`)
        match a {
            Err(e) => Err(e),
            Ok(a) => {
                let a = &mut a.1;
                if a.mutability == Mutability::Not {
                    throw_ub!(WriteToReadOnly(id))
                }
                Ok(a)
            }
        }
    }

    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    ///
    /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
    pub fn get_size_and_align(
        &self,
        id: AllocId,
        liveness: AllocCheck,
    ) -> InterpResult<'static, (Size, Align)> {
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a global's allocation in miri
        if let Some((_, alloc)) = self.alloc_map.get(id) {
            return Ok((alloc.size, alloc.align));
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if self.get_fn_alloc(id).is_some() {
            return if let AllocCheck::Dereferenceable = liveness {
                // The caller requested no function pointers.
                throw_ub!(DerefFunctionPointer(id))
            } else {
                Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
            };
        }

        // # Statics
        // Can't do this in the match argument, we may get cycle errors since the lock would
        // be held throughout the match.
        match self.tcx.get_global_alloc(id) {
            Some(GlobalAlloc::Static(did)) => {
                assert!(!self.tcx.is_thread_local_static(did));
                // Use size and align of the type.
                let ty = self.tcx.type_of(did);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                Ok((layout.size, layout.align.abi))
            }
            Some(GlobalAlloc::Memory(alloc)) => {
                // Need to duplicate the logic here, because the global allocations have
                // different associated types than the interpreter-local ones.
                Ok((alloc.size, alloc.align))
            }
            Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
            // The rest must be dead.
            None => {
                if let AllocCheck::MaybeDead = liveness {
                    // Deallocated pointers are allowed, we should be able to find
                    // them in the map.
                    Ok(*self
                        .dead_alloc_map
                        .get(&id)
                        .expect("deallocated pointers should all be recorded in `dead_alloc_map`"))
                } else {
                    throw_ub!(PointerUseAfterFree(id))
                }
            }
        }
    }

    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
        trace!("reading fn ptr: {}", id);
        if let Some(extra) = self.extra_fn_ptr_map.get(&id) {
            Some(FnVal::Other(*extra))
        } else {
            match self.tcx.get_global_alloc(id) {
                Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
                _ => None,
            }
        }
    }

    pub fn get_fn(
        &self,
        ptr: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        let ptr = self.force_ptr(ptr)?; // We definitely need a pointer value.
        if ptr.offset.bytes() != 0 {
            throw_ub!(InvalidFunctionPointer(ptr.erase_tag()))
        }
        self.get_fn_alloc(ptr.alloc_id)
            .ok_or_else(|| err_ub!(InvalidFunctionPointer(ptr.erase_tag())).into())
    }

    pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_raw_mut(id)?.mutability = Mutability::Not;
        Ok(())
    }

    /// Create a lazy debug printer that prints the given allocation and all allocations it points
    /// to, recursively.
    #[must_use]
    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> {
        self.dump_allocs(vec![id])
    }

    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
    /// recursively.
    #[must_use]
    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> {
        allocs.sort();
        allocs.dedup();
        DumpAllocs { mem: self, allocs }
    }

    /// Print leaked memory. Allocations reachable from `static_roots` or a `Global` allocation
    /// are not considered leaked. Leaks whose kind `may_leak()` returns true are not reported.
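    ///
    /// Sketch of intended use at the end of interpretation (illustrative, not a
    /// doctest):
    ///
    /// ```ignore (illustrative)
    /// // Prints all leaked allocations to stderr and returns how many there were.
    /// let num_leaked = memory.leak_report(&[]);
    /// ```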
    pub fn leak_report(&self, static_roots: &[AllocId]) -> usize {
        // Collect the set of allocations that are *reachable* from `Global` allocations.
        let reachable = {
            let mut reachable = FxHashSet::default();
            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
            let mut todo: Vec<_> = self.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
                if Some(kind) == global_kind { Some(id) } else { None }
            });
            todo.extend(static_roots);
            while let Some(id) = todo.pop() {
                if reachable.insert(id) {
                    // This is a new allocation, add its relocations to `todo`.
                    if let Some((_, alloc)) = self.alloc_map.get(id) {
                        todo.extend(alloc.relocations().values().map(|&(_, target_id)| target_id));
                    }
                }
            }
            reachable
        };

        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
        let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
        });
        let n = leaks.len();
        if n > 0 {
            eprintln!("The following memory was leaked: {:?}", self.dump_allocs(leaks));
        }
        n
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

#[doc(hidden)]
/// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    mem: &'a Memory<'mir, 'tcx, M>,
    allocs: Vec<AllocId>,
}

impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Cannot be a closure because it is generic in `Tag`, `Extra`.
        fn write_allocation_track_relocs<'tcx, Tag: Copy + fmt::Debug, Extra>(
            fmt: &mut std::fmt::Formatter<'_>,
            tcx: TyCtxt<'tcx>,
            allocs_to_print: &mut VecDeque<AllocId>,
            alloc: &Allocation<Tag, Extra>,
        ) -> std::fmt::Result {
            for &(_, target_id) in alloc.relocations().values() {
                allocs_to_print.push_back(target_id);
            }
            write!(fmt, "{}", pretty::display_allocation(tcx, alloc))
        }

        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
        // `allocs_printed` contains all allocations that we have already printed.
        let mut allocs_printed = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            if !allocs_printed.insert(id) {
                // Already printed, so skip this.
                continue;
            }

            write!(fmt, "{}", id)?;
            match self.mem.alloc_map.get(id) {
                Some(&(kind, ref alloc)) => {
                    // normal alloc
                    write!(fmt, " ({}, ", kind)?;
                    write_allocation_track_relocs(
                        &mut *fmt,
                        self.mem.tcx,
                        &mut allocs_to_print,
                        alloc,
                    )?;
                }
                None => {
                    // global alloc
                    match self.mem.tcx.get_global_alloc(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            write!(fmt, " (unchanged global, ")?;
                            write_allocation_track_relocs(
                                &mut *fmt,
                                self.mem.tcx,
                                &mut allocs_to_print,
                                alloc,
                            )?;
                        }
                        Some(GlobalAlloc::Function(func)) => {
                            write!(fmt, " (fn: {})", func)?;
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            write!(fmt, " (static: {})", self.mem.tcx.def_path_str(did))?;
                        }
                        None => {
                            write!(fmt, " (deallocated)")?;
                        }
                    }
                }
            }
            writeln!(fmt)?;
        }
        Ok(())
    }
}

/// Reading and writing.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Reads the given number of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
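    ///
    /// A sketch of a write/read round trip (illustrative, not a doctest; assumes
    /// `ptr` points at a live allocation of at least 3 bytes):
    ///
    /// ```ignore (illustrative)
    /// memory.write_bytes(ptr, b"abc".iter().copied())?;
    /// assert_eq!(memory.read_bytes(ptr, Size::from_bytes(3))?, &b"abc"[..]);
    /// ```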
    pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> {
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => return Ok(&[]), // zero-sized access
        };
        self.get_raw(ptr.alloc_id)?.get_bytes(self, ptr, size)
    }

    /// Reads a 0-terminated sequence of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_c_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, &[u8]> {
        let ptr = self.force_ptr(ptr)?; // We need to read at least 1 byte, so we *need* a ptr.
        self.get_raw(ptr.alloc_id)?.read_c_str(self, ptr)
    }

    /// Reads a 0x0000-terminated u16-sequence from memory. Returns them as a `Vec<u16>`.
    /// Terminator 0x0000 is not included in the returned `Vec<u16>`.
    ///
    /// Performs appropriate bounds checks.
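    ///
    /// A sketch pairing this with `write_u16s` below (illustrative, not a
    /// doctest; assumes `ptr` points at a live, 2-aligned allocation of at
    /// least 6 bytes):
    ///
    /// ```ignore (illustrative)
    /// // Write "hi" plus the 0x0000 terminator, then read it back.
    /// memory.write_u16s(ptr, vec![0x0068, 0x0069, 0x0000])?;
    /// assert_eq!(memory.read_wide_str(ptr)?, vec![0x0068, 0x0069]);
    /// ```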
    pub fn read_wide_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, Vec<u16>> {
        let size_2bytes = Size::from_bytes(2);
        let align_2bytes = Align::from_bytes(2).unwrap();
        // We need to read at least 2 bytes, so we *need* a ptr.
        let mut ptr = self.force_ptr(ptr)?;
        let allocation = self.get_raw(ptr.alloc_id)?;
        let mut u16_seq = Vec::new();

        loop {
            ptr = self
                .check_ptr_access(ptr.into(), size_2bytes, align_2bytes)?
                .expect("cannot be a ZST");
            let single_u16 = allocation.read_scalar(self, ptr, size_2bytes)?.to_u16()?;
            if single_u16 != 0x0000 {
                u16_seq.push(single_u16);
                ptr = ptr.offset(size_2bytes, self)?;
            } else {
                break;
            }
        }
        Ok(u16_seq)
    }

    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
    pub fn write_bytes(
        &mut self,
        ptr: Scalar<M::PointerTag>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let size = Size::from_bytes(src.size_hint().0);
        // `write_bytes` checks that this lower bound `size` matches the upper bound and reality.
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => {
                // zero-sized access
                src.next().expect_none("iterator said it was empty but returned an element");
                return Ok(());
            }
        };
        let tcx = self.tcx;
        self.get_raw_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src)
    }

    /// Writes the given stream of u16s into memory.
    ///
    /// Performs appropriate bounds checks.
    pub fn write_u16s(
        &mut self,
        ptr: Scalar<M::PointerTag>,
        src: impl IntoIterator<Item = u16>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");

        let size = Size::from_bytes(lower);
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(2).unwrap())? {
            Some(ptr) => ptr,
            None => {
                // zero-sized access
                src.next().expect_none("iterator said it was empty but returned an element");
                return Ok(());
            }
        };
        let tcx = self.tcx;
        let allocation = self.get_raw_mut(ptr.alloc_id)?;

        for idx in 0..len {
            let val = Scalar::from_u16(
                src.next().expect("iterator was shorter than it said it would be"),
            );
            let offset_ptr = ptr.offset(Size::from_bytes(idx) * 2, &tcx)?; // `Size` multiplication
            allocation.write_scalar(&tcx, offset_ptr, val.into(), Size::from_bytes(2))?;
        }
        src.next().expect_none("iterator was longer than it said it would be");
        Ok(())
    }

    /// Expects the caller to have checked bounds and alignment.
    pub fn copy(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.copy_repeatedly(src, dest, size, 1, nonoverlapping)
    }

    /// Expects the caller to have checked bounds and alignment.
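    ///
    /// The `src` range of `size` bytes is copied `length` times, tiling the
    /// destination range of `size * length` bytes. A sketch (illustrative, not
    /// a doctest; `src` and `dest` are `Pointer`s into live allocations):
    ///
    /// ```ignore (illustrative)
    /// // Tile 4 bytes from `src` over 12 bytes at `dest` (3 repetitions).
    /// memory.copy_repeatedly(src, dest, Size::from_bytes(4), 3, /*nonoverlapping*/ true)?;
    /// ```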
    pub fn copy_repeatedly(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        length: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        // First copy the relocations to a temporary buffer, because
        // `get_bytes_mut` will clear the relocations, which is correct,
        // since we don't want to keep any relocations at the target.
        // (`get_bytes_with_uninit_and_ptr` below checks that there are no
        // relocations overlapping the edges; those would not be handled correctly).
        let relocations =
            self.get_raw(src.alloc_id)?.prepare_relocation_copy(self, src, size, dest, length);

        let tcx = self.tcx;

        // This checks relocation edges on the src.
        let src_bytes =
            self.get_raw(src.alloc_id)?.get_bytes_with_uninit_and_ptr(&tcx, src, size)?.as_ptr();
        let dest_bytes =
            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; // `Size` multiplication

        // If `dest_bytes` is empty we just optimize to not run anything for zsts.
        if dest_bytes.is_empty() {
            return Ok(());
        }

        let dest_bytes = dest_bytes.as_mut_ptr();

        // Prepare a copy of the initialization mask.
        let compressed = self.get_raw(src.alloc_id)?.compress_uninit_range(src, size);

        if compressed.no_bytes_init() {
            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
            // is marked as uninitialized but we otherwise omit changing the byte representation which may
            // be arbitrary for uninitialized bytes.
            // This also avoids writing to the target bytes so that the backing allocation is never
            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
            // operating systems this can avoid physically allocating the page.
            let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
            dest_alloc.mark_init(dest, size * length, false); // `Size` multiplication
            dest_alloc.mark_relocation_range(relocations);
            return Ok(());
        }

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            if src.alloc_id == dest.alloc_id {
                if nonoverlapping {
                    // `Size` additions
                    if (src.offset <= dest.offset && src.offset + size > dest.offset)
                        || (dest.offset <= src.offset && dest.offset + size > src.offset)
                    {
                        throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
                    }
                }

                for i in 0..length {
                    ptr::copy(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            } else {
                for i in 0..length {
                    ptr::copy_nonoverlapping(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            }
        }

        // Now fill in all the data.
        self.get_raw_mut(dest.alloc_id)?.mark_compressed_init_range(
            &compressed,
            dest,
            size,
            length,
        );

        // Copy the relocations to the destination.
        self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations);

        Ok(())
    }
}

/// Machine pointer introspection.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn force_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        match scalar {
            Scalar::Ptr(ptr) => Ok(ptr),
            _ => M::int_to_ptr(&self, scalar.to_machine_usize(self)?),
        }
    }

    pub fn force_bits(
        &self,
        scalar: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, u128> {
        match scalar.to_bits_or_ptr(size, self) {
            Ok(bits) => Ok(bits),
            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)?.into()),
        }
    }
}