//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!

use std::assert_matches::assert_matches;
use std::borrow::Cow;
use std::collections::VecDeque;
use std::fmt;
use std::ptr;

use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::mir::display_allocation;
use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
use rustc_target::abi::{Align, HasDataLayout, Size};

use super::{
    alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc,
    InterpCx, InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
};

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
            MemoryKind::CallerLocation => write!(f, "caller location"),
            MemoryKind::Machine(m) => write!(f, "{}", m),
        }
    }
}

/// The return value of `get_alloc_info` indicates the "kind" of the allocation.
pub enum AllocKind {
    /// A regular live data allocation.
    LiveData,
    /// A function allocation (that fn ptrs point to).
    Function,
    /// A (symbolic) vtable allocation.
    VTable,
    /// A dead allocation.
    Dead,
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => Ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// global and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a global (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a global, doing so will
    /// create a copy of the global allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with null, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,
}

/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
#[derive(Copy, Clone)]
pub struct AllocRef<'a, 'tcx, Prov, Extra> {
    alloc: &'a Allocation<Prov, Extra>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}

/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
pub struct AllocRefMut<'a, 'tcx, Prov, Extra> {
    alloc: &'a mut Allocation<Prov, Extra>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn new() -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxHashMap::default(),
            dead_alloc_map: FxHashMap::default(),
        }
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation. Must never be used
    /// for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
    pub fn global_base_pointer(
        &self,
        ptr: Pointer<AllocId>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let alloc_id = ptr.provenance;
        // We need to handle `extern static`.
        match self.tcx.try_get_global_alloc(alloc_id) {
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
                bug!("global memory cannot point to thread-local static")
            }
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
                return M::extern_static_base_pointer(self, def_id);
            }
            _ => {}
        }
        // And we need to get the provenance.
        Ok(M::adjust_alloc_base_pointer(self, ptr))
    }

    pub fn create_fn_alloc_ptr(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
    ) -> Pointer<M::Provenance> {
        let id = match fn_val {
            FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.reserve_alloc_id();
                let old = self.memory.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        // Functions are global allocations, so make sure we get the right base pointer.
        // We know this is not an `extern static` so this cannot fail.
        self.global_base_pointer(Pointer::from(id)).unwrap()
    }

    pub fn allocate_ptr(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let alloc = Allocation::uninit(size, align, M::PANIC_ON_ALLOC_FAIL)?;
        // We can `unwrap` since `alloc` contains no pointers.
        Ok(self.allocate_raw_ptr(alloc, kind).unwrap())
    }
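
    // Illustrative usage sketch, not part of the original source: assuming an
    // `ecx: InterpCx<...>` in scope, a 16-byte stack allocation with 8-byte
    // alignment would look like
    //     let ptr = ecx.allocate_ptr(Size::from_bytes(16), Align::from_bytes(8).unwrap(), MemoryKind::Stack)?;
    // The new allocation is entirely uninitialized until something is written to it.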

    pub fn allocate_bytes_ptr(
        &mut self,
        bytes: &[u8],
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        mutability: Mutability,
    ) -> Pointer<M::Provenance> {
        let alloc = Allocation::from_bytes(bytes, align, mutability);
        // We can `unwrap` since `alloc` contains no pointers.
        self.allocate_raw_ptr(alloc, kind).unwrap()
    }

    /// This can fail only if `alloc` contains provenance.
    pub fn allocate_raw_ptr(
        &mut self,
        alloc: Allocation,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let id = self.tcx.reserve_alloc_id();
        debug_assert_ne!(
            Some(kind),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
        let alloc = M::adjust_allocation(self, id, Cow::Owned(alloc), Some(kind))?;
        self.memory.alloc_map.insert(id, (kind, alloc.into_owned()));
        Ok(M::adjust_alloc_base_pointer(self, Pointer::from(id)))
    }

    pub fn reallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
        if offset.bytes() != 0 {
            throw_ub_format!(
                "reallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate_ptr(new_size, new_align, kind)?;
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_alloc_raw(alloc_id)?.size(),
        };
        // This will also call the access hooks.
        self.mem_copy(
            ptr,
            Align::ONE,
            new_ptr.into(),
            Align::ONE,
            old_size.min(new_size),
            /*nonoverlapping*/ true,
        )?;
        self.deallocate_ptr(ptr, old_size_and_align, kind)?;

        Ok(new_ptr)
    }
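
    // Worked example, not part of the original source: growing an 8-byte allocation
    // to 32 bytes copies old_size.min(new_size) = 8 bytes into the new allocation and
    // then deallocates the old one, so the remaining 24 bytes start out uninitialized.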

    #[instrument(skip(self), level = "debug")]
    pub fn deallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
        let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr)?;
        trace!("deallocating: {alloc_id:?}");

        if offset.bytes() != 0 {
            throw_ub_format!(
                "deallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
            // Deallocating global memory -- always an error
            return Err(match self.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Function(..)) => {
                    err_ub_format!("deallocating {alloc_id:?}, which is a function")
                }
                Some(GlobalAlloc::VTable(..)) => {
                    err_ub_format!("deallocating {alloc_id:?}, which is a vtable")
                }
                Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                    err_ub_format!("deallocating {alloc_id:?}, which is static memory")
                }
                None => err_ub!(PointerUseAfterFree(alloc_id)),
            }
            .into());
        };

        if alloc.mutability == Mutability::Not {
            throw_ub_format!("deallocating immutable allocation {alloc_id:?}");
        }
        if alloc_kind != kind {
            throw_ub_format!(
                "deallocating {alloc_id:?}, which is {alloc_kind} memory, using {kind} deallocation operation"
            );
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size() || align != alloc.align {
                throw_ub_format!(
                    "incorrect layout on deallocation: {alloc_id:?} has size {} and alignment {}, but gave size {} and alignment {}",
                    alloc.size().bytes(),
                    alloc.align.bytes(),
                    size.bytes(),
                    align.bytes(),
                )
            }
        }

        // Let the machine take some extra action
        let size = alloc.size();
        M::before_memory_deallocation(
            *self.tcx,
            &mut self.machine,
            &mut alloc.extra,
            (alloc_id, prov),
            alloc_range(Size::ZERO, size),
        )?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }

    /// Internal helper function to determine the allocation and offset of a pointer (if any).
    fn get_ptr_access(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
        let align = M::enforce_alignment(&self).then_some(align);
        self.check_and_deref_ptr(
            ptr,
            size,
            align,
            CheckInAllocMsg::MemoryAccessTest,
            |alloc_id, offset, prov| {
                let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
                Ok((size, align, (alloc_id, offset, prov)))
            },
        )
    }

    /// Check if the given pointer points to live memory of given `size` and `align`
    /// (ignoring `M::enforce_alignment`). The caller can control the error message for the
    /// out-of-bounds case.
    pub fn check_ptr_access_align(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        align: Align,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        self.check_and_deref_ptr(ptr, size, Some(align), msg, |alloc_id, _, _| {
            let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
            Ok((size, align, ()))
        })?;
        Ok(())
    }

    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
    /// to the allocation it points to. Supports both shared and mutable references, as the actual
    /// checking is offloaded to a helper closure. `align` defines whether and which alignment check
    /// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
    fn check_and_deref_ptr<T>(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        align: Option<Align>,
        msg: CheckInAllocMsg,
        alloc_size: impl FnOnce(
            AllocId,
            Size,
            M::ProvenanceExtra,
        ) -> InterpResult<'tcx, (Size, Align, T)>,
    ) -> InterpResult<'tcx, Option<T>> {
        fn check_offset_align<'tcx>(offset: u64, align: Align) -> InterpResult<'tcx> {
            if offset % align.bytes() == 0 {
                Ok(())
            } else {
                // The biggest power of two through which `offset` is divisible.
                let offset_pow2 = 1 << offset.trailing_zeros();
                throw_ub!(AlignmentCheckFailed {
                    has: Align::from_bytes(offset_pow2).unwrap(),
                    required: align,
                })
            }
        }

        Ok(match self.ptr_try_get_alloc_id(ptr) {
            Err(addr) => {
                // We couldn't get a proper allocation. This is only okay if the access size is 0,
                // and the address is not null.
                if size.bytes() > 0 || addr == 0 {
                    throw_ub!(DanglingIntPointer(addr, msg));
                }
                // Must be aligned.
                if let Some(align) = align {
                    check_offset_align(addr, align)?;
                }
                None
            }
            Ok((alloc_id, offset, prov)) => {
                let (alloc_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
                // Test bounds. This also ensures non-null.
                // It is sufficient to check this for the end pointer. Also check for overflow!
                if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
                    throw_ub!(PointerOutOfBounds {
                        alloc_id,
                        alloc_size,
                        ptr_offset: self.machine_usize_to_isize(offset.bytes()),
                        ptr_size: size,
                        msg,
                    })
                }
                // Ensure we never consider the null pointer dereferenceable.
                if M::Provenance::OFFSET_IS_ADDR {
                    assert_ne!(ptr.addr(), Size::ZERO);
                }
                // Test align. Check this last; if both bounds and alignment are violated
                // we want the error to be about the bounds.
                if let Some(align) = align {
                    if M::use_addr_for_alignment_check(self) {
                        // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
                        check_offset_align(ptr.addr().bytes(), align)?;
                    } else {
                        // Check allocation alignment and offset alignment.
                        if alloc_align.bytes() < align.bytes() {
                            throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
                        }
                        check_offset_align(offset.bytes(), align)?;
                    }
                }

                // We can still be zero-sized in this branch, in which case we have to
                // return `None`.
                if size.bytes() == 0 { None } else { Some(ret_val) }
            }
        })
    }
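
    // Worked example, not part of the original source: an access at address 10 with
    // required alignment 4 fails `check_offset_align(10, align)` since 10 % 4 != 0.
    // The error reports `has = 2` because 1 << 10u64.trailing_zeros() == 2 is the
    // biggest power of two dividing 10.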
}

/// Allocation accessors
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Helper function to obtain a global (tcx) allocation.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer provenance, so it is indirected through
    /// `M::adjust_allocation`.
    fn get_global_alloc(
        &self,
        id: AllocId,
        is_write: bool,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra>>> {
        let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
            Some(GlobalAlloc::Memory(mem)) => {
                // Memory of a constant or promoted or anonymous memory referenced by a static.
                (mem, None)
            }
            Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
            Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
            None => throw_ub!(PointerUseAfterFree(id)),
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(self.tcx.is_static(def_id));
                assert!(!self.tcx.is_thread_local_static(def_id));
                // Notice that every static has two `AllocId` that will resolve to the same
                // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                // and the other one maps to `GlobalAlloc::Memory`, this is returned by
                // `eval_static_initializer` and it is the "resolved" ID.
                // The resolved ID is never used by the interpreted program, it is hidden.
                // This is relied upon for soundness of const-patterns; a pointer to the resolved
                // ID would "sidestep" the checks that make sure consts do not point to statics!
                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                // contains a reference to memory that was created during its evaluation (i.e., not
                // to another static), those inner references only exist in "resolved" form.
                if self.tcx.is_foreign_item(def_id) {
                    // This is unreachable in Miri, but can happen in CTFE where we actually *do* support
                    // referencing arbitrary (declared) extern statics.
                    throw_unsup!(ReadExternStatic(def_id));
                }

                // Use a precise span for better cycle errors.
                (self.tcx.at(self.cur_span()).eval_static_initializer(def_id)?, Some(def_id))
            }
        };
        M::before_access_global(*self.tcx, &self.machine, id, alloc, def_id, is_write)?;
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        M::adjust_allocation(
            self,
            id, // always use the ID we got as input, not the "hidden" one.
            Cow::Borrowed(alloc.inner()),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
        )
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
    fn get_alloc_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
        let a = self.memory.alloc_map.get_or(id, || {
            let alloc = self.get_global_alloc(id, /*is_write*/ false).map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::GLOBAL_KIND.expect(
                        "I got a global allocation that I have to copy but the machine does \
                            not expect that to happen",
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }

    /// "Safe" (bounds and align-checked) allocation access.
    pub fn get_ptr_alloc<'a>(
        &'a self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
        let align = M::enforce_alignment(self).then_some(align);
        let ptr_and_alloc = self.check_and_deref_ptr(
            ptr,
            size,
            align,
            CheckInAllocMsg::MemoryAccessTest,
            |alloc_id, offset, prov| {
                let alloc = self.get_alloc_raw(alloc_id)?;
                Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
            },
        )?;
        if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
            let range = alloc_range(offset, size);
            M::before_memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?;
            Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
        } else {
            // Even in this branch we have to be sure that we actually access the allocation, in
            // order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of
            // magically pulling *any* ZST value from the ether. However, the `get_raw` above is
            // always called when `ptr` has an `AllocId`.
            Ok(None)
        }
    }
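
    // Illustrative usage sketch, not part of the original source: reading 4 bytes
    // through this API would look like
    //     let alloc_ref = ecx.get_ptr_alloc(ptr, Size::from_bytes(4), Align::ONE)?;
    // `Ok(None)` only happens for zero-sized accesses; any `Some(alloc_ref)` has
    // already been bounds-checked and had `M::before_memory_read` run on the range.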

    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
        Ok(&self.get_alloc_raw(id)?.extra)
    }

    /// Return the `mutability` field of the given allocation.
    pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
        Ok(self.get_alloc_raw(id)?.mutability)
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
    /// allocation.
    fn get_alloc_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra>, &mut M)> {
        // We have "NLL problem case #3" here, which cannot be worked around without loss of
        // efficiency even for the common case where the key is in the map.
        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`.)
        if self.memory.alloc_map.get_mut(id).is_none() {
            // Allocation not found locally, so look it up globally.
            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
            let kind = M::GLOBAL_KIND.expect(
                "I got a global allocation that I have to copy but the machine does \
                    not expect that to happen",
            );
            self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
        }

        let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
        if alloc.mutability == Mutability::Not {
            throw_ub!(WriteToReadOnly(id))
        }
        Ok((alloc, &mut self.machine))
    }

    /// "Safe" (bounds and align-checked) allocation access.
    pub fn get_ptr_alloc_mut<'a>(
        &'a mut self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
        let parts = self.get_ptr_access(ptr, size, align)?;
        if let Some((alloc_id, offset, prov)) = parts {
            let tcx = *self.tcx;
            // FIXME: can we somehow avoid looking up the allocation twice here?
            // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
            let (alloc, machine) = self.get_alloc_raw_mut(alloc_id)?;
            let range = alloc_range(offset, size);
            M::before_memory_write(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
            Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }))
        } else {
            Ok(None)
        }
    }

    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra_mut<'a>(
        &'a mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
        let (alloc, machine) = self.get_alloc_raw_mut(id)?;
        Ok((&mut alloc.extra, machine))
    }

    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    pub fn get_alloc_info(&self, id: AllocId) -> (Size, Align, AllocKind) {
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a global's allocation in miri
        if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
            return (alloc.size(), alloc.align, AllocKind::LiveData);
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if self.get_fn_alloc(id).is_some() {
            return (Size::ZERO, Align::ONE, AllocKind::Function);
        }

        // # Statics
        // Can't do this in the match argument, we may get cycle errors since the lock would
        // be held throughout the match.
        match self.tcx.try_get_global_alloc(id) {
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(self.tcx.is_static(def_id));
                assert!(!self.tcx.is_thread_local_static(def_id));
                // Use size and align of the type.
                let ty = self.tcx.type_of(def_id);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                assert!(!layout.is_unsized());
                (layout.size, layout.align.abi, AllocKind::LiveData)
            }
            Some(GlobalAlloc::Memory(alloc)) => {
                // Need to duplicate the logic here, because the global allocations have
                // different associated types than the interpreter-local ones.
                let alloc = alloc.inner();
                (alloc.size(), alloc.align, AllocKind::LiveData)
            }
            Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
            Some(GlobalAlloc::VTable(..)) => {
                // No data to be accessed here. But vtables are pointer-aligned.
                return (Size::ZERO, self.tcx.data_layout.pointer_align.abi, AllocKind::VTable);
            }
            // The rest must be dead.
            None => {
                // Deallocated pointers are allowed, we should be able to find
                // them in the map.
                let (size, align) = *self
                    .memory
                    .dead_alloc_map
                    .get(&id)
                    .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
                (size, align, AllocKind::Dead)
            }
        }
    }

    /// Obtain the size and alignment of a live allocation.
    pub fn get_live_alloc_size_and_align(&self, id: AllocId) -> InterpResult<'tcx, (Size, Align)> {
        let (size, align, kind) = self.get_alloc_info(id);
        if matches!(kind, AllocKind::Dead) {
            throw_ub!(PointerUseAfterFree(id))
        }
        Ok((size, align))
    }

    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
        if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
            Some(FnVal::Other(*extra))
        } else {
            match self.tcx.try_get_global_alloc(id) {
                Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
                _ => None,
            }
        }
    }

    pub fn get_ptr_fn(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        trace!("get_ptr_fn({:?})", ptr);
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
        }
        self.get_fn_alloc(alloc_id)
            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))).into())
    }

    pub fn get_ptr_vtable(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>)> {
        trace!("get_ptr_vtable({:?})", ptr);
        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
        }
        match self.tcx.try_get_global_alloc(alloc_id) {
            Some(GlobalAlloc::VTable(ty, trait_ref)) => Ok((ty, trait_ref)),
            _ => throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset))),
        }
    }

    pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
        Ok(())
    }

    /// Create a lazy debug printer that prints the given allocation and all allocations it points
    /// to, recursively.
    #[must_use]
    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> {
        self.dump_allocs(vec![id])
    }

    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
    /// recursively.
    #[must_use]
    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> {
        allocs.sort();
        allocs.dedup();
        DumpAllocs { ecx: self, allocs }
    }

    /// Print leaked memory. Allocations reachable from `static_roots` or a `Global` allocation
    /// are not considered leaked. Leaks whose kind `may_leak()` returns true are not reported.
    pub fn leak_report(&self, static_roots: &[AllocId]) -> usize {
        // Collect the set of allocations that are *reachable* from `Global` allocations.
        let reachable = {
            let mut reachable = FxHashSet::default();
            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
            let mut todo: Vec<_> =
                self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
                    if Some(kind) == global_kind { Some(id) } else { None }
                });
            todo.extend(static_roots);
            while let Some(id) = todo.pop() {
                if reachable.insert(id) {
                    // This is a new allocation; push the allocations it points to onto `todo`.
                    if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
                        todo.extend(
                            alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()),
                        );
                    }
                }
            }
            reachable
        };

        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
        let leaks: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
        });
        let n = leaks.len();
        if n > 0 {
            eprintln!("The following memory was leaked: {:?}", self.dump_allocs(leaks));
        }
        n
    }
}

#[doc(hidden)]
/// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    ecx: &'a InterpCx<'mir, 'tcx, M>,
    allocs: Vec<AllocId>,
}

impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Cannot be a closure because it is generic in `Prov`, `Extra`.
        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra>(
            fmt: &mut std::fmt::Formatter<'_>,
            tcx: TyCtxt<'tcx>,
            allocs_to_print: &mut VecDeque<AllocId>,
            alloc: &Allocation<Prov, Extra>,
        ) -> std::fmt::Result {
            for alloc_id in alloc.provenance().values().filter_map(|prov| prov.get_alloc_id()) {
                allocs_to_print.push_back(alloc_id);
            }
            write!(fmt, "{}", display_allocation(tcx, alloc))
        }

        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
        // `allocs_printed` contains all allocations that we have already printed.
        let mut allocs_printed = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            if !allocs_printed.insert(id) {
                // Already printed, so skip this.
                continue;
            }

            write!(fmt, "{id:?}")?;
            match self.ecx.memory.alloc_map.get(id) {
                Some(&(kind, ref alloc)) => {
                    // normal alloc
                    write!(fmt, " ({}, ", kind)?;
                    write_allocation_track_relocs(
                        &mut *fmt,
                        *self.ecx.tcx,
                        &mut allocs_to_print,
                        alloc,
                    )?;
                }
                None => {
                    // global alloc
                    match self.ecx.tcx.try_get_global_alloc(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            write!(fmt, " (unchanged global, ")?;
                            write_allocation_track_relocs(
                                &mut *fmt,
                                *self.ecx.tcx,
                                &mut allocs_to_print,
                                alloc.inner(),
                            )?;
                        }
                        Some(GlobalAlloc::Function(func)) => {
                            write!(fmt, " (fn: {func})")?;
                        }
                        Some(GlobalAlloc::VTable(ty, Some(trait_ref))) => {
                            write!(fmt, " (vtable: impl {trait_ref} for {ty})")?;
                        }
                        Some(GlobalAlloc::VTable(ty, None)) => {
                            write!(fmt, " (vtable: impl <auto trait> for {ty})")?;
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
                        }
                        None => {
                            write!(fmt, " (deallocated)")?;
                        }
                    }
                }
            }
            writeln!(fmt)?;
        }
        Ok(())
    }
}

/// Reading and writing.
impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> {
    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
        let range = self.range.subrange(range);
        debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
        Ok(self
            .alloc
            .write_scalar(&self.tcx, range, val)
            .map_err(|e| e.to_interp_error(self.alloc_id))?)
    }

    /// `offset` is relative to this allocation reference, not the base of the allocation.
    pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
    }

    /// Mark the entire referenced range as uninitialized
    pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
        Ok(self
            .alloc
            .write_uninit(&self.tcx, self.range)
            .map_err(|e| e.to_interp_error(self.alloc_id))?)
    }
}

impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn read_scalar(
        &self,
        range: AllocRange,
        read_provenance: bool,
    ) -> InterpResult<'tcx, Scalar<Prov>> {
        let range = self.range.subrange(range);
        let res = self
            .alloc
            .read_scalar(&self.tcx, range, read_provenance)
            .map_err(|e| e.to_interp_error(self.alloc_id))?;
        debug!("read_scalar at {:?}{range:?}: {res:?}", self.alloc_id);
        Ok(res)
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, Scalar<Prov>> {
        self.read_scalar(range, /*read_provenance*/ false)
    }

    /// `offset` is relative to this allocation reference, not the base of the allocation.
    pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
        self.read_scalar(
            alloc_range(offset, self.tcx.data_layout().pointer_size),
            /*read_provenance*/ true,
        )
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
        Ok(self
            .alloc
            .get_bytes_strip_provenance(&self.tcx, self.range)
            .map_err(|e| e.to_interp_error(self.alloc_id))?)
    }

    /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
    pub(crate) fn has_provenance(&self) -> bool {
        self.alloc.range_has_provenance(&self.tcx, self.range)
    }
}
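
// Illustrative usage sketch, not part of the original source: with an `AllocRefMut`
// and an `AllocRef` for the same range, a pointer-sized value round-trips like
//     alloc_ref_mut.write_ptr_sized(Size::ZERO, val)?;
//     let val2 = alloc_ref.read_pointer(Size::ZERO)?;
// Both sides derive the range length from `tcx.data_layout().pointer_size`, so the
// two accesses cover exactly the same bytes.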

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Reads the given number of bytes from memory, and strips their provenance if possible.
    /// Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_bytes_ptr_strip_provenance(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
            // zero-sized access
            return Ok(&[]);
        };
        // Side-step AllocRef and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here so all is good.)
        Ok(alloc_ref
            .alloc
            .get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
            .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
    }

    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
    pub fn write_bytes_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");

        let size = Size::from_bytes(len);
        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size, Align::ONE)? else {
            // zero-sized access
            assert_matches!(
                src.next(),
                None,
                "iterator said it was empty but returned an element"
            );
            return Ok(());
        };

        // Side-step AllocRef and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here so all is good.)
        let alloc_id = alloc_ref.alloc_id;
        let bytes = alloc_ref
            .alloc
            .get_bytes_mut(&alloc_ref.tcx, alloc_ref.range)
            .map_err(move |e| e.to_interp_error(alloc_id))?;
        // `zip` would stop when the first iterator ends; we want to definitely
        // cover all of `bytes`.
        for dest in bytes {
            *dest = src.next().expect("iterator was shorter than it said it would be");
        }
        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
        Ok(())
    }

    pub fn mem_copy(
        &mut self,
        src: Pointer<Option<M::Provenance>>,
        src_align: Align,
        dest: Pointer<Option<M::Provenance>>,
        dest_align: Align,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.mem_copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
    }

    pub fn mem_copy_repeatedly(
        &mut self,
        src: Pointer<Option<M::Provenance>>,
        src_align: Align,
        dest: Pointer<Option<M::Provenance>>,
        dest_align: Align,
        size: Size,
        num_copies: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let tcx = self.tcx;
        // We need to do our own bounds-checks.
        let src_parts = self.get_ptr_access(src, size, src_align)?;
        let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication

        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
        // and once below to get the underlying `&[mut] Allocation`.

        // Source alloc preparations and access hooks.
        let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
            // Zero-sized *source*, that means dst is also zero-sized and we have nothing to do.
            return Ok(());
        };
        let src_alloc = self.get_alloc_raw(src_alloc_id)?;
        let src_range = alloc_range(src_offset, size);
        M::before_memory_read(
            *tcx,
            &self.machine,
            &src_alloc.extra,
            (src_alloc_id, src_prov),
            src_range,
        )?;
        // We need the `dest` ptr for the next operation, so we get it now.
        // We already did the source checks and called the hooks so we are good to return early.
        let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
            // Zero-sized *destination*.
            return Ok(());
        };

        // Checks provenance edges on the src, which needs to happen before
        // `prepare_provenance_copy`.
        if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.start, Size::ZERO)) {
            throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.start)));
        }
        if src_alloc.range_has_provenance(&tcx, alloc_range(src_range.end(), Size::ZERO)) {
            throw_unsup!(PartialPointerCopy(Pointer::new(src_alloc_id, src_range.end())));
        }
        let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
        // first copy the provenance to a temporary buffer, because
        // `get_bytes_mut` will clear the provenance, which is correct,
        // since we don't want to keep any provenance at the target.
        let provenance =
            src_alloc.prepare_provenance_copy(self, src_range, dest_offset, num_copies);
        // Prepare a copy of the initialization mask.
        let compressed = src_alloc.compress_uninit_range(src_range);

        // Destination alloc preparations and access hooks.
        let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
        let dest_range = alloc_range(dest_offset, size * num_copies);
        M::before_memory_write(
            *tcx,
            extra,
            &mut dest_alloc.extra,
            (dest_alloc_id, dest_prov),
            dest_range,
        )?;
        let dest_bytes = dest_alloc
            .get_bytes_mut_ptr(&tcx, dest_range)
            .map_err(|e| e.to_interp_error(dest_alloc_id))?
            .as_mut_ptr();

        if compressed.no_bytes_init() {
            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
            // is marked as uninitialized but we otherwise omit changing the byte representation which may
            // be arbitrary for uninitialized bytes.
            // This also avoids writing to the target bytes so that the backing allocation is never
            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
            // operating systems this can avoid physically allocating the page.
            dest_alloc
                .write_uninit(&tcx, dest_range)
                .map_err(|e| e.to_interp_error(dest_alloc_id))?;
            // We can forget about the provenance, this is all not initialized anyway.
            return Ok(());
        }

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            if src_alloc_id == dest_alloc_id {
                if nonoverlapping {
                    // `Size` additions
                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
                    {
                        throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
                    }
                }

                for i in 0..num_copies {
                    ptr::copy(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            } else {
                for i in 0..num_copies {
                    ptr::copy_nonoverlapping(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            }
        }

        // now fill in all the "init" data
        dest_alloc.mark_compressed_init_range(
            &compressed,
            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
            num_copies,
        );
        // copy the provenance to the destination
        dest_alloc.mark_provenance_range(provenance);

        Ok(())
    }
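
    // Worked example, not part of the original source: within a single allocation,
    // copying 4 bytes from offset 0 to offset 2 with `nonoverlapping == true` trips
    // the check above (0 <= 2 && 0 + 4 > 2), while copying from offset 0 to offset 4
    // is accepted; same-allocation copies always go through the overlap-safe
    // `ptr::copy`.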
}

/// Machine pointer introspection.
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Test if this value might be null.
    /// If the machine does not support ptr-to-int casts, this is conservative.
    pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
        Ok(match scalar.try_to_int() {
            Ok(int) => int.is_null(),
            Err(_) => {
                // Can only happen during CTFE.
                let ptr = scalar.to_pointer(self)?;
                match self.ptr_try_get_alloc_id(ptr) {
                    Ok((alloc_id, offset, _)) => {
                        let (size, _align, _kind) = self.get_alloc_info(alloc_id);
                        // If the pointer is out-of-bounds, it may be null.
                        // Note that one-past-the-end (offset == size) is still inbounds, and never null.
                        offset > size
                    }
                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
                }
            }
        })
    }

    /// Turning a "maybe pointer" into a proper pointer (and some information
    /// about where it points), or an absolute address.
    pub fn ptr_try_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
        match ptr.into_pointer_or_addr() {
            Ok(ptr) => match M::ptr_get_alloc(self, ptr) {
                Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
                None => {
                    assert!(M::Provenance::OFFSET_IS_ADDR);
                    let (_, addr) = ptr.into_parts();
                    Err(addr.bytes())
                }
            },
            Err(addr) => Err(addr.bytes()),
        }
    }

    /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
    #[inline(always)]
    pub fn ptr_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
        self.ptr_try_get_alloc_id(ptr).map_err(|offset| {
            err_ub!(DanglingIntPointer(offset, CheckInAllocMsg::InboundsTest)).into()
        })
    }
}