//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
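//!
//! For example (an illustrative sketch, not a doctest; `ecx` is assumed to be an
//! `InterpCx` and `msg` a `CheckInAllocMsg`), a zero-sized access through a dangling
//! but well-aligned address is accepted, while a null address never is:
//!
//! ```ignore (illustrative)
//! // Address 8 is non-null and 8-aligned: a size-0 access is fine.
//! ecx.check_ptr_access_align(dangling_ptr_at_8, Size::ZERO, Align::from_bytes(8).unwrap(), msg)?;
//! // Address 0 is null: this errors even for size 0.
//! ecx.check_ptr_access_align(null_ptr, Size::ZERO, Align::ONE, msg).unwrap_err();
//! ```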
use std::assert_matches::assert_matches;
use std::borrow::Cow;
use std::collections::VecDeque;
use std::convert::TryFrom;
use std::fmt;
use std::ptr;
use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::mir::display_allocation;
use rustc_middle::ty::{Instance, ParamEnv, TyCtxt};
use rustc_target::abi::{Align, HasDataLayout, Size};
use super::{
    alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc, InterpCx,
    InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
    ScalarMaybeUninit,
};
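/// The kind of memory backing an allocation: interpreter-managed kinds plus
/// machine-specific ones in `Machine(T)`.
///
/// A sketch of machine-side usage (the `MyHeapKind` name is made up for illustration):
///
/// ```ignore (illustrative)
/// let kind: MemoryKind<MyHeapKind> = MemoryKind::Machine(MyHeapKind::Malloc);
/// assert!(!kind.may_leak()); // delegates to `MyHeapKind::may_leak`
/// ```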
#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}
impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}
impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
            MemoryKind::CallerLocation => write!(f, "caller location"),
            MemoryKind::Machine(m) => write!(f, "{}", m),
        }
    }
}
/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
#[derive(Debug, Copy, Clone)]
pub enum AllocCheck {
    /// Allocation must be live and not a function pointer.
    Dereferenceable,
    /// Allocation needs to be live, but may be a function pointer.
    Live,
    /// Allocation may be dead.
    MaybeDead,
}
/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}
impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => Ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}
// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// global and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a global (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a global, doing so will
    /// create a copy of the global allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with null, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,
}
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
#[derive(Copy, Clone)]
pub struct AllocRef<'a, 'tcx, Tag, Extra> {
    alloc: &'a Allocation<Tag, Extra>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
pub struct AllocRefMut<'a, 'tcx, Tag, Extra> {
    alloc: &'a mut Allocation<Tag, Extra>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn new() -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxHashMap::default(),
            dead_alloc_map: FxHashMap::default(),
        }
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda).
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation. Must never be used
    /// for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
    #[inline]
    pub fn global_base_pointer(
        &self,
        ptr: Pointer<AllocId>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        let alloc_id = ptr.provenance;
        // We need to handle `extern static`.
        match self.tcx.get_global_alloc(alloc_id) {
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
                bug!("global memory cannot point to thread-local static")
            }
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
                return M::extern_static_base_pointer(self, def_id);
            }
            _ => {}
        }
        // And we need to get the tag.
        Ok(M::tag_alloc_base_pointer(self, ptr))
    }
    pub fn create_fn_alloc_ptr(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
    ) -> Pointer<M::PointerTag> {
        let id = match fn_val {
            FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.reserve_alloc_id();
                let old = self.memory.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        // Functions are global allocations, so make sure we get the right base pointer.
        // We know this is not an `extern static` so this cannot fail.
        self.global_base_pointer(Pointer::from(id)).unwrap()
    }
    pub fn allocate_ptr(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        let alloc = Allocation::uninit(size, align, M::PANIC_ON_ALLOC_FAIL)?;
        // We can `unwrap` since `alloc` contains no pointers.
        Ok(self.allocate_raw_ptr(alloc, kind).unwrap())
    }
    pub fn allocate_bytes_ptr(
        &mut self,
        bytes: &[u8],
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        mutability: Mutability,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::from_bytes(bytes, align, mutability);
        // We can `unwrap` since `alloc` contains no pointers.
        self.allocate_raw_ptr(alloc, kind).unwrap()
    }
    /// This can fail only if `alloc` contains relocations.
    pub fn allocate_raw_ptr(
        &mut self,
        alloc: Allocation,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        let id = self.tcx.reserve_alloc_id();
        debug_assert_ne!(
            Some(kind),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
        let alloc = M::init_allocation_extra(self, id, Cow::Owned(alloc), Some(kind))?;
        self.memory.alloc_map.insert(id, (kind, alloc.into_owned()));
        Ok(M::tag_alloc_base_pointer(self, Pointer::from(id)))
    }
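    /// Grow or shrink a previously allocated block, `realloc`-style.
    ///
    /// A sketch of a call site (illustrative, not a doctest; `ecx` is an `InterpCx`):
    ///
    /// ```ignore (illustrative)
    /// let new_ptr = ecx.reallocate_ptr(old_ptr, None, new_size, new_align, kind)?;
    /// ```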
    pub fn reallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::PointerTag>>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr)?;
        if offset.bytes() != 0 {
            throw_ub_format!(
                "reallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate_ptr(new_size, new_align, kind)?;
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_alloc_raw(alloc_id)?.size(),
        };
        // This will also call the access hooks.
        self.mem_copy(
            ptr,
            Align::ONE,
            new_ptr.into(),
            Align::ONE,
            old_size.min(new_size),
            /*nonoverlapping*/ true,
        )?;
        self.deallocate_ptr(ptr, old_size_and_align, kind)?;

        Ok(new_ptr)
    }
    #[instrument(skip(self), level = "debug")]
    pub fn deallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::PointerTag>>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
        let (alloc_id, offset, tag) = self.ptr_get_alloc_id(ptr)?;
        trace!("deallocating: {}", alloc_id);

        if offset.bytes() != 0 {
            throw_ub_format!(
                "deallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
            // Deallocating global memory -- always an error
            return Err(match self.tcx.get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Function(..)) => {
                    err_ub_format!("deallocating {}, which is a function", alloc_id)
                }
                Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                    err_ub_format!("deallocating {}, which is static memory", alloc_id)
                }
                None => err_ub!(PointerUseAfterFree(alloc_id)),
            }
            .into());
        };

        if alloc.mutability == Mutability::Not {
            throw_ub_format!("deallocating immutable allocation {}", alloc_id);
        }
        if alloc_kind != kind {
            throw_ub_format!(
                "deallocating {}, which is {} memory, using {} deallocation operation",
                alloc_id,
                alloc_kind,
                kind
            );
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size() || align != alloc.align {
                throw_ub_format!(
                    "incorrect layout on deallocation: {} has size {} and alignment {}, but gave size {} and alignment {}",
                    alloc_id,
                    alloc.size().bytes(),
                    alloc.align.bytes(),
                    size.bytes(),
                    align.bytes(),
                )
            }
        }

        // Let the machine take some extra action
        let size = alloc.size();
        M::memory_deallocated(
            *self.tcx,
            &mut self.machine,
            &mut alloc.extra,
            (alloc_id, tag),
            alloc_range(Size::ZERO, size),
        )?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }
    /// Internal helper function to determine the allocation and offset of a pointer (if any).
    fn get_ptr_access(
        &self,
        ptr: Pointer<Option<M::PointerTag>>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<(AllocId, Size, M::TagExtra)>> {
        let align = M::enforce_alignment(&self).then_some(align);
        self.check_and_deref_ptr(
            ptr,
            size,
            align,
            CheckInAllocMsg::MemoryAccessTest,
            |alloc_id, offset, tag| {
                let (size, align) =
                    self.get_alloc_size_and_align(alloc_id, AllocCheck::Dereferenceable)?;
                Ok((size, align, (alloc_id, offset, tag)))
            },
        )
    }
    /// Check if the given pointer points to live memory of given `size` and `align`
    /// (ignoring `M::enforce_alignment`). The caller can control the error message for the
    /// out-of-bounds case.
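    ///
    /// For example (an illustrative sketch, not a doctest), a zero-sized access to a
    /// well-aligned dangling address passes this check, per the rule in the module docs:
    ///
    /// ```ignore (illustrative)
    /// // Address 8 is non-null and 8-aligned, so a size-0 check succeeds.
    /// ecx.check_ptr_access_align(dangling_ptr_at_8, Size::ZERO,
    ///     Align::from_bytes(8).unwrap(), CheckInAllocMsg::MemoryAccessTest)?;
    /// ```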
    pub fn check_ptr_access_align(
        &self,
        ptr: Pointer<Option<M::PointerTag>>,
        size: Size,
        align: Align,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        self.check_and_deref_ptr(ptr, size, Some(align), msg, |alloc_id, _, _| {
            let check = match msg {
                CheckInAllocMsg::DerefTest | CheckInAllocMsg::MemoryAccessTest => {
                    AllocCheck::Dereferenceable
                }
                CheckInAllocMsg::PointerArithmeticTest
                | CheckInAllocMsg::OffsetFromTest
                | CheckInAllocMsg::InboundsTest => AllocCheck::Live,
            };
            let (size, align) = self.get_alloc_size_and_align(alloc_id, check)?;
            Ok((size, align, ()))
        })?;
        Ok(())
    }
    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
    /// to the allocation it points to. Supports both shared and mutable references, as the actual
    /// checking is offloaded to a helper closure. `align` defines whether and which alignment check
    /// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
    fn check_and_deref_ptr<T>(
        &self,
        ptr: Pointer<Option<M::PointerTag>>,
        size: Size,
        align: Option<Align>,
        msg: CheckInAllocMsg,
        alloc_size: impl FnOnce(AllocId, Size, M::TagExtra) -> InterpResult<'tcx, (Size, Align, T)>,
    ) -> InterpResult<'tcx, Option<T>> {
        fn check_offset_align<'tcx>(offset: u64, align: Align) -> InterpResult<'tcx> {
            if offset % align.bytes() == 0 {
                Ok(())
            } else {
                // The biggest power of two through which `offset` is divisible.
                let offset_pow2 = 1 << offset.trailing_zeros();
                throw_ub!(AlignmentCheckFailed {
                    has: Align::from_bytes(offset_pow2).unwrap(),
                    required: align,
                })
            }
        }

        Ok(match self.ptr_try_get_alloc_id(ptr) {
            Err(addr) => {
                // We couldn't get a proper allocation. This is only okay if the access size is 0,
                // and the address is not null.
                if size.bytes() > 0 || addr == 0 {
                    throw_ub!(DanglingIntPointer(addr, msg));
                }
                // Must be aligned.
                if let Some(align) = align {
                    check_offset_align(addr, align)?;
                }
                None
            }
            Ok((alloc_id, offset, tag)) => {
                let (alloc_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, tag)?;
                // Test bounds. This also ensures non-null.
                // It is sufficient to check this for the end pointer. Also check for overflow!
                if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
                    throw_ub!(PointerOutOfBounds {
                        alloc_id,
                        alloc_size,
                        ptr_offset: self.machine_usize_to_isize(offset.bytes()),
                        ptr_size: size,
                        msg,
                    })
                }
                // Ensure we never consider the null pointer dereferenceable.
                if M::PointerTag::OFFSET_IS_ADDR {
                    assert_ne!(ptr.addr(), Size::ZERO);
                }
                // Test align. Check this last; if both bounds and alignment are violated
                // we want the error to be about the bounds.
                if let Some(align) = align {
                    if M::force_int_for_alignment_check(self) {
                        // `force_int_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
                        check_offset_align(ptr.addr().bytes(), align)?;
                    } else {
                        // Check allocation alignment and offset alignment.
                        if alloc_align.bytes() < align.bytes() {
                            throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
                        }
                        check_offset_align(offset.bytes(), align)?;
                    }
                }

                // We can still be zero-sized in this branch, in which case we have to
                // return `None`.
                if size.bytes() == 0 { None } else { Some(ret_val) }
            }
        })
    }
}
/// Allocation accessors
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Helper function to obtain a global (tcx) allocation.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer tag, so it is indirected through
    /// `M::tag_allocation`.
    fn get_global_alloc(
        &self,
        id: AllocId,
        is_write: bool,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
        let (alloc, def_id) = match self.tcx.get_global_alloc(id) {
            Some(GlobalAlloc::Memory(mem)) => {
                // Memory of a constant or promoted or anonymous memory referenced by a static.
                (mem, None)
            }
            Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
            None => throw_ub!(PointerUseAfterFree(id)),
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(self.tcx.is_static(def_id));
                assert!(!self.tcx.is_thread_local_static(def_id));
                // Notice that every static has two `AllocId` that will resolve to the same
                // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                // and the other one maps to `GlobalAlloc::Memory`, this is returned by
                // `eval_static_initializer` and it is the "resolved" ID.
                // The resolved ID is never used by the interpreted program, it is hidden.
                // This is relied upon for soundness of const-patterns; a pointer to the resolved
                // ID would "sidestep" the checks that make sure consts do not point to statics!
                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                // contains a reference to memory that was created during its evaluation (i.e., not
                // to another static), those inner references only exist in "resolved" form.
                if self.tcx.is_foreign_item(def_id) {
                    throw_unsup!(ReadExternStatic(def_id));
                }

                // Use a precise span for better cycle errors.
                (self.tcx.at(self.cur_span()).eval_static_initializer(def_id)?, Some(def_id))
            }
        };
        M::before_access_global(*self.tcx, &self.machine, id, alloc, def_id, is_write)?;
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        M::init_allocation_extra(
            self,
            id, // always use the ID we got as input, not the "hidden" one.
            Cow::Borrowed(alloc.inner()),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
        )
    }
    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    fn get_alloc_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
        let a = self.memory.alloc_map.get_or(id, || {
            let alloc = self.get_global_alloc(id, /*is_write*/ false).map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::GLOBAL_KIND.expect(
                        "I got a global allocation that I have to copy but the machine does \
                         not expect that to happen",
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }
559 /// "Safe" (bounds and align-checked) allocation access.
    pub fn get_ptr_alloc<'a>(
        &'a self,
        ptr: Pointer<Option<M::PointerTag>>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
        let align = M::enforce_alignment(self).then_some(align);
        let ptr_and_alloc = self.check_and_deref_ptr(
            ptr,
            size,
            align,
            CheckInAllocMsg::MemoryAccessTest,
            |alloc_id, offset, tag| {
                let alloc = self.get_alloc_raw(alloc_id)?;
                Ok((alloc.size(), alloc.align, (alloc_id, offset, tag, alloc)))
            },
        )?;
        if let Some((alloc_id, offset, tag, alloc)) = ptr_and_alloc {
            let range = alloc_range(offset, size);
            M::memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, tag), range)?;
            Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
        } else {
            // Even in this branch we have to be sure that we actually access the allocation, in
            // order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of
            // magically pulling *any* ZST value from the ether. However, the `get_raw` above is
            // always called when `ptr` has an `AllocId`.
            Ok(None)
        }
    }
    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
        Ok(&self.get_alloc_raw(id)?.extra)
    }
    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// Also returns a ptr to `self.machine` so that the caller can use it in parallel with the
    /// allocation.
    fn get_alloc_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&mut Allocation<M::PointerTag, M::AllocExtra>, &mut M)> {
        // We have "NLL problem case #3" here, which cannot be worked around without loss of
        // efficiency even for the common case where the key is in the map.
        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`.)
        if self.memory.alloc_map.get_mut(id).is_none() {
            // Slow path.
            // Allocation not found locally, go look it up globally.
            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
            let kind = M::GLOBAL_KIND.expect(
                "I got a global allocation that I have to copy but the machine does \
                 not expect that to happen",
            );
            self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
        }

        let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
        if alloc.mutability == Mutability::Not {
            throw_ub!(WriteToReadOnly(id))
        }
        Ok((alloc, &mut self.machine))
    }
626 /// "Safe" (bounds and align-checked) allocation access.
627 pub fn get_ptr_alloc_mut
<'a
>(
629 ptr
: Pointer
<Option
<M
::PointerTag
>>,
632 ) -> InterpResult
<'tcx
, Option
<AllocRefMut
<'a
, 'tcx
, M
::PointerTag
, M
::AllocExtra
>>> {
633 let parts
= self.get_ptr_access(ptr
, size
, align
)?
;
634 if let Some((alloc_id
, offset
, tag
)) = parts
{
636 // FIXME: can we somehow avoid looking up the allocation twice here?
637 // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
638 let (alloc
, machine
) = self.get_alloc_raw_mut(alloc_id
)?
;
639 let range
= alloc_range(offset
, size
);
640 M
::memory_written(tcx
, machine
, &mut alloc
.extra
, (alloc_id
, tag
), range
)?
;
641 Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }
))
    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra_mut<'a>(
        &'a mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
        let (alloc, machine) = self.get_alloc_raw_mut(id)?;
        Ok((&mut alloc.extra, machine))
    }
    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    ///
    /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
    pub fn get_alloc_size_and_align(
        &self,
        id: AllocId,
        liveness: AllocCheck,
    ) -> InterpResult<'tcx, (Size, Align)> {
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a global's allocation in miri
        if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
            return Ok((alloc.size(), alloc.align));
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if self.get_fn_alloc(id).is_some() {
            return if let AllocCheck::Dereferenceable = liveness {
                // The caller requested no function pointers.
                throw_ub!(DerefFunctionPointer(id))
            } else {
                Ok((Size::ZERO, Align::ONE))
            };
        }

        // # Global allocations
        // Can't do this in the match argument, we may get cycle errors since the lock would
        // be held throughout the match.
        match self.tcx.get_global_alloc(id) {
            Some(GlobalAlloc::Static(did)) => {
                assert!(!self.tcx.is_thread_local_static(did));
                // Use size and align of the type.
                let ty = self.tcx.type_of(did);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                Ok((layout.size, layout.align.abi))
            }
            Some(GlobalAlloc::Memory(alloc)) => {
                // Need to duplicate the logic here, because the global allocations have
                // different associated types than the interpreter-local ones.
                let alloc = alloc.inner();
                Ok((alloc.size(), alloc.align))
            }
            Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
            // The rest must be dead.
            None => {
                if let AllocCheck::MaybeDead = liveness {
                    // Deallocated pointers are allowed, we should be able to find
                    // them in the map.
                    Ok(*self
                        .memory
                        .dead_alloc_map
                        .get(&id)
                        .expect("deallocated pointers should all be recorded in `dead_alloc_map`"))
                } else {
                    throw_ub!(PointerUseAfterFree(id))
                }
            }
        }
    }
    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
        if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
            Some(FnVal::Other(*extra))
        } else {
            match self.tcx.get_global_alloc(id) {
                Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
                _ => None,
            }
        }
    }
    pub fn get_ptr_fn(
        &self,
        ptr: Pointer<Option<M::PointerTag>>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        trace!("get_fn({:?})", ptr);
        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
        }
        self.get_fn_alloc(alloc_id)
            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))).into())
    }
    pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
        Ok(())
    }
    /// Create a lazy debug printer that prints the given allocation and all allocations it points
    /// to, recursively.
    #[must_use]
    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> {
        self.dump_allocs(vec![id])
    }
    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
    /// recursively.
    #[must_use]
    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> {
        allocs.sort();
        allocs.dedup();
        DumpAllocs { ecx: self, allocs }
    }
    /// Print leaked memory. Allocations reachable from `static_roots` or a `Global` allocation
    /// are not considered leaked, and leaks whose kind's `may_leak()` returns true are not reported.
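    ///
    /// Returns the number of leaked allocations. A machine would typically call this once
    /// execution is done, e.g. (an illustrative sketch, not a doctest):
    ///
    /// ```ignore (illustrative)
    /// let leaks = ecx.leak_report(&[]);
    /// if leaks != 0 { /* complain about the leaks */ }
    /// ```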
    pub fn leak_report(&self, static_roots: &[AllocId]) -> usize {
        // Collect the set of allocations that are *reachable* from `Global` allocations.
        let reachable = {
            let mut reachable = FxHashSet::default();
            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
            let mut todo: Vec<_> =
                self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
                    if Some(kind) == global_kind { Some(id) } else { None }
                });
            todo.extend(static_roots);
            while let Some(id) = todo.pop() {
                if reachable.insert(id) {
                    // This is a new allocation, add its relocations to `todo`.
                    if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
                        todo.extend(
                            alloc.relocations().values().filter_map(|tag| tag.get_alloc_id()),
                        );
                    }
                }
            }
            reachable
        };

        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
        let leaks: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
        });
        let n = leaks.len();
        if n > 0 {
            eprintln!("The following memory was leaked: {:?}", self.dump_allocs(leaks));
        }
        n
    }
}
/// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    ecx: &'a InterpCx<'mir, 'tcx, M>,
    allocs: Vec<AllocId>,
}
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Cannot be a closure because it is generic in `Tag`, `Extra`.
        fn write_allocation_track_relocs<'tcx, Tag: Provenance, Extra>(
            fmt: &mut std::fmt::Formatter<'_>,
            tcx: TyCtxt<'tcx>,
            allocs_to_print: &mut VecDeque<AllocId>,
            alloc: &Allocation<Tag, Extra>,
        ) -> std::fmt::Result {
            for alloc_id in alloc.relocations().values().filter_map(|tag| tag.get_alloc_id()) {
                allocs_to_print.push_back(alloc_id);
            }
            write!(fmt, "{}", display_allocation(tcx, alloc))
        }

        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
        // `allocs_printed` contains all allocations that we have already printed.
        let mut allocs_printed = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            if !allocs_printed.insert(id) {
                // Already printed, so skip this.
                continue;
            }

            write!(fmt, "{}", id)?;
            match self.ecx.memory.alloc_map.get(id) {
                Some(&(kind, ref alloc)) => {
                    // normal alloc
                    write!(fmt, " ({}, ", kind)?;
                    write_allocation_track_relocs(
                        &mut *fmt,
                        *self.ecx.tcx,
                        &mut allocs_to_print,
                        alloc,
                    )?;
                }
                None => {
                    // global alloc
                    match self.ecx.tcx.get_global_alloc(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            write!(fmt, " (unchanged global, ")?;
                            write_allocation_track_relocs(
                                &mut *fmt,
                                *self.ecx.tcx,
                                &mut allocs_to_print,
                                alloc.inner(),
                            )?;
                        }
                        Some(GlobalAlloc::Function(func)) => {
                            write!(fmt, " (fn: {})", func)?;
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
                        }
                        None => {
                            write!(fmt, " (deallocated)")?;
                        }
                    }
                }
            }
            writeln!(fmt)?;
        }
        Ok(())
    }
}
/// Reading and writing.
impl<'tcx, 'a, Tag: Provenance, Extra> AllocRefMut<'a, 'tcx, Tag, Extra> {
    pub fn write_scalar(
        &mut self,
        range: AllocRange,
        val: ScalarMaybeUninit<Tag>,
    ) -> InterpResult<'tcx> {
        let range = self.range.subrange(range);
        debug!(
            "write_scalar in {} at {:#x}, size {}: {:?}",
            self.alloc_id,
            range.start.bytes(),
            range.size.bytes(),
            val
        );
        Ok(self
            .alloc
            .write_scalar(&self.tcx, range, val)
            .map_err(|e| e.to_interp_error(self.alloc_id))?)
    }

    pub fn write_ptr_sized(
        &mut self,
        offset: Size,
        val: ScalarMaybeUninit<Tag>,
    ) -> InterpResult<'tcx> {
        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
    }

    /// Mark the entire referenced range as uninitialized
    pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
        Ok(self
            .alloc
            .write_uninit(&self.tcx, self.range)
            .map_err(|e| e.to_interp_error(self.alloc_id))?)
    }
}
impl<'tcx, 'a, Tag: Provenance, Extra> AllocRef<'a, 'tcx, Tag, Extra> {
    pub fn read_scalar(
        &self,
        range: AllocRange,
        read_provenance: bool,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        let range = self.range.subrange(range);
        let res = self
            .alloc
            .read_scalar(&self.tcx, range, read_provenance)
            .map_err(|e| e.to_interp_error(self.alloc_id))?;
        debug!(
            "read_scalar in {} at {:#x}, size {}: {:?}",
            self.alloc_id,
            range.start.bytes(),
            range.size.bytes(),
            res
        );
        Ok(res)
    }

    pub fn read_integer(
        &self,
        offset: Size,
        size: Size,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        self.read_scalar(alloc_range(offset, size), /*read_provenance*/ false)
    }

    pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        self.read_scalar(
            alloc_range(offset, self.tcx.data_layout().pointer_size),
            /*read_provenance*/ true,
        )
    }

    pub fn check_bytes(
        &self,
        range: AllocRange,
        allow_uninit: bool,
        allow_ptr: bool,
    ) -> InterpResult<'tcx> {
        Ok(self
            .alloc
            .check_bytes(&self.tcx, self.range.subrange(range), allow_uninit, allow_ptr)
            .map_err(|e| e.to_interp_error(self.alloc_id))?)
    }
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Reads the given number of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_bytes_ptr(
        &self,
        ptr: Pointer<Option<M::PointerTag>>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
            // zero-sized access
            return Ok(&[]);
        };
        // Side-step AllocRef and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here so all is good.)
        Ok(alloc_ref
            .alloc
            .get_bytes(&alloc_ref.tcx, alloc_ref.range)
            .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
    }
    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
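    ///
    /// The iterator must report an exact length via `size_hint`, as a slice iterator does.
    /// A sketch (illustrative, not a doctest):
    ///
    /// ```ignore (illustrative)
    /// ecx.write_bytes_ptr(ptr, [0u8; 4].into_iter())?;
    /// assert_eq!(ecx.read_bytes_ptr(ptr, Size::from_bytes(4))?, &[0, 0, 0, 0]);
    /// ```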
    pub fn write_bytes_ptr(
        &mut self,
        ptr: Pointer<Option<M::PointerTag>>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");

        let size = Size::from_bytes(len);
        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size, Align::ONE)? else {
            // zero-sized access
            assert_matches!(
                src.next(),
                None,
                "iterator said it was empty but returned an element"
            );
            return Ok(());
        };

        // Side-step AllocRef and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here so all is good.)
        let alloc_id = alloc_ref.alloc_id;
        let bytes = alloc_ref
            .alloc
            .get_bytes_mut(&alloc_ref.tcx, alloc_ref.range)
            .map_err(move |e| e.to_interp_error(alloc_id))?;
        // `zip` would stop when the first iterator ends; we want to definitely
        // cover all of `bytes`.
        for dest in bytes {
            *dest = src.next().expect("iterator was shorter than it said it would be");
        }
        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
        Ok(())
    }
    pub fn mem_copy(
        &mut self,
        src: Pointer<Option<M::PointerTag>>,
        src_align: Align,
        dest: Pointer<Option<M::PointerTag>>,
        dest_align: Align,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.mem_copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
    }
    pub fn mem_copy_repeatedly(
        &mut self,
        src: Pointer<Option<M::PointerTag>>,
        src_align: Align,
        dest: Pointer<Option<M::PointerTag>>,
        dest_align: Align,
        size: Size,
        num_copies: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let tcx = self.tcx;
        // We need to do our own bounds-checks.
        let src_parts = self.get_ptr_access(src, size, src_align)?;
        let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication

        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
        // and once below to get the underlying `&[mut] Allocation`.

        // Source alloc preparations and access hooks.
        let Some((src_alloc_id, src_offset, src_tag)) = src_parts else {
            // Zero-sized *source*, that means dst is also zero-sized and we have nothing to do.
            return Ok(());
        };
        let src_alloc = self.get_alloc_raw(src_alloc_id)?;
        let src_range = alloc_range(src_offset, size);
        M::memory_read(*tcx, &self.machine, &src_alloc.extra, (src_alloc_id, src_tag), src_range)?;
        // We need the `dest` ptr for the next operation, so we get it now.
        // We already did the source checks and called the hooks so we are good to return early.
        let Some((dest_alloc_id, dest_offset, dest_tag)) = dest_parts else {
            // Zero-sized *destination*.
            return Ok(());
        };

        // This checks relocation edges on the src, which needs to happen before
        // `prepare_relocation_copy`.
        let src_bytes = src_alloc
            .get_bytes_with_uninit_and_ptr(&tcx, src_range)
            .map_err(|e| e.to_interp_error(src_alloc_id))?
            .as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
        // first copy the relocations to a temporary buffer, because
        // `get_bytes_mut` will clear the relocations, which is correct,
        // since we don't want to keep any relocations at the target.
        let relocations =
            src_alloc.prepare_relocation_copy(self, src_range, dest_offset, num_copies);
        // Prepare a copy of the initialization mask.
        let compressed = src_alloc.compress_uninit_range(src_range);

        // Destination alloc preparations and access hooks.
        let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
        let dest_range = alloc_range(dest_offset, size * num_copies);
        M::memory_written(
            *tcx,
            extra,
            &mut dest_alloc.extra,
            (dest_alloc_id, dest_tag),
            dest_range,
        )?;
        let dest_bytes = dest_alloc
            .get_bytes_mut_ptr(&tcx, dest_range)
            .map_err(|e| e.to_interp_error(dest_alloc_id))?
            .as_mut_ptr();

        if compressed.no_bytes_init() {
            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
            // is marked as uninitialized but we otherwise omit changing the byte representation which may
            // be arbitrary for uninitialized bytes.
            // This also avoids writing to the target bytes so that the backing allocation is never
            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
            // operating systems this can avoid physically allocating the page.
            dest_alloc
                .write_uninit(&tcx, dest_range)
                .map_err(|e| e.to_interp_error(dest_alloc_id))?;
            // We can forget about the relocations, this is all not initialized anyway.
            return Ok(());
        }

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            if src_alloc_id == dest_alloc_id {
                if nonoverlapping {
                    // `Size` additions
                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
                    {
                        throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
                    }
                }

                for i in 0..num_copies {
                    ptr::copy(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            } else {
                for i in 0..num_copies {
                    ptr::copy_nonoverlapping(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            }
        }

        // now fill in all the "init" data
        dest_alloc.mark_compressed_init_range(
            &compressed,
            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
            num_copies,
        );
        // copy the relocations to the destination
        dest_alloc.mark_relocation_range(relocations);

        Ok(())
    }
}
/// Machine pointer introspection.
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn scalar_to_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
        // We use `to_bits_or_ptr_internal` since we are just implementing the method people need to
        // call to force getting out a pointer.
        Ok(
            match scalar
                .to_bits_or_ptr_internal(self.pointer_size())
                .map_err(|s| err_ub!(ScalarSizeMismatch(s)))?
            {
                Err(ptr) => ptr.into(),
                Ok(bits) => {
                    let addr = u64::try_from(bits).unwrap();
                    M::ptr_from_addr_transmute(&self, addr)
                }
            },
        )
    }
    /// Test if this value might be null.
    /// If the machine does not support ptr-to-int casts, this is conservative.
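    ///
    /// For instance (an illustrative sketch, not a doctest), an integer scalar is
    /// considered null exactly when its bits are zero:
    ///
    /// ```ignore (illustrative)
    /// assert!(ecx.scalar_may_be_null(Scalar::from_u64(0))?);
    /// assert!(!ecx.scalar_may_be_null(Scalar::from_u64(1))?);
    /// ```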
    pub fn scalar_may_be_null(&self, scalar: Scalar<M::PointerTag>) -> InterpResult<'tcx, bool> {
        Ok(match scalar.try_to_int() {
            Ok(int) => int.is_null(),
            Err(_) => {
                // Can only happen during CTFE.
                let ptr = self.scalar_to_ptr(scalar)?;
                match self.ptr_try_get_alloc_id(ptr) {
                    Ok((alloc_id, offset, _)) => {
                        let (size, _align) = self
                            .get_alloc_size_and_align(alloc_id, AllocCheck::MaybeDead)
                            .expect("alloc info with MaybeDead cannot fail");
                        // If the pointer is out-of-bounds, it may be null.
                        // Note that one-past-the-end (offset == size) is still inbounds, and never null.
                        offset > size
                    }
                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
                }
            }
        })
    }
    /// Turning a "maybe pointer" into a proper pointer (and some information
    /// about where it points), or an absolute address.
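    ///
    /// A sketch of how callers branch on the result (illustrative, not a doctest):
    ///
    /// ```ignore (illustrative)
    /// match ecx.ptr_try_get_alloc_id(ptr) {
    ///     Ok((alloc_id, offset, _extra)) => { /* pointer into `alloc_id` at `offset` */ }
    ///     Err(addr) => { /* plain integer address `addr` without provenance */ }
    /// }
    /// ```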
    pub fn ptr_try_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::PointerTag>>,
    ) -> Result<(AllocId, Size, M::TagExtra), u64> {
        match ptr.into_pointer_or_addr() {
            Ok(ptr) => match M::ptr_get_alloc(self, ptr) {
                Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
                None => {
                    assert!(M::PointerTag::OFFSET_IS_ADDR);
                    let (_, addr) = ptr.into_parts();
                    Err(addr.bytes())
                }
            },
            Err(addr) => Err(addr.bytes()),
        }
    }
    /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
    #[inline(always)]
    pub fn ptr_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::PointerTag>>,
    ) -> InterpResult<'tcx, (AllocId, Size, M::TagExtra)> {
        self.ptr_try_get_alloc_id(ptr).map_err(|offset| {
            err_ub!(DanglingIntPointer(offset, CheckInAllocMsg::InboundsTest)).into()
        })
    }
}