//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-NULL)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!
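//!
//! For example (an illustrative sketch, not code from this module; `memory`, `addr`,
//! `size`, and `align` are placeholders): a read of `size` bytes must validate the
//! address even when `size` may be 0, which is what `check_ptr_access` below does
//! before short-circuiting:
//!
//! ```ignore
//! let ptr = match memory.check_ptr_access(addr, size, align)? {
//!     Some(ptr) => ptr,        // non-empty access: use the checked pointer
//!     None => return Ok(&[]),  // zero-sized access: nothing left to do
//! };
//! ```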

use std::borrow::Cow;
use std::collections::VecDeque;
use std::ptr;

use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
use rustc::ty::layout::{Align, TargetDataLayout, Size, HasDataLayout};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};

use syntax::ast::Mutability;

use super::{
    Pointer, AllocId, Allocation, GlobalId, AllocationExtra,
    InterpResult, Scalar, GlobalAlloc, PointerArithmetic,
    Machine, AllocMap, MayLeak, ErrorHandled, CheckInAllocMsg,
};

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory backing vtables. Error if ever deallocated.
    Vtable,
    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}
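
// As an illustration (the machine kind below is hypothetical, not defined in this
// crate): a machine can route its own allocations through the `Machine` variant,
// e.g. `MemoryKind::Machine(MyMachineKind::Heap)`, and decide in its `MayLeak`
// impl whether such an allocation may still exist when interpretation ends.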

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::Vtable => true,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

/// Used by `get_size_and_align` to indicate whether the allocation needs to be live.
#[derive(Debug, Copy, Clone)]
pub enum AllocCheck {
    /// Allocation must be live and not a function pointer.
    Dereferenceable,
    /// Allocation needs to be live, but may be a function pointer.
    Live,
    /// Allocation may be dead.
    MaybeDead,
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) =>
                Ok(instance),
            FnVal::Other(_) => throw_unsup_format!(
                "'foreign' function pointers are not supported in this context"
            ),
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// static and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a static (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a static, doing so will
    /// create a copy of the static allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with NULL, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,

    /// Extra data added by the machine.
    pub extra: M::MemoryExtra,

    /// Lets us implement `HasDataLayout`, which is awfully convenient.
    pub tcx: TyCtxtAt<'tcx>,
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for Memory<'mir, 'tcx, M> {
    fn data_layout(&self) -> &TargetDataLayout {
        &self.tcx.data_layout
    }
}

// FIXME: Really we shouldn't clone memory, ever. Snapshot machinery should instead
// carefully copy only the reachable parts.
impl<'mir, 'tcx, M> Clone for Memory<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx, PointerTag = (), AllocExtra = (), MemoryExtra = ()>,
    M::MemoryMap: AllocMap<AllocId, (MemoryKind<M::MemoryKinds>, Allocation)>,
{
    fn clone(&self) -> Self {
        Memory {
            alloc_map: self.alloc_map.clone(),
            extra_fn_ptr_map: self.extra_fn_ptr_map.clone(),
            dead_alloc_map: self.dead_alloc_map.clone(),
            extra: (),
            tcx: self.tcx,
        }
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn new(tcx: TyCtxtAt<'tcx>, extra: M::MemoryExtra) -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxHashMap::default(),
            dead_alloc_map: FxHashMap::default(),
            extra,
            tcx,
        }
    }

    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the *canonical* machine pointer to the allocation. Must never be used
    /// for any other pointers!
    ///
    /// This represents a *direct* access to that memory, as opposed to access
    /// through a pointer that was created by the program.
    pub fn tag_static_base_pointer(&self, ptr: Pointer) -> Pointer<M::PointerTag> {
        ptr.with_tag(M::tag_static_base_pointer(&self.extra, ptr.alloc_id))
    }

    pub fn create_fn_alloc(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
    ) -> Pointer<M::PointerTag>
    {
        let id = match fn_val {
            FnVal::Instance(instance) => self.tcx.alloc_map.lock().create_fn_alloc(instance),
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.alloc_map.lock().reserve();
                let old = self.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        self.tag_static_base_pointer(Pointer::from(id))
    }

    pub fn allocate(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::undef(size, align);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_static_bytes(
        &mut self,
        bytes: &[u8],
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let alloc = Allocation::from_byte_aligned_bytes(bytes);
        self.allocate_with(alloc, kind)
    }

    pub fn allocate_with(
        &mut self,
        alloc: Allocation,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> Pointer<M::PointerTag> {
        let id = self.tcx.alloc_map.lock().reserve();
        debug_assert_ne!(Some(kind), M::STATIC_KIND.map(MemoryKind::Machine),
            "dynamically allocating static memory");
        let (alloc, tag) = M::init_allocation_extra(&self.extra, id, Cow::Owned(alloc), Some(kind));
        self.alloc_map.insert(id, (kind, alloc.into_owned()));
        Pointer::from(id).with_tag(tag)
    }

    pub fn reallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        if ptr.offset.bytes() != 0 {
            throw_unsup!(ReallocateNonBasePtr)
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely that the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate(new_size, new_align, kind);
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_raw(ptr.alloc_id)?.size,
        };
        self.copy(
            ptr,
            new_ptr,
            old_size.min(new_size),
            /*nonoverlapping*/ true,
        )?;
        self.deallocate(ptr, old_size_and_align, kind)?;

        Ok(new_ptr)
    }

    /// Deallocate a local, or do nothing if that local has been made into a static.
    pub fn deallocate_local(&mut self, ptr: Pointer<M::PointerTag>) -> InterpResult<'tcx> {
        // The allocation might be already removed by static interning.
        // This can only really happen in the CTFE instance, not in miri.
        if self.alloc_map.contains_key(&ptr.alloc_id) {
            self.deallocate(ptr, None, MemoryKind::Stack)
        } else {
            Ok(())
        }
    }

    pub fn deallocate(
        &mut self,
        ptr: Pointer<M::PointerTag>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKinds>,
    ) -> InterpResult<'tcx> {
        trace!("deallocating: {}", ptr.alloc_id);

        if ptr.offset.bytes() != 0 {
            throw_unsup!(DeallocateNonBasePtr)
        }

        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
            Some(alloc) => alloc,
            None => {
                // Deallocating static memory -- always an error
                return Err(match self.tcx.alloc_map.lock().get(ptr.alloc_id) {
                    Some(GlobalAlloc::Function(..)) => err_unsup!(DeallocatedWrongMemoryKind(
                        "function".to_string(),
                        format!("{:?}", kind),
                    )),
                    Some(GlobalAlloc::Static(..)) | Some(GlobalAlloc::Memory(..)) => err_unsup!(
                        DeallocatedWrongMemoryKind("static".to_string(), format!("{:?}", kind))
                    ),
                    None => err_unsup!(DoubleFree),
                }
                .into());
            }
        };

        if alloc_kind != kind {
            throw_unsup!(DeallocatedWrongMemoryKind(
                format!("{:?}", alloc_kind),
                format!("{:?}", kind),
            ))
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size || align != alloc.align {
                let bytes = alloc.size;
                throw_unsup!(IncorrectAllocationInformation(size, bytes, align, alloc.align))
            }
        }

        // Let the machine take some extra action
        let size = alloc.size;
        AllocationExtra::memory_deallocated(&mut alloc, ptr, size)?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.dead_alloc_map.insert(
            ptr.alloc_id,
            (alloc.size, alloc.align)
        );
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }

    /// Check if the given scalar is allowed to do a memory access of given `size`
    /// and `align`. On success, returns `None` for zero-sized accesses (where
    /// nothing else is left to do) and a `Pointer` to use for the actual access otherwise.
    /// Crucially, if the input is a `Pointer`, we will test it for liveness
    /// *even if* the size is 0.
    ///
    /// Everyone accessing memory based on a `Scalar` should use this method to get the
    /// `Pointer` they need. And even if you already have a `Pointer`, call this method
    /// to make sure it is sufficiently aligned and not dangling. Not doing that may
    /// cause ICEs.
    ///
    /// Most of the time you should use `check_mplace_access`, but when you just have a pointer,
    /// this method is still appropriate.
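    ///
    /// A minimal usage sketch (illustrative; this mirrors how `read_bytes` below uses it):
    ///
    /// ```ignore
    /// let ptr = match self.check_ptr_access(sptr, size, align)? {
    ///     Some(ptr) => ptr,        // non-empty access: use the checked pointer
    ///     None => return Ok(&[]),  // zero-sized access: nothing else to do
    /// };
    /// ```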
    pub fn check_ptr_access(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        let align = M::CHECK_ALIGN.then_some(align);
        self.check_ptr_access_align(sptr, size, align, CheckInAllocMsg::MemoryAccessTest)
    }

    /// Like `check_ptr_access`, but *definitely* checks alignment when `align`
    /// is `Some` (overriding `M::CHECK_ALIGN`). Also lets the caller control
    /// the error message for the out-of-bounds case.
    pub fn check_ptr_access_align(
        &self,
        sptr: Scalar<M::PointerTag>,
        size: Size,
        align: Option<Align>,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
        fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
            if offset % align.bytes() == 0 {
                Ok(())
            } else {
                // The biggest power of two through which `offset` is divisible.
                let offset_pow2 = 1 << offset.trailing_zeros();
                throw_unsup!(AlignmentCheckFailed {
                    has: Align::from_bytes(offset_pow2).unwrap(),
                    required: align,
                })
            }
        }
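        // (Worked example for the helper above: an offset of 12 has two trailing
        // zero bits, so `offset_pow2 == 4`; a required 8-byte alignment then fails
        // with `has: 4, required: 8`, while a 4-byte requirement passes.)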

        // Normalize to a `Pointer` if we definitely need one.
        let normalized = if size.bytes() == 0 {
            // Can be an integer, just take what we got. We do NOT `force_bits` here;
            // if this is already a `Pointer` we want to do the bounds checks!
            sptr
        } else {
            // A "real" access, we must get a pointer.
            Scalar::from(self.force_ptr(sptr)?)
        };
        Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
            Ok(bits) => {
                let bits = bits as u64; // it's ptr-sized
                assert!(size.bytes() == 0);
                // Must be non-NULL.
                if bits == 0 {
                    throw_unsup!(InvalidNullPointerUsage)
                }
                // Must be aligned.
                if let Some(align) = align {
                    check_offset_align(bits, align)?;
                }
                None
            }
            Err(ptr) => {
                let (allocation_size, alloc_align) =
                    self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
                // Test bounds. This also ensures non-NULL.
                // It is sufficient to check this for the end pointer. The addition
                // checks for overflow.
                let end_ptr = ptr.offset(size, self)?;
                end_ptr.check_inbounds_alloc(allocation_size, msg)?;
                // Test align. Check this last; if both bounds and alignment are violated
                // we want the error to be about the bounds.
                if let Some(align) = align {
                    if alloc_align.bytes() < align.bytes() {
                        // The allocation itself is not aligned enough.
                        // FIXME: Alignment check is too strict, depending on the base address that
                        // got picked we might be aligned even if this check fails.
                        // We instead have to fall back to converting to an integer and checking
                        // the "real" alignment.
                        throw_unsup!(AlignmentCheckFailed {
                            has: alloc_align,
                            required: align,
                        });
                    }
                    check_offset_align(ptr.offset.bytes(), align)?;
                }

                // We can still be zero-sized in this branch, in which case we have to
                // return `None`.
                if size.bytes() == 0 { None } else { Some(ptr) }
            }
        })
    }

    /// Test if the pointer might be NULL.
    pub fn ptr_may_be_null(
        &self,
        ptr: Pointer<M::PointerTag>,
    ) -> bool {
        let (size, _align) = self.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
            .expect("alloc info with MaybeDead cannot fail");
        ptr.check_inbounds_alloc(size, CheckInAllocMsg::NullPointerTest).is_err()
    }
}

/// Allocation accessors
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Helper function to obtain the global (tcx) allocation for a static.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer tag, so it is indirected through
    /// `M::tag_allocation`.
    ///
    /// Notice that every static has two `AllocId`s that will resolve to the same
    /// thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
    /// and the other one maps to `GlobalAlloc::Memory`, this is returned by
    /// `const_eval_raw` and it is the "resolved" ID.
    /// The resolved ID is never used by the interpreted program, it is hidden.
    /// The `GlobalAlloc::Memory` branch here is still reachable though; when a static
    /// contains a reference to memory that was created during its evaluation (i.e., not to
    /// another static), those inner references only exist in "resolved" form.
    fn get_static_alloc(
        memory_extra: &M::MemoryExtra,
        tcx: TyCtxtAt<'tcx>,
        id: AllocId,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
        let alloc = tcx.alloc_map.lock().get(id);
        let alloc = match alloc {
            Some(GlobalAlloc::Memory(mem)) =>
                Cow::Borrowed(mem),
            Some(GlobalAlloc::Function(..)) =>
                throw_unsup!(DerefFunctionPointer),
            None =>
                throw_unsup!(DanglingPointerDeref),
            Some(GlobalAlloc::Static(def_id)) => {
                // We got a "lazy" static that has not been computed yet.
                if tcx.is_foreign_item(def_id) {
                    trace!("static_alloc: foreign item {:?}", def_id);
                    M::find_foreign_static(tcx.tcx, def_id)?
                } else {
                    trace!("static_alloc: Need to compute {:?}", def_id);
                    let instance = Instance::mono(tcx.tcx, def_id);
                    let gid = GlobalId {
                        instance,
                        promoted: None,
                    };
                    // Use the raw query here to break validation cycles. Later uses of the static
                    // will call the full query anyway.
                    let raw_const = tcx.const_eval_raw(ty::ParamEnv::reveal_all().and(gid))
                        .map_err(|err| {
                            // No need to report anything, the `const_eval` call takes care of that
                            // for statics.
                            assert!(tcx.is_static(def_id));
                            match err {
                                ErrorHandled::Reported =>
                                    err_inval!(ReferencedConstant),
                                ErrorHandled::TooGeneric =>
                                    err_inval!(TooGeneric),
                            }
                        })?;
                    // Make sure we use the ID of the resolved memory, not the lazy one!
                    let id = raw_const.alloc_id;
                    let allocation = tcx.alloc_map.lock().unwrap_memory(id);

                    M::before_access_static(allocation)?;
                    Cow::Borrowed(allocation)
                }
            }
        };
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        let (alloc, tag) = M::init_allocation_extra(
            memory_extra,
            id, // always use the ID we got as input, not the "hidden" one.
            alloc,
            M::STATIC_KIND.map(MemoryKind::Machine),
        );
        debug_assert_eq!(tag, M::tag_static_base_pointer(memory_extra, id));
        Ok(alloc)
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCtx` instead!
    pub fn get_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_static_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
        let a = self.alloc_map.get_or(id, || {
            let alloc = Self::get_static_alloc(&self.extra, self.tcx, id).map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that.
                    let kind = M::STATIC_KIND.expect(
                        "I got an owned allocation that I have to copy but the machine does \
                         not expect that to happen"
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type.
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCtx` instead!
    pub fn get_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
        let tcx = self.tcx;
        let memory_extra = &self.extra;
        let a = self.alloc_map.get_mut_or(id, || {
            // Need to make a copy, even if `get_static_alloc` is able
            // to give us a cheap reference.
            let alloc = Self::get_static_alloc(memory_extra, tcx, id)?;
            if alloc.mutability == Mutability::Immutable {
                throw_unsup!(ModifiedConstantMemory)
            }
            match M::STATIC_KIND {
                Some(kind) => Ok((MemoryKind::Machine(kind), alloc.into_owned())),
                None => throw_unsup!(ModifiedStatic),
            }
        });
        // Unpack the error type manually because type inference doesn't
        // work otherwise (and we cannot help it because `impl Trait`).
        match a {
            Err(e) => Err(e),
            Ok(a) => {
                let a = &mut a.1;
                if a.mutability == Mutability::Immutable {
                    throw_unsup!(ModifiedConstantMemory)
                }
                Ok(a)
            }
        }
    }

    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    ///
    /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
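    ///
    /// A minimal usage sketch (illustrative; this mirrors `ptr_may_be_null` above):
    ///
    /// ```ignore
    /// let (size, _align) = self.get_size_and_align(id, AllocCheck::MaybeDead)
    ///     .expect("cannot fail with MaybeDead");
    /// ```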
    pub fn get_size_and_align(
        &self,
        id: AllocId,
        liveness: AllocCheck,
    ) -> InterpResult<'static, (Size, Align)> {
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a static's allocation in miri
        if let Some((_, alloc)) = self.alloc_map.get(id) {
            return Ok((alloc.size, alloc.align));
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if let Ok(_) = self.get_fn_alloc(id) {
            return if let AllocCheck::Dereferenceable = liveness {
                // The caller requested no function pointers.
                throw_unsup!(DerefFunctionPointer)
            } else {
                Ok((Size::ZERO, Align::from_bytes(1).unwrap()))
            };
        }

        // # Statics
        // Can't do this in the match argument, we may get cycle errors since the lock would
        // be held throughout the match.
        let alloc = self.tcx.alloc_map.lock().get(id);
        match alloc {
            Some(GlobalAlloc::Static(did)) => {
                // Use size and align of the type.
                let ty = self.tcx.type_of(did);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                Ok((layout.size, layout.align.abi))
            },
            Some(GlobalAlloc::Memory(alloc)) =>
                // Need to duplicate the logic here, because the global allocations have
                // different associated types than the interpreter-local ones.
                Ok((alloc.size, alloc.align)),
            Some(GlobalAlloc::Function(_)) =>
                bug!("We already checked function pointers above"),
            // The rest must be dead.
            None => if let AllocCheck::MaybeDead = liveness {
                // Deallocated pointers are allowed, we should be able to find
                // them in the map.
                Ok(*self.dead_alloc_map.get(&id)
                    .expect("deallocated pointers should all be recorded in \
                             `dead_alloc_map`"))
            } else {
                throw_unsup!(DanglingPointerDeref)
            },
        }
    }

    fn get_fn_alloc(&self, id: AllocId) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        trace!("reading fn ptr: {}", id);
        if let Some(extra) = self.extra_fn_ptr_map.get(&id) {
            Ok(FnVal::Other(*extra))
        } else {
            match self.tcx.alloc_map.lock().get(id) {
                Some(GlobalAlloc::Function(instance)) => Ok(FnVal::Instance(instance)),
                _ => throw_unsup!(ExecuteMemory),
            }
        }
    }

    pub fn get_fn(
        &self,
        ptr: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        let ptr = self.force_ptr(ptr)?; // We definitely need a pointer value.
        if ptr.offset.bytes() != 0 {
            throw_unsup!(InvalidFunctionPointer)
        }
        self.get_fn_alloc(ptr.alloc_id)
    }

    pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_raw_mut(id)?.mutability = Mutability::Immutable;
        Ok(())
    }

    /// Print an allocation and all allocations it points to, recursively.
    /// This prints directly to stderr, ignoring RUSTC_LOG! It is up to the caller to
    /// control for this.
    pub fn dump_alloc(&self, id: AllocId) {
        self.dump_allocs(vec![id]);
    }

    fn dump_alloc_helper<Tag, Extra>(
        &self,
        allocs_seen: &mut FxHashSet<AllocId>,
        allocs_to_print: &mut VecDeque<AllocId>,
        mut msg: String,
        alloc: &Allocation<Tag, Extra>,
        extra: String,
    ) {
        use std::fmt::Write; // for `write!` into the `String` buffer

        let prefix_len = msg.len();
        let mut relocations = vec![];

        for i in 0..alloc.size.bytes() {
            let i = Size::from_bytes(i);
            if let Some(&(_, target_id)) = alloc.relocations().get(&i) {
                if allocs_seen.insert(target_id) {
                    allocs_to_print.push_back(target_id);
                }
                relocations.push((i, target_id));
            }
            if alloc.undef_mask().is_range_defined(i, i + Size::from_bytes(1)).is_ok() {
                // this `as usize` is fine, since `i` came from a `usize`
                let i = i.bytes() as usize;

                // Checked definedness (and thus range) and relocations. This access also doesn't
                // influence interpreter execution but is only for debugging.
                let bytes = alloc.inspect_with_undef_and_ptr_outside_interpreter(i..i+1);
                write!(msg, "{:02x} ", bytes[0]).unwrap();
            } else {
                msg.push_str("__ ");
            }
        }

        eprintln!(
            "{}({} bytes, alignment {}){}",
            msg,
            alloc.size.bytes(),
            alloc.align.bytes(),
            extra
        );

        if !relocations.is_empty() {
            msg.clear();
            write!(msg, "{:1$}", "", prefix_len).unwrap(); // Print spaces.
            let mut pos = Size::ZERO;
            let relocation_width = (self.pointer_size().bytes() - 1) * 3;
            for (i, target_id) in relocations {
                // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
                let target = format!("({})", target_id);
                // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
                pos = i + self.pointer_size();
            }
            eprintln!("{}", msg);
        }
    }

    /// Print a list of allocations and all allocations they point to, recursively.
    /// This prints directly to stderr, ignoring RUSTC_LOG! It is up to the caller to
    /// control for this.
    pub fn dump_allocs(&self, mut allocs: Vec<AllocId>) {
        allocs.sort();
        allocs.dedup();
        let mut allocs_to_print = VecDeque::from(allocs);
        let mut allocs_seen = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            let msg = format!("Alloc {:<5} ", format!("{}:", id));

            // normal alloc?
            match self.alloc_map.get_or(id, || Err(())) {
                Ok((kind, alloc)) => {
                    let extra = match kind {
                        MemoryKind::Stack => " (stack)".to_owned(),
                        MemoryKind::Vtable => " (vtable)".to_owned(),
                        MemoryKind::CallerLocation => " (caller_location)".to_owned(),
                        MemoryKind::Machine(m) => format!(" ({:?})", m),
                    };
                    self.dump_alloc_helper(
                        &mut allocs_seen, &mut allocs_to_print,
                        msg, alloc, extra,
                    );
                },
                Err(()) => {
                    // static alloc?
                    match self.tcx.alloc_map.lock().get(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            self.dump_alloc_helper(
                                &mut allocs_seen, &mut allocs_to_print,
                                msg, alloc, " (immutable)".to_owned()
                            );
                        }
                        Some(GlobalAlloc::Function(func)) => {
                            eprintln!("{} {}", msg, func);
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            eprintln!("{} {:?}", msg, did);
                        }
                        None => {
                            eprintln!("{} (deallocated)", msg);
                        }
                    }
                },
            };
        }
    }

    pub fn leak_report(&self) -> usize {
        let leaks: Vec<_> = self.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() { None } else { Some(id) }
        });
        let n = leaks.len();
        if n > 0 {
            eprintln!("### LEAK REPORT ###");
            self.dump_allocs(leaks);
        }
        n
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

/// Reading and writing.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    /// Reads the given number of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_bytes(
        &self,
        ptr: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => return Ok(&[]), // zero-sized access
        };
        self.get_raw(ptr.alloc_id)?.get_bytes(self, ptr, size)
    }

    /// Reads a 0-terminated sequence of bytes from memory. Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_c_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, &[u8]> {
        let ptr = self.force_ptr(ptr)?; // We need to read at least 1 byte, so we *need* a ptr.
        self.get_raw(ptr.alloc_id)?.read_c_str(self, ptr)
    }

    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
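    ///
    /// A minimal usage sketch (illustrative; the iterator must report an exact lower
    /// bound via `size_hint`, e.g. when writing four zero bytes):
    ///
    /// ```ignore
    /// memory.write_bytes(ptr, std::iter::repeat(0u8).take(4))?;
    /// ```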
    pub fn write_bytes(
        &mut self,
        ptr: Scalar<M::PointerTag>,
        src: impl IntoIterator<Item=u8>,
    ) -> InterpResult<'tcx>
    {
        let src = src.into_iter();
        let size = Size::from_bytes(src.size_hint().0 as u64);
        // `write_bytes` checks that this lower bound matches the upper bound and matches reality.
        let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
            Some(ptr) => ptr,
            None => return Ok(()), // zero-sized access
        };
        let tcx = self.tcx.tcx;
        self.get_raw_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src)
    }

    /// Expects the caller to have checked bounds and alignment.
    pub fn copy(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.copy_repeatedly(src, dest, size, 1, nonoverlapping)
    }

    /// Expects the caller to have checked bounds and alignment.
    pub fn copy_repeatedly(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        length: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        // First copy the relocations to a temporary buffer, because
        // `get_bytes_mut` will clear the relocations, which is correct,
        // since we don't want to keep any relocations at the target.
        // (`get_bytes_with_undef_and_ptr` below checks that there are no
        // relocations overlapping the edges; those would not be handled correctly.)
        let relocations = self.get_raw(src.alloc_id)?
            .prepare_relocation_copy(self, src, size, dest, length);

        let tcx = self.tcx.tcx;

        // This checks relocation edges on the src.
        let src_bytes = self.get_raw(src.alloc_id)?
            .get_bytes_with_undef_and_ptr(&tcx, src, size)?
            .as_ptr();
        let dest_bytes = self.get_raw_mut(dest.alloc_id)?
            .get_bytes_mut(&tcx, dest, size * length)?
            .as_mut_ptr();

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            assert_eq!(size.bytes() as usize as u64, size.bytes());
            if src.alloc_id == dest.alloc_id {
                if nonoverlapping {
                    if (src.offset <= dest.offset && src.offset + size > dest.offset) ||
                       (dest.offset <= src.offset && dest.offset + size > src.offset)
                    {
                        throw_ub_format!(
                            "copy_nonoverlapping called on overlapping ranges"
                        )
                    }
                }

                for i in 0..length {
                    ptr::copy(src_bytes,
                              dest_bytes.offset((size.bytes() * i) as isize),
                              size.bytes() as usize);
                }
            } else {
                for i in 0..length {
                    ptr::copy_nonoverlapping(src_bytes,
                                             dest_bytes.offset((size.bytes() * i) as isize),
                                             size.bytes() as usize);
                }
            }
        }

        // Copy definedness to the destination.
        self.copy_undef_mask(src, dest, size, length)?;
        // Copy the relocations to the destination.
        self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations);

        Ok(())
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    // FIXME: Add a fast version for the common, nonoverlapping case
    fn copy_undef_mask(
        &mut self,
        src: Pointer<M::PointerTag>,
        dest: Pointer<M::PointerTag>,
        size: Size,
        repeat: u64,
    ) -> InterpResult<'tcx> {
        // The bits have to be saved locally before writing to dest in case src and dest overlap.
        assert_eq!(size.bytes() as usize as u64, size.bytes());

        let src_alloc = self.get_raw(src.alloc_id)?;
        let compressed = src_alloc.compress_undef_range(src, size);

        // Now fill in all the data.
        let dest_allocation = self.get_raw_mut(dest.alloc_id)?;
        dest_allocation.mark_compressed_undef_range(&compressed, dest, size, repeat);

        Ok(())
    }

    pub fn force_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        match scalar {
            Scalar::Ptr(ptr) => Ok(ptr),
            _ => M::int_to_ptr(&self, scalar.to_machine_usize(self)?),
        }
    }

    pub fn force_bits(
        &self,
        scalar: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, u128> {
        match scalar.to_bits_or_ptr(size, self) {
            Ok(bits) => Ok(bits),
            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)? as u128),
        }
    }
}