1 //! The memory subsystem.
2 //!
3 //! Generally, we use `Pointer` to denote memory addresses. However, some operations
4 //! have a "size"-like parameter, and they take `Scalar` for the address because
5 //! if the size is 0, then the pointer can also be a (properly aligned, non-null)
6 //! integer. It is crucial that these operations call `check_align` *before*
7 //! short-circuiting the empty case!
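//!
//! For example (an illustrative sketch in terms of this file's own API, not a doctest):
//!
//! ```ignore (illustrative sketch)
//! // A zero-sized access through a plain integer address touches no bytes, but it
//! // must still be rejected if the address is null or not aligned to `align`:
//! ecx.check_ptr_access_align(ptr, Size::ZERO, align, CheckInAllocMsg::MemoryAccessTest)?;
//! ```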
8
9 use std::assert_matches::assert_matches;
10 use std::borrow::Cow;
11 use std::collections::VecDeque;
12 use std::convert::TryFrom;
13 use std::fmt;
14 use std::ptr;
15
16 use rustc_ast::Mutability;
17 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
18 use rustc_middle::mir::display_allocation;
19 use rustc_middle::ty::{Instance, ParamEnv, TyCtxt};
20 use rustc_target::abi::{Align, HasDataLayout, Size};
21
22 use super::{
23 alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc, InterpCx,
24 InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
25 ScalarMaybeUninit,
26 };
27
28 #[derive(Debug, PartialEq, Copy, Clone)]
29 pub enum MemoryKind<T> {
30 /// Stack memory. Error if deallocated except during a stack pop.
31 Stack,
32 /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
33 CallerLocation,
34 /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
35 Machine(T),
36 }
37
38 impl<T: MayLeak> MayLeak for MemoryKind<T> {
39 #[inline]
40 fn may_leak(self) -> bool {
41 match self {
42 MemoryKind::Stack => false,
43 MemoryKind::CallerLocation => true,
44 MemoryKind::Machine(k) => k.may_leak(),
45 }
46 }
47 }
48
49 impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
50 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
51 match self {
52 MemoryKind::Stack => write!(f, "stack variable"),
53 MemoryKind::CallerLocation => write!(f, "caller location"),
54 MemoryKind::Machine(m) => write!(f, "{}", m),
55 }
56 }
57 }
58
59 /// Used by `get_alloc_size_and_align` to indicate whether the allocation needs to be live.
60 #[derive(Debug, Copy, Clone)]
61 pub enum AllocCheck {
62 /// Allocation must be live and not a function pointer.
63 Dereferenceable,
64 /// Allocation needs to be live, but may be a function pointer.
65 Live,
66 /// Allocation may be dead.
67 MaybeDead,
68 }
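// For instance, `check_ptr_access_align` below maps `CheckInAllocMsg::DerefTest` and
// `CheckInAllocMsg::MemoryAccessTest` to `Dereferenceable`, while the pointer-arithmetic,
// `offset_from`, and inbounds messages only require `Live`.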
69
70 /// The value of a function pointer.
71 #[derive(Debug, Copy, Clone)]
72 pub enum FnVal<'tcx, Other> {
73 Instance(Instance<'tcx>),
74 Other(Other),
75 }
76
77 impl<'tcx, Other> FnVal<'tcx, Other> {
78 pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
79 match self {
80 FnVal::Instance(instance) => Ok(instance),
81 FnVal::Other(_) => {
82 throw_unsup_format!("'foreign' function pointers are not supported in this context")
83 }
84 }
85 }
86 }
87
88 // `Memory` has to depend on the `Machine` because some of its operations
89 // (e.g., `get`) call a `Machine` hook.
90 pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
91 /// Allocations local to this instance of the miri engine. The kind
92 /// helps ensure that the same mechanism is used for allocation and
93 /// deallocation. When an allocation is not found here, it is a
94 /// global and looked up in the `tcx` for read access. Some machines may
95 /// have to mutate this map even on a read-only access to a global (because
96 /// they do pointer provenance tracking and the allocations in `tcx` have
97 /// the wrong type), so we let the machine override this type.
98 /// Either way, if the machine allows writing to a global, doing so will
99 /// create a copy of the global allocation here.
100 // FIXME: this should not be public, but interning currently needs access to it
101 pub(super) alloc_map: M::MemoryMap,
102
103 /// Map for "extra" function pointers.
104 extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,
105
106 /// To be able to compare pointers with null, and to check alignment for accesses
107 /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
108 /// that do not exist any more.
109 // FIXME: this should not be public, but interning currently needs access to it
110 pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,
111 }
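// Illustration of why `dead_alloc_map` is kept (a sketch, not a verbatim caller): once
// `deallocate_ptr` has removed an allocation from `alloc_map`, queries such as
// `get_alloc_size_and_align(id, AllocCheck::MaybeDead)` still need the old size and
// alignment, e.g. so that a dangling-but-aligned zero-sized access can pass its checks.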
112
113 /// A reference to some allocation that was already bounds-checked for the given region
114 /// and had the on-access machine hooks run.
115 #[derive(Copy, Clone)]
116 pub struct AllocRef<'a, 'tcx, Tag, Extra> {
117 alloc: &'a Allocation<Tag, Extra>,
118 range: AllocRange,
119 tcx: TyCtxt<'tcx>,
120 alloc_id: AllocId,
121 }
122 /// A reference to some allocation that was already bounds-checked for the given region
123 /// and had the on-access machine hooks run.
124 pub struct AllocRefMut<'a, 'tcx, Tag, Extra> {
125 alloc: &'a mut Allocation<Tag, Extra>,
126 range: AllocRange,
127 tcx: TyCtxt<'tcx>,
128 alloc_id: AllocId,
129 }
130
131 impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
132 pub fn new() -> Self {
133 Memory {
134 alloc_map: M::MemoryMap::default(),
135 extra_fn_ptr_map: FxHashMap::default(),
136 dead_alloc_map: FxHashMap::default(),
137 }
138 }
139
140 /// This is used by [priroda](https://github.com/oli-obk/priroda)
141 pub fn alloc_map(&self) -> &M::MemoryMap {
142 &self.alloc_map
143 }
144 }
145
146 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
147 /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
148 /// the machine pointer to the allocation. Must never be used
149 /// for any other pointers, nor for TLS statics.
150 ///
151 /// Using the resulting pointer represents a *direct* access to that memory
152 /// (e.g. by directly using a `static`),
153 /// as opposed to access through a pointer that was created by the program.
154 ///
155 /// This function can fail only if `ptr` points to an `extern static`.
156 #[inline]
157 pub fn global_base_pointer(
158 &self,
159 ptr: Pointer<AllocId>,
160 ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
161 let alloc_id = ptr.provenance;
162 // We need to handle `extern static`.
163 match self.tcx.get_global_alloc(alloc_id) {
164 Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
165 bug!("global memory cannot point to thread-local static")
166 }
167 Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
168 return M::extern_static_base_pointer(self, def_id);
169 }
170 _ => {}
171 }
172 // And we need to get the tag.
173 Ok(M::tag_alloc_base_pointer(self, ptr))
174 }
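// Illustrative use (a hypothetical sketch, not copied from a caller): after obtaining an
// untagged `AllocId` from `tcx` for a direct use of a `static`, one would do
// `let ptr = self.global_base_pointer(Pointer::from(alloc_id))?;` before accessing it.
// `create_fn_alloc_ptr` below does exactly this for function allocations.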
175
176 pub fn create_fn_alloc_ptr(
177 &mut self,
178 fn_val: FnVal<'tcx, M::ExtraFnVal>,
179 ) -> Pointer<M::PointerTag> {
180 let id = match fn_val {
181 FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
182 FnVal::Other(extra) => {
183 // FIXME(RalfJung): Should we have a cache here?
184 let id = self.tcx.reserve_alloc_id();
185 let old = self.memory.extra_fn_ptr_map.insert(id, extra);
186 assert!(old.is_none());
187 id
188 }
189 };
190 // Functions are global allocations, so make sure we get the right base pointer.
191 // We know this is not an `extern static` so this cannot fail.
192 self.global_base_pointer(Pointer::from(id)).unwrap()
193 }
194
195 pub fn allocate_ptr(
196 &mut self,
197 size: Size,
198 align: Align,
199 kind: MemoryKind<M::MemoryKind>,
200 ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
201 let alloc = Allocation::uninit(size, align, M::PANIC_ON_ALLOC_FAIL)?;
202 // We can `unwrap` since `alloc` contains no pointers.
203 Ok(self.allocate_raw_ptr(alloc, kind).unwrap())
204 }
205
206 pub fn allocate_bytes_ptr(
207 &mut self,
208 bytes: &[u8],
209 align: Align,
210 kind: MemoryKind<M::MemoryKind>,
211 mutability: Mutability,
212 ) -> Pointer<M::PointerTag> {
213 let alloc = Allocation::from_bytes(bytes, align, mutability);
214 // We can `unwrap` since `alloc` contains no pointers.
215 self.allocate_raw_ptr(alloc, kind).unwrap()
216 }
217
218 /// This can fail only if `alloc` contains relocations.
219 pub fn allocate_raw_ptr(
220 &mut self,
221 alloc: Allocation,
222 kind: MemoryKind<M::MemoryKind>,
223 ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
224 let id = self.tcx.reserve_alloc_id();
225 debug_assert_ne!(
226 Some(kind),
227 M::GLOBAL_KIND.map(MemoryKind::Machine),
228 "dynamically allocating global memory"
229 );
230 let alloc = M::init_allocation_extra(self, id, Cow::Owned(alloc), Some(kind))?;
231 self.memory.alloc_map.insert(id, (kind, alloc.into_owned()));
232 Ok(M::tag_alloc_base_pointer(self, Pointer::from(id)))
233 }
234
235 pub fn reallocate_ptr(
236 &mut self,
237 ptr: Pointer<Option<M::PointerTag>>,
238 old_size_and_align: Option<(Size, Align)>,
239 new_size: Size,
240 new_align: Align,
241 kind: MemoryKind<M::MemoryKind>,
242 ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
243 let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr)?;
244 if offset.bytes() != 0 {
245 throw_ub_format!(
246 "reallocating {:?} which does not point to the beginning of an object",
247 ptr
248 );
249 }
250
251 // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
252 // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
253 let new_ptr = self.allocate_ptr(new_size, new_align, kind)?;
254 let old_size = match old_size_and_align {
255 Some((size, _align)) => size,
256 None => self.get_alloc_raw(alloc_id)?.size(),
257 };
258 // This will also call the access hooks.
259 self.mem_copy(
260 ptr,
261 Align::ONE,
262 new_ptr.into(),
263 Align::ONE,
264 old_size.min(new_size),
265 /*nonoverlapping*/ true,
266 )?;
267 self.deallocate_ptr(ptr, old_size_and_align, kind)?;
268
269 Ok(new_ptr)
270 }
271
272 #[instrument(skip(self), level = "debug")]
273 pub fn deallocate_ptr(
274 &mut self,
275 ptr: Pointer<Option<M::PointerTag>>,
276 old_size_and_align: Option<(Size, Align)>,
277 kind: MemoryKind<M::MemoryKind>,
278 ) -> InterpResult<'tcx> {
279 let (alloc_id, offset, tag) = self.ptr_get_alloc_id(ptr)?;
280 trace!("deallocating: {}", alloc_id);
281
282 if offset.bytes() != 0 {
283 throw_ub_format!(
284 "deallocating {:?} which does not point to the beginning of an object",
285 ptr
286 );
287 }
288
289 let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
290 // Deallocating global memory -- always an error
291 return Err(match self.tcx.get_global_alloc(alloc_id) {
292 Some(GlobalAlloc::Function(..)) => {
293 err_ub_format!("deallocating {}, which is a function", alloc_id)
294 }
295 Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
296 err_ub_format!("deallocating {}, which is static memory", alloc_id)
297 }
298 None => err_ub!(PointerUseAfterFree(alloc_id)),
299 }
300 .into());
301 };
302
303 debug!(?alloc);
304
305 if alloc.mutability == Mutability::Not {
306 throw_ub_format!("deallocating immutable allocation {}", alloc_id);
307 }
308 if alloc_kind != kind {
309 throw_ub_format!(
310 "deallocating {}, which is {} memory, using {} deallocation operation",
311 alloc_id,
312 alloc_kind,
313 kind
314 );
315 }
316 if let Some((size, align)) = old_size_and_align {
317 if size != alloc.size() || align != alloc.align {
318 throw_ub_format!(
319 "incorrect layout on deallocation: {} has size {} and alignment {}, but gave size {} and alignment {}",
320 alloc_id,
321 alloc.size().bytes(),
322 alloc.align.bytes(),
323 size.bytes(),
324 align.bytes(),
325 )
326 }
327 }
328
329 // Let the machine take some extra action
330 let size = alloc.size();
331 M::memory_deallocated(
332 *self.tcx,
333 &mut self.machine,
334 &mut alloc.extra,
335 (alloc_id, tag),
336 alloc_range(Size::ZERO, size),
337 )?;
338
339 // Don't forget to remember size and align of this now-dead allocation
340 let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
341 if old.is_some() {
342 bug!("Nothing can be deallocated twice");
343 }
344
345 Ok(())
346 }
347
348 /// Internal helper function to determine the allocation and offset of a pointer (if any).
349 #[inline(always)]
350 fn get_ptr_access(
351 &self,
352 ptr: Pointer<Option<M::PointerTag>>,
353 size: Size,
354 align: Align,
355 ) -> InterpResult<'tcx, Option<(AllocId, Size, M::TagExtra)>> {
356 let align = M::enforce_alignment(&self).then_some(align);
357 self.check_and_deref_ptr(
358 ptr,
359 size,
360 align,
361 CheckInAllocMsg::MemoryAccessTest,
362 |alloc_id, offset, tag| {
363 let (size, align) =
364 self.get_alloc_size_and_align(alloc_id, AllocCheck::Dereferenceable)?;
365 Ok((size, align, (alloc_id, offset, tag)))
366 },
367 )
368 }
369
370 /// Check if the given pointer points to live memory of given `size` and `align`
371 /// (ignoring `M::enforce_alignment`). The caller can control the error message for the
372 /// out-of-bounds case.
373 #[inline(always)]
374 pub fn check_ptr_access_align(
375 &self,
376 ptr: Pointer<Option<M::PointerTag>>,
377 size: Size,
378 align: Align,
379 msg: CheckInAllocMsg,
380 ) -> InterpResult<'tcx> {
381 self.check_and_deref_ptr(ptr, size, Some(align), msg, |alloc_id, _, _| {
382 let check = match msg {
383 CheckInAllocMsg::DerefTest | CheckInAllocMsg::MemoryAccessTest => {
384 AllocCheck::Dereferenceable
385 }
386 CheckInAllocMsg::PointerArithmeticTest
387 | CheckInAllocMsg::OffsetFromTest
388 | CheckInAllocMsg::InboundsTest => AllocCheck::Live,
389 };
390 let (size, align) = self.get_alloc_size_and_align(alloc_id, check)?;
391 Ok((size, align, ()))
392 })?;
393 Ok(())
394 }
395
396 /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
397 /// to the allocation it points to. Supports both shared and mutable references, as the actual
398 /// checking is offloaded to a helper closure. `align` defines whether and which alignment check
399 /// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
400 fn check_and_deref_ptr<T>(
401 &self,
402 ptr: Pointer<Option<M::PointerTag>>,
403 size: Size,
404 align: Option<Align>,
405 msg: CheckInAllocMsg,
406 alloc_size: impl FnOnce(AllocId, Size, M::TagExtra) -> InterpResult<'tcx, (Size, Align, T)>,
407 ) -> InterpResult<'tcx, Option<T>> {
408 fn check_offset_align<'tcx>(offset: u64, align: Align) -> InterpResult<'tcx> {
409 if offset % align.bytes() == 0 {
410 Ok(())
411 } else {
412 // The biggest power of two by which `offset` is divisible.
413 let offset_pow2 = 1 << offset.trailing_zeros();
414 throw_ub!(AlignmentCheckFailed {
415 has: Align::from_bytes(offset_pow2).unwrap(),
416 required: align,
417 })
418 }
419 }
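// Worked example: `offset = 12` (0b1100) has two trailing zeros, so `offset_pow2 = 4`;
// with a required alignment of 8 this reports `has: 4, required: 8`.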
420
421 Ok(match self.ptr_try_get_alloc_id(ptr) {
422 Err(addr) => {
423 // We couldn't get a proper allocation. This is only okay if the access size is 0,
424 // and the address is not null.
425 if size.bytes() > 0 || addr == 0 {
426 throw_ub!(DanglingIntPointer(addr, msg));
427 }
428 // Must be aligned.
429 if let Some(align) = align {
430 check_offset_align(addr, align)?;
431 }
432 None
433 }
434 Ok((alloc_id, offset, tag)) => {
435 let (alloc_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, tag)?;
436 // Test bounds. This also ensures non-null.
437 // It is sufficient to check this for the end pointer. Also check for overflow!
438 if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
439 throw_ub!(PointerOutOfBounds {
440 alloc_id,
441 alloc_size,
442 ptr_offset: self.machine_usize_to_isize(offset.bytes()),
443 ptr_size: size,
444 msg,
445 })
446 }
447 // Ensure we never consider the null pointer dereferenceable.
448 if M::PointerTag::OFFSET_IS_ADDR {
449 assert_ne!(ptr.addr(), Size::ZERO);
450 }
451 // Test align. Check this last; if both bounds and alignment are violated
452 // we want the error to be about the bounds.
453 if let Some(align) = align {
454 if M::force_int_for_alignment_check(self) {
455 // `force_int_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
456 check_offset_align(ptr.addr().bytes(), align)?;
457 } else {
458 // Check allocation alignment and offset alignment.
459 if alloc_align.bytes() < align.bytes() {
460 throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
461 }
462 check_offset_align(offset.bytes(), align)?;
463 }
464 }
465
466 // We can still be zero-sized in this branch, in which case we have to
467 // return `None`.
468 if size.bytes() == 0 { None } else { Some(ret_val) }
469 }
470 })
471 }
472 }
473
474 /// Allocation accessors
475 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
476 /// Helper function to obtain a global (tcx) allocation.
477 /// This attempts to return a reference to an existing allocation if
478 /// one can be found in `tcx`. That, however, is only possible if `tcx` and
479 /// this machine use the same pointer tag, so it is indirected through
480 /// `M::init_allocation_extra`.
481 fn get_global_alloc(
482 &self,
483 id: AllocId,
484 is_write: bool,
485 ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
486 let (alloc, def_id) = match self.tcx.get_global_alloc(id) {
487 Some(GlobalAlloc::Memory(mem)) => {
488 // Memory of a constant or promoted or anonymous memory referenced by a static.
489 (mem, None)
490 }
491 Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
492 None => throw_ub!(PointerUseAfterFree(id)),
493 Some(GlobalAlloc::Static(def_id)) => {
494 assert!(self.tcx.is_static(def_id));
495 assert!(!self.tcx.is_thread_local_static(def_id));
496 // Notice that every static has two `AllocId`s that will resolve to the same
497 // thing here: one maps to `GlobalAlloc::Static` (the "lazy" ID),
498 // and the other one maps to `GlobalAlloc::Memory` (the "resolved" ID), which is
499 // returned by `eval_static_initializer`.
500 // The resolved ID is never used by the interpreted program; it stays hidden.
501 // This is relied upon for soundness of const-patterns; a pointer to the resolved
502 // ID would "sidestep" the checks that make sure consts do not point to statics!
503 // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
504 // contains a reference to memory that was created during its evaluation (i.e., not
505 // to another static), those inner references only exist in "resolved" form.
506 if self.tcx.is_foreign_item(def_id) {
507 throw_unsup!(ReadExternStatic(def_id));
508 }
509
510 // Use a precise span for better cycle errors.
511 (self.tcx.at(self.cur_span()).eval_static_initializer(def_id)?, Some(def_id))
512 }
513 };
514 M::before_access_global(*self.tcx, &self.machine, id, alloc, def_id, is_write)?;
515 // We got tcx memory. Let the machine initialize its "extra" stuff.
516 M::init_allocation_extra(
517 self,
518 id, // always use the ID we got as input, not the "hidden" one.
519 Cow::Borrowed(alloc.inner()),
520 M::GLOBAL_KIND.map(MemoryKind::Machine),
521 )
522 }
523
524 /// Gives raw access to the `Allocation`, without bounds or alignment checks.
525 /// The caller is responsible for calling the access hooks!
526 fn get_alloc_raw(
527 &self,
528 id: AllocId,
529 ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
530 // The error type of the inner closure here is somewhat funny. We have two
531 // ways of "erroring": an actual error, or getting a reference from
532 // `get_global_alloc` that we can use directly without inserting anything anywhere.
533 // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
534 let a = self.memory.alloc_map.get_or(id, || {
535 let alloc = self.get_global_alloc(id, /*is_write*/ false).map_err(Err)?;
536 match alloc {
537 Cow::Borrowed(alloc) => {
538 // We got a ref, cheaply return that as an "error" so that the
539 // map does not get mutated.
540 Err(Ok(alloc))
541 }
542 Cow::Owned(alloc) => {
543 // Need to put it into the map and return a ref to that
544 let kind = M::GLOBAL_KIND.expect(
545 "I got a global allocation that I have to copy but the machine does \
546 not expect that to happen",
547 );
548 Ok((MemoryKind::Machine(kind), alloc))
549 }
550 }
551 });
552 // Now unpack that funny error type
553 match a {
554 Ok(a) => Ok(&a.1),
555 Err(a) => a,
556 }
557 }
558
559 /// "Safe" (bounds and align-checked) allocation access.
560 pub fn get_ptr_alloc<'a>(
561 &'a self,
562 ptr: Pointer<Option<M::PointerTag>>,
563 size: Size,
564 align: Align,
565 ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
566 let align = M::enforce_alignment(self).then_some(align);
567 let ptr_and_alloc = self.check_and_deref_ptr(
568 ptr,
569 size,
570 align,
571 CheckInAllocMsg::MemoryAccessTest,
572 |alloc_id, offset, tag| {
573 let alloc = self.get_alloc_raw(alloc_id)?;
574 Ok((alloc.size(), alloc.align, (alloc_id, offset, tag, alloc)))
575 },
576 )?;
577 if let Some((alloc_id, offset, tag, alloc)) = ptr_and_alloc {
578 let range = alloc_range(offset, size);
579 M::memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, tag), range)?;
580 Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
581 } else {
582 // Even in this branch we have to be sure that we actually access the allocation, in
583 // order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of
584 // magically pulling *any* ZST value from the ether. However, the `get_alloc_raw` above is
585 // always called when `ptr` has an `AllocId`.
586 Ok(None)
587 }
588 }
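// Illustrative read path (a sketch; the size and alignment are made up):
//
//     let Some(alloc_ref) = ecx.get_ptr_alloc(ptr, Size::from_bytes(4), Align::from_bytes(4).unwrap())? else {
//         return Ok(()); // zero-sized access
//     };
//     let val = alloc_ref.read_integer(Size::ZERO, Size::from_bytes(4))?;
//
// By the time `read_integer` runs, the bounds/alignment checks and the `memory_read`
// hook have already happened here in `get_ptr_alloc`.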
589
590 /// Return the `extra` field of the given allocation.
591 pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
592 Ok(&self.get_alloc_raw(id)?.extra)
593 }
594
595 /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
596 /// The caller is responsible for calling the access hooks!
597 ///
598 /// Also returns a mutable reference to the machine (`&mut self.machine`) so that the caller
599 /// can use it in parallel with the allocation.
600 fn get_alloc_raw_mut(
601 &mut self,
602 id: AllocId,
603 ) -> InterpResult<'tcx, (&mut Allocation<M::PointerTag, M::AllocExtra>, &mut M)> {
604 // We have "NLL problem case #3" here, which cannot be worked around without loss of
605 // efficiency even for the common case where the key is in the map.
606 // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
607 // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`.)
608 if self.memory.alloc_map.get_mut(id).is_none() {
609 // Slow path.
610 // Allocation not found locally, so go look it up globally.
611 let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
612 let kind = M::GLOBAL_KIND.expect(
613 "I got a global allocation that I have to copy but the machine does \
614 not expect that to happen",
615 );
616 self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
617 }
618
619 let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
620 if alloc.mutability == Mutability::Not {
621 throw_ub!(WriteToReadOnly(id))
622 }
623 Ok((alloc, &mut self.machine))
624 }
625
626 /// "Safe" (bounds and align-checked) allocation access.
627 pub fn get_ptr_alloc_mut<'a>(
628 &'a mut self,
629 ptr: Pointer<Option<M::PointerTag>>,
630 size: Size,
631 align: Align,
632 ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
633 let parts = self.get_ptr_access(ptr, size, align)?;
634 if let Some((alloc_id, offset, tag)) = parts {
635 let tcx = *self.tcx;
636 // FIXME: can we somehow avoid looking up the allocation twice here?
637 // We cannot call `get_alloc_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
638 let (alloc, machine) = self.get_alloc_raw_mut(alloc_id)?;
639 let range = alloc_range(offset, size);
640 M::memory_written(tcx, machine, &mut alloc.extra, (alloc_id, tag), range)?;
641 Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }))
642 } else {
643 Ok(None)
644 }
645 }
646
647 /// Return the `extra` field of the given allocation.
648 pub fn get_alloc_extra_mut<'a>(
649 &'a mut self,
650 id: AllocId,
651 ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
652 let (alloc, machine) = self.get_alloc_raw_mut(id)?;
653 Ok((&mut alloc.extra, machine))
654 }
655
656 /// Obtain the size and alignment of an allocation, even if that allocation has
657 /// been deallocated.
658 ///
659 /// If `liveness` is `AllocCheck::MaybeDead`, this function always returns `Ok`.
660 pub fn get_alloc_size_and_align(
661 &self,
662 id: AllocId,
663 liveness: AllocCheck,
664 ) -> InterpResult<'tcx, (Size, Align)> {
665 // # Regular allocations
666 // Don't use `self.get_alloc_raw` here as that will
667 // a) cause cycles in case `id` refers to a static
668 // b) duplicate a global's allocation in miri
669 if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
670 return Ok((alloc.size(), alloc.align));
671 }
672
673 // # Function pointers
674 // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
675 if self.get_fn_alloc(id).is_some() {
676 return if let AllocCheck::Dereferenceable = liveness {
677 // The caller requested no function pointers.
678 throw_ub!(DerefFunctionPointer(id))
679 } else {
680 Ok((Size::ZERO, Align::ONE))
681 };
682 }
683
684 // # Statics
685 // Can't do this in the match argument, we may get cycle errors since the lock would
686 // be held throughout the match.
687 match self.tcx.get_global_alloc(id) {
688 Some(GlobalAlloc::Static(did)) => {
689 assert!(!self.tcx.is_thread_local_static(did));
690 // Use size and align of the type.
691 let ty = self.tcx.type_of(did);
692 let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
693 Ok((layout.size, layout.align.abi))
694 }
695 Some(GlobalAlloc::Memory(alloc)) => {
696 // Need to duplicate the logic here, because the global allocations have
697 // different associated types than the interpreter-local ones.
698 let alloc = alloc.inner();
699 Ok((alloc.size(), alloc.align))
700 }
701 Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
702 // The rest must be dead.
703 None => {
704 if let AllocCheck::MaybeDead = liveness {
705 // Deallocated pointers are allowed, we should be able to find
706 // them in the map.
707 Ok(*self
708 .memory
709 .dead_alloc_map
710 .get(&id)
711 .expect("deallocated pointers should all be recorded in `dead_alloc_map`"))
712 } else {
713 throw_ub!(PointerUseAfterFree(id))
714 }
715 }
716 }
717 }
718
719 fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
720 if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
721 Some(FnVal::Other(*extra))
722 } else {
723 match self.tcx.get_global_alloc(id) {
724 Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
725 _ => None,
726 }
727 }
728 }
729
730 pub fn get_ptr_fn(
731 &self,
732 ptr: Pointer<Option<M::PointerTag>>,
733 ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
734 trace!("get_fn({:?})", ptr);
735 let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr)?;
736 if offset.bytes() != 0 {
737 throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
738 }
739 self.get_fn_alloc(alloc_id)
740 .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))).into())
741 }
742
743 pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
744 self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
745 Ok(())
746 }
747
748 /// Create a lazy debug printer that prints the given allocation and all allocations it points
749 /// to, recursively.
750 #[must_use]
751 pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> {
752 self.dump_allocs(vec![id])
753 }
754
755 /// Create a lazy debug printer for a list of allocations and all allocations they point to,
756 /// recursively.
757 #[must_use]
758 pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> {
759 allocs.sort();
760 allocs.dedup();
761 DumpAllocs { ecx: self, allocs }
762 }
763
764 /// Print leaked memory. Allocations reachable from `static_roots` or a `Global` allocation
765 /// are not considered leaked. Leaks whose kind's `may_leak()` returns true are not reported.
766 pub fn leak_report(&self, static_roots: &[AllocId]) -> usize {
767 // Collect the set of allocations that are *reachable* from `Global` allocations.
768 let reachable = {
769 let mut reachable = FxHashSet::default();
770 let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
771 let mut todo: Vec<_> =
772 self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
773 if Some(kind) == global_kind { Some(id) } else { None }
774 });
775 todo.extend(static_roots);
776 while let Some(id) = todo.pop() {
777 if reachable.insert(id) {
778 // This is a new allocation, add its relocations to `todo`.
779 if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
780 todo.extend(
781 alloc.relocations().values().filter_map(|tag| tag.get_alloc_id()),
782 );
783 }
784 }
785 }
786 reachable
787 };
788
789 // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
790 let leaks: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
791 if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
792 });
793 let n = leaks.len();
794 if n > 0 {
795 eprintln!("The following memory was leaked: {:?}", self.dump_allocs(leaks));
796 }
797 n
798 }
799 }
800
801 #[doc(hidden)]
802 /// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
803 pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
804 ecx: &'a InterpCx<'mir, 'tcx, M>,
805 allocs: Vec<AllocId>,
806 }
807
808 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
809 fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
810 // Cannot be a closure because it is generic in `Tag`, `Extra`.
811 fn write_allocation_track_relocs<'tcx, Tag: Provenance, Extra>(
812 fmt: &mut std::fmt::Formatter<'_>,
813 tcx: TyCtxt<'tcx>,
814 allocs_to_print: &mut VecDeque<AllocId>,
815 alloc: &Allocation<Tag, Extra>,
816 ) -> std::fmt::Result {
817 for alloc_id in alloc.relocations().values().filter_map(|tag| tag.get_alloc_id()) {
818 allocs_to_print.push_back(alloc_id);
819 }
820 write!(fmt, "{}", display_allocation(tcx, alloc))
821 }
822
823 let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
824 // `allocs_printed` contains all allocations that we have already printed.
825 let mut allocs_printed = FxHashSet::default();
826
827 while let Some(id) = allocs_to_print.pop_front() {
828 if !allocs_printed.insert(id) {
829 // Already printed, so skip this.
830 continue;
831 }
832
833 write!(fmt, "{}", id)?;
834 match self.ecx.memory.alloc_map.get(id) {
835 Some(&(kind, ref alloc)) => {
836 // normal alloc
837 write!(fmt, " ({}, ", kind)?;
838 write_allocation_track_relocs(
839 &mut *fmt,
840 *self.ecx.tcx,
841 &mut allocs_to_print,
842 alloc,
843 )?;
844 }
845 None => {
846 // global alloc
847 match self.ecx.tcx.get_global_alloc(id) {
848 Some(GlobalAlloc::Memory(alloc)) => {
849 write!(fmt, " (unchanged global, ")?;
850 write_allocation_track_relocs(
851 &mut *fmt,
852 *self.ecx.tcx,
853 &mut allocs_to_print,
854 alloc.inner(),
855 )?;
856 }
857 Some(GlobalAlloc::Function(func)) => {
858 write!(fmt, " (fn: {})", func)?;
859 }
860 Some(GlobalAlloc::Static(did)) => {
861 write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
862 }
863 None => {
864 write!(fmt, " (deallocated)")?;
865 }
866 }
867 }
868 }
869 writeln!(fmt)?;
870 }
871 Ok(())
872 }
873 }
874
875 /// Reading and writing.
876 impl<'tcx, 'a, Tag: Provenance, Extra> AllocRefMut<'a, 'tcx, Tag, Extra> {
877 pub fn write_scalar(
878 &mut self,
879 range: AllocRange,
880 val: ScalarMaybeUninit<Tag>,
881 ) -> InterpResult<'tcx> {
882 let range = self.range.subrange(range);
883 debug!(
884 "write_scalar in {} at {:#x}, size {}: {:?}",
885 self.alloc_id,
886 range.start.bytes(),
887 range.size.bytes(),
888 val
889 );
890 Ok(self
891 .alloc
892 .write_scalar(&self.tcx, range, val)
893 .map_err(|e| e.to_interp_error(self.alloc_id))?)
894 }
895
896 pub fn write_ptr_sized(
897 &mut self,
898 offset: Size,
899 val: ScalarMaybeUninit<Tag>,
900 ) -> InterpResult<'tcx> {
901 self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
902 }
903
904 /// Mark the entire referenced range as uninitialized.
905 pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
906 Ok(self
907 .alloc
908 .write_uninit(&self.tcx, self.range)
909 .map_err(|e| e.to_interp_error(self.alloc_id))?)
910 }
911 }
912
913 impl<'tcx, 'a, Tag: Provenance, Extra> AllocRef<'a, 'tcx, Tag, Extra> {
914 pub fn read_scalar(
915 &self,
916 range: AllocRange,
917 read_provenance: bool,
918 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
919 let range = self.range.subrange(range);
920 let res = self
921 .alloc
922 .read_scalar(&self.tcx, range, read_provenance)
923 .map_err(|e| e.to_interp_error(self.alloc_id))?;
924 debug!(
925 "read_scalar in {} at {:#x}, size {}: {:?}",
926 self.alloc_id,
927 range.start.bytes(),
928 range.size.bytes(),
929 res
930 );
931 Ok(res)
932 }
933
934 pub fn read_integer(
935 &self,
936 offset: Size,
937 size: Size,
938 ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
939 self.read_scalar(alloc_range(offset, size), /*read_provenance*/ false)
940 }
941
942 pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
943 self.read_scalar(
944 alloc_range(offset, self.tcx.data_layout().pointer_size),
945 /*read_provenance*/ true,
946 )
947 }
948
949 pub fn check_bytes(
950 &self,
951 range: AllocRange,
952 allow_uninit: bool,
953 allow_ptr: bool,
954 ) -> InterpResult<'tcx> {
955 Ok(self
956 .alloc
957 .check_bytes(&self.tcx, self.range.subrange(range), allow_uninit, allow_ptr)
958 .map_err(|e| e.to_interp_error(self.alloc_id))?)
959 }
960 }
961
962 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
963 /// Reads the given number of bytes from memory. Returns them as a slice.
964 ///
965 /// Performs appropriate bounds checks.
966 pub fn read_bytes_ptr(
967 &self,
968 ptr: Pointer<Option<M::PointerTag>>,
969 size: Size,
970 ) -> InterpResult<'tcx, &[u8]> {
971 let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
972 // zero-sized access
973 return Ok(&[]);
974 };
975 // Side-step AllocRef and directly access the underlying bytes more efficiently.
976 // (We are staying inside the bounds here so all is good.)
977 Ok(alloc_ref
978 .alloc
979 .get_bytes(&alloc_ref.tcx, alloc_ref.range)
980 .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
981 }
982
983 /// Writes the given stream of bytes into memory.
984 ///
985 /// Performs appropriate bounds checks.
986 pub fn write_bytes_ptr(
987 &mut self,
988 ptr: Pointer<Option<M::PointerTag>>,
989 src: impl IntoIterator<Item = u8>,
990 ) -> InterpResult<'tcx> {
991 let mut src = src.into_iter();
992 let (lower, upper) = src.size_hint();
993 let len = upper.expect("can only write bounded iterators");
994 assert_eq!(lower, len, "can only write iterators with a precise length");
995
996 let size = Size::from_bytes(len);
997 let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size, Align::ONE)? else {
998 // zero-sized access
999 assert_matches!(
1000 src.next(),
1001 None,
1002 "iterator said it was empty but returned an element"
1003 );
1004 return Ok(());
1005 };
1006
1007 // Side-step AllocRef and directly access the underlying bytes more efficiently.
1008 // (We are staying inside the bounds here so all is good.)
1009 let alloc_id = alloc_ref.alloc_id;
1010 let bytes = alloc_ref
1011 .alloc
1012 .get_bytes_mut(&alloc_ref.tcx, alloc_ref.range)
1013 .map_err(move |e| e.to_interp_error(alloc_id))?;
1014 // `zip` would stop when the first iterator ends; we want to definitely
1015 // cover all of `bytes`.
1016 for dest in bytes {
1017 *dest = src.next().expect("iterator was shorter than it said it would be");
1018 }
1019 assert_matches!(src.next(), None, "iterator was longer than it said it would be");
1020 Ok(())
1021 }
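// Usage note (illustrative): the iterator must report an exact length, e.g.
// `ecx.write_bytes_ptr(ptr, b"hello".iter().copied())?;` works because a slice
// iterator's `size_hint` has matching lower and upper bounds.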
1022
1023 pub fn mem_copy(
1024 &mut self,
1025 src: Pointer<Option<M::PointerTag>>,
1026 src_align: Align,
1027 dest: Pointer<Option<M::PointerTag>>,
1028 dest_align: Align,
1029 size: Size,
1030 nonoverlapping: bool,
1031 ) -> InterpResult<'tcx> {
1032 self.mem_copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
1033 }
1034
1035 pub fn mem_copy_repeatedly(
1036 &mut self,
1037 src: Pointer<Option<M::PointerTag>>,
1038 src_align: Align,
1039 dest: Pointer<Option<M::PointerTag>>,
1040 dest_align: Align,
1041 size: Size,
1042 num_copies: u64,
1043 nonoverlapping: bool,
1044 ) -> InterpResult<'tcx> {
1045 let tcx = self.tcx;
1046 // We need to do our own bounds-checks.
1047 let src_parts = self.get_ptr_access(src, size, src_align)?;
1048 let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
1049
1050 // FIXME: we look up both allocations twice here, once above in `get_ptr_access`
1051 // and once below to get the underlying `&[mut] Allocation`.
1052
1053 // Source alloc preparations and access hooks.
1054 let Some((src_alloc_id, src_offset, src_tag)) = src_parts else {
1055 // Zero-sized *source*, that means dst is also zero-sized and we have nothing to do.
1056 return Ok(());
1057 };
1058 let src_alloc = self.get_alloc_raw(src_alloc_id)?;
1059 let src_range = alloc_range(src_offset, size);
1060 M::memory_read(*tcx, &self.machine, &src_alloc.extra, (src_alloc_id, src_tag), src_range)?;
1061 // We need the `dest` ptr for the next operation, so we get it now.
1062 // We already did the source checks and called the hooks so we are good to return early.
1063 let Some((dest_alloc_id, dest_offset, dest_tag)) = dest_parts else {
1064 // Zero-sized *destination*.
1065 return Ok(());
1066 };
1067
1068 // This checks relocation edges on the src, which needs to happen before
1069 // `prepare_relocation_copy`.
1070 let src_bytes = src_alloc
1071 .get_bytes_with_uninit_and_ptr(&tcx, src_range)
1072 .map_err(|e| e.to_interp_error(src_alloc_id))?
1073 .as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
1074 // first copy the relocations to a temporary buffer, because
1075 // `get_bytes_mut` will clear the relocations, which is correct,
1076 // since we don't want to keep any relocations at the target.
1077 let relocations =
1078 src_alloc.prepare_relocation_copy(self, src_range, dest_offset, num_copies);
1079 // Prepare a copy of the initialization mask.
1080 let compressed = src_alloc.compress_uninit_range(src_range);
1081
1082 // Destination alloc preparations and access hooks.
1083 let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
1084 let dest_range = alloc_range(dest_offset, size * num_copies);
1085 M::memory_written(
1086 *tcx,
1087 extra,
1088 &mut dest_alloc.extra,
1089 (dest_alloc_id, dest_tag),
1090 dest_range,
1091 )?;
1092 let dest_bytes = dest_alloc
1093 .get_bytes_mut_ptr(&tcx, dest_range)
1094 .map_err(|e| e.to_interp_error(dest_alloc_id))?
1095 .as_mut_ptr();
1096
1097 if compressed.no_bytes_init() {
1098 // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
1099 // is marked as uninitialized, but we otherwise leave the byte representation unchanged, as it may
1100 // be arbitrary for uninitialized bytes.
1101 // This also avoids writing to the target bytes so that the backing allocation is never
1102 // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
1103 // operating systems this can avoid physically allocating the page.
1104 dest_alloc
1105 .write_uninit(&tcx, dest_range)
1106 .map_err(|e| e.to_interp_error(dest_alloc_id))?;
1107 // We can forget about the relocations, this is all not initialized anyway.
1108 return Ok(());
1109 }
1110
1111 // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
1112 // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
1113 // `dest` could possibly overlap.
1114 // The pointers above remain valid even if the `HashMap` table is moved around because they
1115 // point into the `Vec` storing the bytes.
1116 unsafe {
1117 if src_alloc_id == dest_alloc_id {
1118 if nonoverlapping {
1119 // `Size` additions
1120 if (src_offset <= dest_offset && src_offset + size > dest_offset)
1121 || (dest_offset <= src_offset && dest_offset + size > src_offset)
1122 {
1123 throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
1124 }
1125 }
1126
1127 for i in 0..num_copies {
1128 ptr::copy(
1129 src_bytes,
1130 dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
1131 size.bytes_usize(),
1132 );
1133 }
1134 } else {
1135 for i in 0..num_copies {
1136 ptr::copy_nonoverlapping(
1137 src_bytes,
1138 dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
1139 size.bytes_usize(),
1140 );
1141 }
1142 }
1143 }
1144
1145 // now fill in all the "init" data
1146 dest_alloc.mark_compressed_init_range(
1147 &compressed,
1148 alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
1149 num_copies,
1150 );
1151 // copy the relocations to the destination
1152 dest_alloc.mark_relocation_range(relocations);
1153
1154 Ok(())
1155 }
1156 }
1157
1158 /// Machine pointer introspection.
1159 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
1160 pub fn scalar_to_ptr(
1161 &self,
1162 scalar: Scalar<M::PointerTag>,
1163 ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
1164 // We use `to_bits_or_ptr_internal` since we are just implementing the method people need to
1165 // call to force getting out a pointer.
1166 Ok(
1167 match scalar
1168 .to_bits_or_ptr_internal(self.pointer_size())
1169 .map_err(|s| err_ub!(ScalarSizeMismatch(s)))?
1170 {
1171 Err(ptr) => ptr.into(),
1172 Ok(bits) => {
1173 let addr = u64::try_from(bits).unwrap();
1174 M::ptr_from_addr_transmute(&self, addr)
1175 }
1176 },
1177 )
1178 }
1179
1180 /// Test if this value might be null.
1181 /// If the machine does not support ptr-to-int casts, this is conservative.
1182 pub fn scalar_may_be_null(&self, scalar: Scalar<M::PointerTag>) -> InterpResult<'tcx, bool> {
1183 Ok(match scalar.try_to_int() {
1184 Ok(int) => int.is_null(),
1185 Err(_) => {
1186 // Can only happen during CTFE.
1187 let ptr = self.scalar_to_ptr(scalar)?;
1188 match self.ptr_try_get_alloc_id(ptr) {
1189 Ok((alloc_id, offset, _)) => {
1190 let (size, _align) = self
1191 .get_alloc_size_and_align(alloc_id, AllocCheck::MaybeDead)
1192 .expect("alloc info with MaybeDead cannot fail");
1193 // If the pointer is out-of-bounds, it may be null.
1194 // Note that one-past-the-end (offset == size) is still inbounds, and never null.
1195 offset > size
1196 }
1197 Err(_offset) => bug!("a non-int scalar is always a pointer"),
1198 }
1199 }
1200 })
1201 }
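// Example of the reasoning above (illustrative): for an allocation of size 8, offset 8
// (one past the end) is still in-bounds and thus definitely non-null, while offset 9 is
// out of bounds and conservatively treated as possibly null.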
1202
1203 /// Turning a "maybe pointer" into a proper pointer (and some information
1204 /// about where it points), or an absolute address.
1205 pub fn ptr_try_get_alloc_id(
1206 &self,
1207 ptr: Pointer<Option<M::PointerTag>>,
1208 ) -> Result<(AllocId, Size, M::TagExtra), u64> {
1209 match ptr.into_pointer_or_addr() {
1210 Ok(ptr) => match M::ptr_get_alloc(self, ptr) {
1211 Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
1212 None => {
1213 assert!(M::PointerTag::OFFSET_IS_ADDR);
1214 let (_, addr) = ptr.into_parts();
1215 Err(addr.bytes())
1216 }
1217 },
1218 Err(addr) => Err(addr.bytes()),
1219 }
1220 }
1221
1222 /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
1223 #[inline(always)]
1224 pub fn ptr_get_alloc_id(
1225 &self,
1226 ptr: Pointer<Option<M::PointerTag>>,
1227 ) -> InterpResult<'tcx, (AllocId, Size, M::TagExtra)> {
1228 self.ptr_try_get_alloc_id(ptr).map_err(|offset| {
1229 err_ub!(DanglingIntPointer(offset, CheckInAllocMsg::InboundsTest)).into()
1230 })
1231 }
1232 }