// compiler/rustc_const_eval/src/interpret/memory.rs
// (from rustc.git, upstream version 1.67.1+dfsg1)

//! The memory subsystem.
//!
//! Generally, we use `Pointer` to denote memory addresses. However, some operations
//! have a "size"-like parameter, and they take `Scalar` for the address because
//! if the size is 0, then the pointer can also be a (properly aligned, non-null)
//! integer. It is crucial that these operations call `check_align` *before*
//! short-circuiting the empty case!

use std::assert_matches::assert_matches;
use std::borrow::Cow;
use std::collections::VecDeque;
use std::fmt;
use std::ptr;

use rustc_ast::Mutability;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_middle::mir::display_allocation;
use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TyCtxt};
use rustc_target::abi::{Align, HasDataLayout, Size};

use super::{
    alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc, InterpCx,
    InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
};

#[derive(Debug, PartialEq, Copy, Clone)]
pub enum MemoryKind<T> {
    /// Stack memory. Error if deallocated except during a stack pop.
    Stack,
    /// Memory allocated by `caller_location` intrinsic. Error if ever deallocated.
    CallerLocation,
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    Machine(T),
}

impl<T: MayLeak> MayLeak for MemoryKind<T> {
    #[inline]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Stack => false,
            MemoryKind::CallerLocation => true,
            MemoryKind::Machine(k) => k.may_leak(),
        }
    }
}

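// Example (illustrative sketch, not part of this file): a machine plugs its own
// kinds in via `MemoryKind::Machine` and decides per kind whether leaking is
// acceptable. `MyMemoryKind` is a hypothetical machine-defined type:
//
//     #[derive(Debug, PartialEq, Copy, Clone)]
//     enum MyMemoryKind {
//         Heap,   // e.g. `malloc`ed memory: leaks should be reported
//         Global, // machine-managed globals: allowed to leak
//     }
//
//     impl MayLeak for MyMemoryKind {
//         #[inline]
//         fn may_leak(self) -> bool {
//             match self {
//                 MyMemoryKind::Heap => false,
//                 MyMemoryKind::Global => true,
//             }
//         }
//     }
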
impl<T: fmt::Display> fmt::Display for MemoryKind<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Stack => write!(f, "stack variable"),
            MemoryKind::CallerLocation => write!(f, "caller location"),
            MemoryKind::Machine(m) => write!(f, "{}", m),
        }
    }
}

/// The return value of `get_alloc_info` indicates the "kind" of the allocation.
pub enum AllocKind {
    /// A regular live data allocation.
    LiveData,
    /// A function allocation (that fn ptrs point to).
    Function,
    /// A (symbolic) vtable allocation.
    VTable,
    /// A dead allocation.
    Dead,
}

/// The value of a function pointer.
#[derive(Debug, Copy, Clone)]
pub enum FnVal<'tcx, Other> {
    Instance(Instance<'tcx>),
    Other(Other),
}

impl<'tcx, Other> FnVal<'tcx, Other> {
    pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
        match self {
            FnVal::Instance(instance) => Ok(instance),
            FnVal::Other(_) => {
                throw_unsup_format!("'foreign' function pointers are not supported in this context")
            }
        }
    }
}

// `Memory` has to depend on the `Machine` because some of its operations
// (e.g., `get`) call a `Machine` hook.
pub struct Memory<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Allocations local to this instance of the miri engine. The kind
    /// helps ensure that the same mechanism is used for allocation and
    /// deallocation. When an allocation is not found here, it is a
    /// global and looked up in the `tcx` for read access. Some machines may
    /// have to mutate this map even on a read-only access to a global (because
    /// they do pointer provenance tracking and the allocations in `tcx` have
    /// the wrong type), so we let the machine override this type.
    /// Either way, if the machine allows writing to a global, doing so will
    /// create a copy of the global allocation here.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) alloc_map: M::MemoryMap,

    /// Map for "extra" function pointers.
    extra_fn_ptr_map: FxHashMap<AllocId, M::ExtraFnVal>,

    /// To be able to compare pointers with null, and to check alignment for accesses
    /// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
    /// that do not exist any more.
    // FIXME: this should not be public, but interning currently needs access to it
    pub(super) dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,
}

/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
#[derive(Copy, Clone)]
pub struct AllocRef<'a, 'tcx, Prov: Provenance, Extra> {
    alloc: &'a Allocation<Prov, Extra>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}
/// A reference to some allocation that was already bounds-checked for the given region
/// and had the on-access machine hooks run.
pub struct AllocRefMut<'a, 'tcx, Prov: Provenance, Extra> {
    alloc: &'a mut Allocation<Prov, Extra>,
    range: AllocRange,
    tcx: TyCtxt<'tcx>,
    alloc_id: AllocId,
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    pub fn new() -> Self {
        Memory {
            alloc_map: M::MemoryMap::default(),
            extra_fn_ptr_map: FxHashMap::default(),
            dead_alloc_map: FxHashMap::default(),
        }
    }

    /// This is used by [priroda](https://github.com/oli-obk/priroda)
    pub fn alloc_map(&self) -> &M::MemoryMap {
        &self.alloc_map
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the machine pointer to the allocation. Must never be used
    /// for any other pointers, nor for TLS statics.
    ///
    /// Using the resulting pointer represents a *direct* access to that memory
    /// (e.g. by directly using a `static`),
    /// as opposed to access through a pointer that was created by the program.
    ///
    /// This function can fail only if `ptr` points to an `extern static`.
    #[inline]
    pub fn global_base_pointer(
        &self,
        ptr: Pointer<AllocId>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let alloc_id = ptr.provenance;
        // We need to handle `extern static`.
        match self.tcx.try_get_global_alloc(alloc_id) {
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
                bug!("global memory cannot point to thread-local static")
            }
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
                return M::extern_static_base_pointer(self, def_id);
            }
            _ => {}
        }
        // And we need to get the provenance.
        Ok(M::adjust_alloc_base_pointer(self, ptr))
    }

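    // Usage sketch (restating the call made by `create_fn_alloc_ptr` below):
    // any `AllocId` obtained from `tcx` is routed through this function before
    // the interpreted program gets to see the pointer:
    //
    //     let ptr = self.global_base_pointer(Pointer::from(alloc_id))?;
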
    pub fn create_fn_alloc_ptr(
        &mut self,
        fn_val: FnVal<'tcx, M::ExtraFnVal>,
    ) -> Pointer<M::Provenance> {
        let id = match fn_val {
            FnVal::Instance(instance) => self.tcx.create_fn_alloc(instance),
            FnVal::Other(extra) => {
                // FIXME(RalfJung): Should we have a cache here?
                let id = self.tcx.reserve_alloc_id();
                let old = self.memory.extra_fn_ptr_map.insert(id, extra);
                assert!(old.is_none());
                id
            }
        };
        // Functions are global allocations, so make sure we get the right base pointer.
        // We know this is not an `extern static` so this cannot fail.
        self.global_base_pointer(Pointer::from(id)).unwrap()
    }

    pub fn allocate_ptr(
        &mut self,
        size: Size,
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let alloc = Allocation::uninit(size, align, M::PANIC_ON_ALLOC_FAIL)?;
        // We can `unwrap` since `alloc` contains no pointers.
        Ok(self.allocate_raw_ptr(alloc, kind).unwrap())
    }

    pub fn allocate_bytes_ptr(
        &mut self,
        bytes: &[u8],
        align: Align,
        kind: MemoryKind<M::MemoryKind>,
        mutability: Mutability,
    ) -> Pointer<M::Provenance> {
        let alloc = Allocation::from_bytes(bytes, align, mutability);
        // We can `unwrap` since `alloc` contains no pointers.
        self.allocate_raw_ptr(alloc, kind).unwrap()
    }

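    // Usage sketch (hypothetical caller, hedged): putting immutable byte data,
    // e.g. the contents of a string constant, into interpreter memory. `kind`
    // would be some machine-specific `MemoryKind`:
    //
    //     let ptr = ecx.allocate_bytes_ptr(b"hello", Align::ONE, kind, Mutability::Not);
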
    /// This can fail only if `alloc` contains provenance.
    pub fn allocate_raw_ptr(
        &mut self,
        alloc: Allocation,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let id = self.tcx.reserve_alloc_id();
        debug_assert_ne!(
            Some(kind),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
        let alloc = M::adjust_allocation(self, id, Cow::Owned(alloc), Some(kind))?;
        self.memory.alloc_map.insert(id, (kind, alloc.into_owned()));
        Ok(M::adjust_alloc_base_pointer(self, Pointer::from(id)))
    }

    pub fn reallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
        if offset.bytes() != 0 {
            throw_ub_format!(
                "reallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        // For simplicity's sake, we implement reallocate as "alloc, copy, dealloc".
        // This happens so rarely, the perf advantage is outweighed by the maintenance cost.
        let new_ptr = self.allocate_ptr(new_size, new_align, kind)?;
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
            None => self.get_alloc_raw(alloc_id)?.size(),
        };
        // This will also call the access hooks.
        self.mem_copy(
            ptr,
            Align::ONE,
            new_ptr.into(),
            Align::ONE,
            old_size.min(new_size),
            /*nonoverlapping*/ true,
        )?;
        self.deallocate_ptr(ptr, old_size_and_align, kind)?;

        Ok(new_ptr)
    }

    #[instrument(skip(self), level = "debug")]
    pub fn deallocate_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
        let (alloc_id, offset, prov) = self.ptr_get_alloc_id(ptr)?;
        trace!("deallocating: {alloc_id:?}");

        if offset.bytes() != 0 {
            throw_ub_format!(
                "deallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

        let Some((alloc_kind, mut alloc)) = self.memory.alloc_map.remove(&alloc_id) else {
            // Deallocating global memory -- always an error
            return Err(match self.tcx.try_get_global_alloc(alloc_id) {
                Some(GlobalAlloc::Function(..)) => {
                    err_ub_format!("deallocating {alloc_id:?}, which is a function")
                }
                Some(GlobalAlloc::VTable(..)) => {
                    err_ub_format!("deallocating {alloc_id:?}, which is a vtable")
                }
                Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                    err_ub_format!("deallocating {alloc_id:?}, which is static memory")
                }
                None => err_ub!(PointerUseAfterFree(alloc_id)),
            }
            .into());
        };

        if alloc.mutability == Mutability::Not {
            throw_ub_format!("deallocating immutable allocation {alloc_id:?}");
        }
        if alloc_kind != kind {
            throw_ub_format!(
                "deallocating {alloc_id:?}, which is {alloc_kind} memory, using {kind} deallocation operation"
            );
        }
        if let Some((size, align)) = old_size_and_align {
            if size != alloc.size() || align != alloc.align {
                throw_ub_format!(
                    "incorrect layout on deallocation: {alloc_id:?} has size {} and alignment {}, but gave size {} and alignment {}",
                    alloc.size().bytes(),
                    alloc.align.bytes(),
                    size.bytes(),
                    align.bytes(),
                )
            }
        }

        // Let the machine take some extra action
        let size = alloc.size();
        M::before_memory_deallocation(
            *self.tcx,
            &mut self.machine,
            &mut alloc.extra,
            (alloc_id, prov),
            alloc_range(Size::ZERO, size),
        )?;

        // Don't forget to remember size and align of this now-dead allocation
        let old = self.memory.dead_alloc_map.insert(alloc_id, (size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }

        Ok(())
    }

    /// Internal helper function to determine the allocation and offset of a pointer (if any).
    #[inline(always)]
    fn get_ptr_access(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<(AllocId, Size, M::ProvenanceExtra)>> {
        let align = M::enforce_alignment(&self).then_some(align);
        self.check_and_deref_ptr(
            ptr,
            size,
            align,
            CheckInAllocMsg::MemoryAccessTest,
            |alloc_id, offset, prov| {
                let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
                Ok((size, align, (alloc_id, offset, prov)))
            },
        )
    }

    /// Check if the given pointer points to live memory of given `size` and `align`
    /// (ignoring `M::enforce_alignment`). The caller can control the error message for the
    /// out-of-bounds case.
    #[inline(always)]
    pub fn check_ptr_access_align(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        align: Align,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
        self.check_and_deref_ptr(ptr, size, Some(align), msg, |alloc_id, _, _| {
            let (size, align) = self.get_live_alloc_size_and_align(alloc_id)?;
            Ok((size, align, ()))
        })?;
        Ok(())
    }

    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
    /// to the allocation it points to. Supports both shared and mutable references, as the actual
    /// checking is offloaded to a helper closure. `align` defines whether and which alignment check
    /// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
    fn check_and_deref_ptr<T>(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        align: Option<Align>,
        msg: CheckInAllocMsg,
        alloc_size: impl FnOnce(
            AllocId,
            Size,
            M::ProvenanceExtra,
        ) -> InterpResult<'tcx, (Size, Align, T)>,
    ) -> InterpResult<'tcx, Option<T>> {
        fn check_offset_align<'tcx>(offset: u64, align: Align) -> InterpResult<'tcx> {
            if offset % align.bytes() == 0 {
                Ok(())
            } else {
                // The biggest power of two through which `offset` is divisible.
                let offset_pow2 = 1 << offset.trailing_zeros();
                throw_ub!(AlignmentCheckFailed {
                    has: Align::from_bytes(offset_pow2).unwrap(),
                    required: align,
                })
            }
        }

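        // Worked example for `check_offset_align` (hypothetical numbers):
        // `offset = 24` gives `24u64.trailing_zeros() == 3`, so `offset_pow2 == 8`.
        // Requiring 16-byte alignment then fails with
        // `AlignmentCheckFailed { has: 8, required: 16 }`, while requiring 8 bytes
        // (or any smaller power of two) succeeds via `offset % align.bytes() == 0`.
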
        Ok(match self.ptr_try_get_alloc_id(ptr) {
            Err(addr) => {
                // We couldn't get a proper allocation. This is only okay if the access size is 0,
                // and the address is not null.
                if size.bytes() > 0 || addr == 0 {
                    throw_ub!(DanglingIntPointer(addr, msg));
                }
                // Must be aligned.
                if let Some(align) = align {
                    check_offset_align(addr, align)?;
                }
                None
            }
            Ok((alloc_id, offset, prov)) => {
                let (alloc_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, prov)?;
                // Test bounds. This also ensures non-null.
                // It is sufficient to check this for the end pointer. Also check for overflow!
                if offset.checked_add(size, &self.tcx).map_or(true, |end| end > alloc_size) {
                    throw_ub!(PointerOutOfBounds {
                        alloc_id,
                        alloc_size,
                        ptr_offset: self.machine_usize_to_isize(offset.bytes()),
                        ptr_size: size,
                        msg,
                    })
                }
                // Ensure we never consider the null pointer dereferenceable.
                if M::Provenance::OFFSET_IS_ADDR {
                    assert_ne!(ptr.addr(), Size::ZERO);
                }
                // Test align. Check this last; if both bounds and alignment are violated
                // we want the error to be about the bounds.
                if let Some(align) = align {
                    if M::use_addr_for_alignment_check(self) {
                        // `use_addr_for_alignment_check` can only be true if `OFFSET_IS_ADDR` is true.
                        check_offset_align(ptr.addr().bytes(), align)?;
                    } else {
                        // Check allocation alignment and offset alignment.
                        if alloc_align.bytes() < align.bytes() {
                            throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
                        }
                        check_offset_align(offset.bytes(), align)?;
                    }
                }

                // We can still be zero-sized in this branch, in which case we have to
                // return `None`.
                if size.bytes() == 0 { None } else { Some(ret_val) }
            }
        })
    }
}

/// Allocation accessors
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Helper function to obtain a global (tcx) allocation.
    /// This attempts to return a reference to an existing allocation if
    /// one can be found in `tcx`. That, however, is only possible if `tcx` and
    /// this machine use the same pointer provenance, so it is indirected through
    /// `M::adjust_allocation`.
    fn get_global_alloc(
        &self,
        id: AllocId,
        is_write: bool,
    ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::Provenance, M::AllocExtra>>> {
        let (alloc, def_id) = match self.tcx.try_get_global_alloc(id) {
            Some(GlobalAlloc::Memory(mem)) => {
                // Memory of a constant or promoted or anonymous memory referenced by a static.
                (mem, None)
            }
            Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
            Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
            None => throw_ub!(PointerUseAfterFree(id)),
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(self.tcx.is_static(def_id));
                assert!(!self.tcx.is_thread_local_static(def_id));
                // Notice that every static has two `AllocId` that will resolve to the same
                // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                // and the other one maps to `GlobalAlloc::Memory`, this is returned by
                // `eval_static_initializer` and it is the "resolved" ID.
                // The resolved ID is never used by the interpreted program, it is hidden.
                // This is relied upon for soundness of const-patterns; a pointer to the resolved
                // ID would "sidestep" the checks that make sure consts do not point to statics!
                // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                // contains a reference to memory that was created during its evaluation (i.e., not
                // to another static), those inner references only exist in "resolved" form.
                if self.tcx.is_foreign_item(def_id) {
                    // This is unreachable in Miri, but can happen in CTFE where we actually *do* support
                    // referencing arbitrary (declared) extern statics.
                    throw_unsup!(ReadExternStatic(def_id));
                }

                // We don't give a span -- statics don't need that, they cannot be generic or associated.
                let val = self.ctfe_query(None, |tcx| tcx.eval_static_initializer(def_id))?;
                (val, Some(def_id))
            }
        };
        M::before_access_global(*self.tcx, &self.machine, id, alloc, def_id, is_write)?;
        // We got tcx memory. Let the machine initialize its "extra" stuff.
        M::adjust_allocation(
            self,
            id, // always use the ID we got as input, not the "hidden" one.
            Cow::Borrowed(alloc.inner()),
            M::GLOBAL_KIND.map(MemoryKind::Machine),
        )
    }

    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// You almost certainly want to use `get_ptr_alloc`/`get_ptr_alloc_mut` instead.
    fn get_alloc_raw(
        &self,
        id: AllocId,
    ) -> InterpResult<'tcx, &Allocation<M::Provenance, M::AllocExtra>> {
        // The error type of the inner closure here is somewhat funny. We have two
        // ways of "erroring": An actual error, or because we got a reference from
        // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
        // So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
        let a = self.memory.alloc_map.get_or(id, || {
            let alloc = self.get_global_alloc(id, /*is_write*/ false).map_err(Err)?;
            match alloc {
                Cow::Borrowed(alloc) => {
                    // We got a ref, cheaply return that as an "error" so that the
                    // map does not get mutated.
                    Err(Ok(alloc))
                }
                Cow::Owned(alloc) => {
                    // Need to put it into the map and return a ref to that
                    let kind = M::GLOBAL_KIND.expect(
                        "I got a global allocation that I have to copy but the machine does \
                         not expect that to happen",
                    );
                    Ok((MemoryKind::Machine(kind), alloc))
                }
            }
        });
        // Now unpack that funny error type
        match a {
            Ok(a) => Ok(&a.1),
            Err(a) => a,
        }
    }

    /// "Safe" (bounds and align-checked) allocation access.
    pub fn get_ptr_alloc<'a>(
        &'a self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
        let align = M::enforce_alignment(self).then_some(align);
        let ptr_and_alloc = self.check_and_deref_ptr(
            ptr,
            size,
            align,
            CheckInAllocMsg::MemoryAccessTest,
            |alloc_id, offset, prov| {
                let alloc = self.get_alloc_raw(alloc_id)?;
                Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
            },
        )?;
        if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
            let range = alloc_range(offset, size);
            M::before_memory_read(*self.tcx, &self.machine, &alloc.extra, (alloc_id, prov), range)?;
            Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
        } else {
            // Even in this branch we have to be sure that we actually access the allocation, in
            // order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of
            // magically pulling *any* ZST value from the ether. However, the `get_raw` above is
            // always called when `ptr` has an `AllocId`.
            Ok(None)
        }
    }

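    // Usage sketch (hypothetical `ecx`, `ptr`): the common read path pairs this
    // accessor with the `AllocRef` methods defined near the end of this file:
    //
    //     if let Some(alloc_ref) = ecx.get_ptr_alloc(ptr, size, align)? {
    //         let val = alloc_ref.read_integer(alloc_range(Size::ZERO, size))?;
    //         // ... use `val` ...
    //     } else {
    //         // zero-sized access: there is nothing to read
    //     }
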
    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
        Ok(&self.get_alloc_raw(id)?.extra)
    }

    /// Return the `mutability` field of the given allocation.
    pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
        Ok(self.get_alloc_raw(id)?.mutability)
    }

    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
    /// The caller is responsible for calling the access hooks!
    ///
    /// Also returns a ptr to `self.extra` so that the caller can use it in parallel with the
    /// allocation.
    fn get_alloc_raw_mut(
        &mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&mut Allocation<M::Provenance, M::AllocExtra>, &mut M)> {
        // We have "NLL problem case #3" here, which cannot be worked around without loss of
        // efficiency even for the common case where the key is in the map.
        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`.)
        if self.memory.alloc_map.get_mut(id).is_none() {
            // Slow path.
            // Allocation not found locally, so go look it up globally.
            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
            let kind = M::GLOBAL_KIND.expect(
                "I got a global allocation that I have to copy but the machine does \
                 not expect that to happen",
            );
            self.memory.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
        }

        let (_kind, alloc) = self.memory.alloc_map.get_mut(id).unwrap();
        if alloc.mutability == Mutability::Not {
            throw_ub!(WriteToReadOnly(id))
        }
        Ok((alloc, &mut self.machine))
    }

    /// "Safe" (bounds and align-checked) allocation access.
    pub fn get_ptr_alloc_mut<'a>(
        &'a mut self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
        align: Align,
    ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra>>> {
        let parts = self.get_ptr_access(ptr, size, align)?;
        if let Some((alloc_id, offset, prov)) = parts {
            let tcx = *self.tcx;
            // FIXME: can we somehow avoid looking up the allocation twice here?
            // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
            let (alloc, machine) = self.get_alloc_raw_mut(alloc_id)?;
            let range = alloc_range(offset, size);
            M::before_memory_write(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
            Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }))
        } else {
            Ok(None)
        }
    }

    /// Return the `extra` field of the given allocation.
    pub fn get_alloc_extra_mut<'a>(
        &'a mut self,
        id: AllocId,
    ) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
        let (alloc, machine) = self.get_alloc_raw_mut(id)?;
        Ok((&mut alloc.extra, machine))
    }

    /// Obtain the size and alignment of an allocation, even if that allocation has
    /// been deallocated.
    pub fn get_alloc_info(&self, id: AllocId) -> (Size, Align, AllocKind) {
        // # Regular allocations
        // Don't use `self.get_raw` here as that will
        // a) cause cycles in case `id` refers to a static
        // b) duplicate a global's allocation in miri
        if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
            return (alloc.size(), alloc.align, AllocKind::LiveData);
        }

        // # Function pointers
        // (both global from `alloc_map` and local from `extra_fn_ptr_map`)
        if self.get_fn_alloc(id).is_some() {
            return (Size::ZERO, Align::ONE, AllocKind::Function);
        }

        // # Statics
        // Can't do this in the match argument, we may get cycle errors since the lock would
        // be held throughout the match.
        match self.tcx.try_get_global_alloc(id) {
            Some(GlobalAlloc::Static(def_id)) => {
                assert!(self.tcx.is_static(def_id));
                assert!(!self.tcx.is_thread_local_static(def_id));
                // Use size and align of the type.
                let ty = self.tcx.type_of(def_id);
                let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
                assert!(layout.is_sized());
                (layout.size, layout.align.abi, AllocKind::LiveData)
            }
            Some(GlobalAlloc::Memory(alloc)) => {
                // Need to duplicate the logic here, because the global allocations have
                // different associated types than the interpreter-local ones.
                let alloc = alloc.inner();
                (alloc.size(), alloc.align, AllocKind::LiveData)
            }
            Some(GlobalAlloc::Function(_)) => bug!("We already checked function pointers above"),
            Some(GlobalAlloc::VTable(..)) => {
                // No data to be accessed here. But vtables are pointer-aligned.
                return (Size::ZERO, self.tcx.data_layout.pointer_align.abi, AllocKind::VTable);
            }
            // The rest must be dead.
            None => {
                // Deallocated pointers are allowed, we should be able to find
                // them in the map.
                let (size, align) = *self
                    .memory
                    .dead_alloc_map
                    .get(&id)
                    .expect("deallocated pointers should all be recorded in `dead_alloc_map`");
                (size, align, AllocKind::Dead)
            }
        }
    }

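    // Note that thanks to `dead_alloc_map`, this returns meaningful size and
    // alignment information even for dangling pointers, which is what allows
    // zero-sized accesses and alignment checks on dead allocations. Sketch:
    //
    //     let (size, align, kind) = ecx.get_alloc_info(id);
    //     if matches!(kind, AllocKind::Dead) {
    //         // `size`/`align` are still known, but any non-zero-sized access is UB
    //     }
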
    /// Obtain the size and alignment of a live allocation.
    pub fn get_live_alloc_size_and_align(&self, id: AllocId) -> InterpResult<'tcx, (Size, Align)> {
        let (size, align, kind) = self.get_alloc_info(id);
        if matches!(kind, AllocKind::Dead) {
            throw_ub!(PointerUseAfterFree(id))
        }
        Ok((size, align))
    }

    fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
        if let Some(extra) = self.memory.extra_fn_ptr_map.get(&id) {
            Some(FnVal::Other(*extra))
        } else {
            match self.tcx.try_get_global_alloc(id) {
                Some(GlobalAlloc::Function(instance)) => Some(FnVal::Instance(instance)),
                _ => None,
            }
        }
    }

    pub fn get_ptr_fn(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
        trace!("get_ptr_fn({:?})", ptr);
        let (alloc_id, offset, _prov) = self.ptr_get_alloc_id(ptr)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
        }
        self.get_fn_alloc(alloc_id)
            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))).into())
    }

    pub fn get_ptr_vtable(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>)> {
        trace!("get_ptr_vtable({:?})", ptr);
        let (alloc_id, offset, _tag) = self.ptr_get_alloc_id(ptr)?;
        if offset.bytes() != 0 {
            throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset)))
        }
        match self.tcx.try_get_global_alloc(alloc_id) {
            Some(GlobalAlloc::VTable(ty, trait_ref)) => Ok((ty, trait_ref)),
            _ => throw_ub!(InvalidVTablePointer(Pointer::new(alloc_id, offset))),
        }
    }

    pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
        self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
        Ok(())
    }

    /// Create a lazy debug printer that prints the given allocation and all allocations it points
    /// to, recursively.
    #[must_use]
    pub fn dump_alloc<'a>(&'a self, id: AllocId) -> DumpAllocs<'a, 'mir, 'tcx, M> {
        self.dump_allocs(vec![id])
    }

    /// Create a lazy debug printer for a list of allocations and all allocations they point to,
    /// recursively.
    #[must_use]
    pub fn dump_allocs<'a>(&'a self, mut allocs: Vec<AllocId>) -> DumpAllocs<'a, 'mir, 'tcx, M> {
        allocs.sort();
        allocs.dedup();
        DumpAllocs { ecx: self, allocs }
    }

    /// Print leaked memory. Allocations reachable from `static_roots` or a `Global` allocation
    /// are not considered leaked. Leaks whose kind `may_leak()` returns true are not reported.
    pub fn leak_report(&self, static_roots: &[AllocId]) -> usize {
        // Collect the set of allocations that are *reachable* from `Global` allocations.
        let reachable = {
            let mut reachable = FxHashSet::default();
            let global_kind = M::GLOBAL_KIND.map(MemoryKind::Machine);
            let mut todo: Vec<_> =
                self.memory.alloc_map.filter_map_collect(move |&id, &(kind, _)| {
                    if Some(kind) == global_kind { Some(id) } else { None }
                });
            todo.extend(static_roots);
            while let Some(id) = todo.pop() {
                if reachable.insert(id) {
                    // This is a new allocation, add the allocations it points to, to `todo`.
                    if let Some((_, alloc)) = self.memory.alloc_map.get(id) {
                        todo.extend(
                            alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id()),
                        );
                    }
                }
            }
            reachable
        };

        // All allocations that are *not* `reachable` and *not* `may_leak` are considered leaking.
        let leaks: Vec<_> = self.memory.alloc_map.filter_map_collect(|&id, &(kind, _)| {
            if kind.may_leak() || reachable.contains(&id) { None } else { Some(id) }
        });
        let n = leaks.len();
        if n > 0 {
            eprintln!("The following memory was leaked: {:?}", self.dump_allocs(leaks));
        }
        n
    }
}

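// Usage sketch (hypothetical driver code, hedged): a Miri-style machine would
// call this once after the program terminates, passing the `AllocId`s of its
// machine-managed statics as roots:
//
//     let num_leaked = ecx.leak_report(&static_roots);
//     if num_leaked > 0 {
//         // fail the run with a leak report
//     }
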
#[doc(hidden)]
/// There's no way to use this directly, it's just a helper struct for the `dump_alloc(s)` methods.
pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    ecx: &'a InterpCx<'mir, 'tcx, M>,
    allocs: Vec<AllocId>,
}

impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
    fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Cannot be a closure because it is generic in `Prov`, `Extra`.
        fn write_allocation_track_relocs<'tcx, Prov: Provenance, Extra>(
            fmt: &mut std::fmt::Formatter<'_>,
            tcx: TyCtxt<'tcx>,
            allocs_to_print: &mut VecDeque<AllocId>,
            alloc: &Allocation<Prov, Extra>,
        ) -> std::fmt::Result {
            for alloc_id in alloc.provenance().provenances().filter_map(|prov| prov.get_alloc_id())
            {
                allocs_to_print.push_back(alloc_id);
            }
            write!(fmt, "{}", display_allocation(tcx, alloc))
        }

        let mut allocs_to_print: VecDeque<_> = self.allocs.iter().copied().collect();
        // `allocs_printed` contains all allocations that we have already printed.
        let mut allocs_printed = FxHashSet::default();

        while let Some(id) = allocs_to_print.pop_front() {
            if !allocs_printed.insert(id) {
                // Already printed, so skip this.
                continue;
            }

            write!(fmt, "{id:?}")?;
            match self.ecx.memory.alloc_map.get(id) {
                Some(&(kind, ref alloc)) => {
                    // normal alloc
                    write!(fmt, " ({}, ", kind)?;
                    write_allocation_track_relocs(
                        &mut *fmt,
                        *self.ecx.tcx,
                        &mut allocs_to_print,
                        alloc,
                    )?;
                }
                None => {
                    // global alloc
                    match self.ecx.tcx.try_get_global_alloc(id) {
                        Some(GlobalAlloc::Memory(alloc)) => {
                            write!(fmt, " (unchanged global, ")?;
                            write_allocation_track_relocs(
                                &mut *fmt,
                                *self.ecx.tcx,
                                &mut allocs_to_print,
                                alloc.inner(),
                            )?;
                        }
                        Some(GlobalAlloc::Function(func)) => {
                            write!(fmt, " (fn: {func})")?;
                        }
                        Some(GlobalAlloc::VTable(ty, Some(trait_ref))) => {
                            write!(fmt, " (vtable: impl {trait_ref} for {ty})")?;
                        }
                        Some(GlobalAlloc::VTable(ty, None)) => {
                            write!(fmt, " (vtable: impl <auto trait> for {ty})")?;
                        }
                        Some(GlobalAlloc::Static(did)) => {
                            write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
                        }
                        None => {
                            write!(fmt, " (deallocated)")?;
                        }
                    }
                }
            }
            writeln!(fmt)?;
        }
        Ok(())
    }
}

/// Reading and writing.
impl<'tcx, 'a, Prov: Provenance, Extra> AllocRefMut<'a, 'tcx, Prov, Extra> {
    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
        let range = self.range.subrange(range);
        debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
        Ok(self
            .alloc
            .write_scalar(&self.tcx, range, val)
            .map_err(|e| e.to_interp_error(self.alloc_id))?)
    }

    /// `offset` is relative to this allocation reference, not the base of the allocation.
    pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
    }

    /// Mark the entire referenced range as uninitialized
    pub fn write_uninit(&mut self) -> InterpResult<'tcx> {
        Ok(self
            .alloc
            .write_uninit(&self.tcx, self.range)
            .map_err(|e| e.to_interp_error(self.alloc_id))?)
    }
}

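// Usage sketch (hypothetical values): ranges and offsets passed to these
// methods are relative to the `AllocRefMut`, not to the allocation base, so
// writing a pointer-sized value at the start of the checked region is:
//
//     if let Some(mut alloc_ref) = ecx.get_ptr_alloc_mut(ptr, size, align)? {
//         alloc_ref.write_ptr_sized(Size::ZERO, val)?;
//     }
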
impl<'tcx, 'a, Prov: Provenance, Extra> AllocRef<'a, 'tcx, Prov, Extra> {
    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn read_scalar(
        &self,
        range: AllocRange,
        read_provenance: bool,
    ) -> InterpResult<'tcx, Scalar<Prov>> {
        let range = self.range.subrange(range);
        let res = self
            .alloc
            .read_scalar(&self.tcx, range, read_provenance)
            .map_err(|e| e.to_interp_error(self.alloc_id))?;
        debug!("read_scalar at {:?}{range:?}: {res:?}", self.alloc_id);
        Ok(res)
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn read_integer(&self, range: AllocRange) -> InterpResult<'tcx, Scalar<Prov>> {
        self.read_scalar(range, /*read_provenance*/ false)
    }

    /// `offset` is relative to this allocation reference, not the base of the allocation.
    pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
        self.read_scalar(
            alloc_range(offset, self.tcx.data_layout().pointer_size),
            /*read_provenance*/ true,
        )
    }

    /// `range` is relative to this allocation reference, not the base of the allocation.
    pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
        Ok(self
            .alloc
            .get_bytes_strip_provenance(&self.tcx, self.range)
            .map_err(|e| e.to_interp_error(self.alloc_id))?)
    }

    /// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
    pub(crate) fn has_provenance(&self) -> bool {
        !self.alloc.provenance().range_empty(self.range, &self.tcx)
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Reads the given number of bytes from memory, and strips their provenance if possible.
    /// Returns them as a slice.
    ///
    /// Performs appropriate bounds checks.
    pub fn read_bytes_ptr_strip_provenance(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        let Some(alloc_ref) = self.get_ptr_alloc(ptr, size, Align::ONE)? else {
            // zero-sized access
            return Ok(&[]);
        };
        // Side-step AllocRef and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here so all is good.)
        Ok(alloc_ref
            .alloc
            .get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
            .map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
    }

    /// Writes the given stream of bytes into memory.
    ///
    /// Performs appropriate bounds checks.
    pub fn write_bytes_ptr(
        &mut self,
        ptr: Pointer<Option<M::Provenance>>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");

        let size = Size::from_bytes(len);
        let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size, Align::ONE)? else {
            // zero-sized access
            assert_matches!(
                src.next(),
                None,
                "iterator said it was empty but returned an element"
            );
            return Ok(());
        };

        // Side-step AllocRef and directly access the underlying bytes more efficiently.
        // (We are staying inside the bounds here so all is good.)
        let alloc_id = alloc_ref.alloc_id;
        let bytes = alloc_ref
            .alloc
            .get_bytes_mut(&alloc_ref.tcx, alloc_ref.range)
            .map_err(move |e| e.to_interp_error(alloc_id))?;
        // `zip` would stop when the first iterator ends; we want to definitely
        // cover all of `bytes`.
        for dest in bytes {
            *dest = src.next().expect("iterator was shorter than it said it would be");
        }
        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
        Ok(())
    }

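    // Usage sketch (hypothetical `dest` pointer): the iterator must report an
    // exact length through `size_hint`, so slice iterators work while unbounded
    // ones would hit the `expect`/`assert_eq!` above:
    //
    //     ecx.write_bytes_ptr(dest, b"hello".iter().copied())?; // ok: size_hint is (5, Some(5))
    //     // ecx.write_bytes_ptr(dest, std::iter::repeat(0u8))?; // would panic: no upper bound
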
    pub fn mem_copy(
        &mut self,
        src: Pointer<Option<M::Provenance>>,
        src_align: Align,
        dest: Pointer<Option<M::Provenance>>,
        dest_align: Align,
        size: Size,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        self.mem_copy_repeatedly(src, src_align, dest, dest_align, size, 1, nonoverlapping)
    }

    pub fn mem_copy_repeatedly(
        &mut self,
        src: Pointer<Option<M::Provenance>>,
        src_align: Align,
        dest: Pointer<Option<M::Provenance>>,
        dest_align: Align,
        size: Size,
        num_copies: u64,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let tcx = self.tcx;
        // We need to do our own bounds-checks.
        let src_parts = self.get_ptr_access(src, size, src_align)?;
        let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication

        // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`
        // and once below to get the underlying `&[mut] Allocation`.

        // Source alloc preparations and access hooks.
        let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
            // Zero-sized *source*, that means dest is also zero-sized and we have nothing to do.
            return Ok(());
        };
        let src_alloc = self.get_alloc_raw(src_alloc_id)?;
        let src_range = alloc_range(src_offset, size);
        M::before_memory_read(
            *tcx,
            &self.machine,
            &src_alloc.extra,
            (src_alloc_id, src_prov),
            src_range,
        )?;
        // We need the `dest` ptr for the next operation, so we get it now.
        // We already did the source checks and called the hooks so we are good to return early.
        let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
            // Zero-sized *destination*.
            return Ok(());
        };

        // Prepare getting source provenance.
        let src_bytes = src_alloc.get_bytes_unchecked(src_range).as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
        // First copy the provenance to a temporary buffer, because
        // `get_bytes_mut` will clear the provenance, which is correct,
        // since we don't want to keep any provenance at the target.
        // This will also error if copying partial provenance is not supported.
        let provenance = src_alloc
            .provenance()
            .prepare_copy(src_range, dest_offset, num_copies, self)
            .map_err(|e| e.to_interp_error(dest_alloc_id))?;
        // Prepare a copy of the initialization mask.
        let init = src_alloc.init_mask().prepare_copy(src_range);

        // Destination alloc preparations and access hooks.
        let (dest_alloc, extra) = self.get_alloc_raw_mut(dest_alloc_id)?;
        let dest_range = alloc_range(dest_offset, size * num_copies);
        M::before_memory_write(
            *tcx,
            extra,
            &mut dest_alloc.extra,
            (dest_alloc_id, dest_prov),
            dest_range,
        )?;
        let dest_bytes = dest_alloc
            .get_bytes_mut_ptr(&tcx, dest_range)
            .map_err(|e| e.to_interp_error(dest_alloc_id))?
            .as_mut_ptr();

        if init.no_bytes_init() {
            // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
            // is marked as uninitialized but we otherwise omit changing the byte representation which may
            // be arbitrary for uninitialized bytes.
            // This also avoids writing to the target bytes so that the backing allocation is never
            // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
            // operating systems this can avoid physically allocating the page.
            dest_alloc
                .write_uninit(&tcx, dest_range)
                .map_err(|e| e.to_interp_error(dest_alloc_id))?;
            // We can forget about the provenance, this is all not initialized anyway.
            return Ok(());
        }

        // SAFE: The above indexing would have panicked if there weren't at least `size` bytes
        // behind `src` and `dest`. Also, we use the overlapping-safe `ptr::copy` if `src` and
        // `dest` could possibly overlap.
        // The pointers above remain valid even if the `HashMap` table is moved around because they
        // point into the `Vec` storing the bytes.
        unsafe {
            if src_alloc_id == dest_alloc_id {
                if nonoverlapping {
                    // `Size` additions
                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
                    {
                        throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
                    }
                }

                for i in 0..num_copies {
                    ptr::copy(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            } else {
                for i in 0..num_copies {
                    ptr::copy_nonoverlapping(
                        src_bytes,
                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
                        size.bytes_usize(),
                    );
                }
            }
        }

        // now fill in all the "init" data
        dest_alloc.init_mask_apply_copy(
            init,
            alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
            num_copies,
        );
        // copy the provenance to the destination
        dest_alloc.provenance_apply_copy(provenance);

        Ok(())
    }
}

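// Usage sketch: this is the workhorse behind `memcpy`-style operations; an
// implementation of a `copy_nonoverlapping`-like operation essentially boils
// down to (hypothetical `src`/`dest` values):
//
//     ecx.mem_copy(src, src_align, dest, dest_align, size, /*nonoverlapping*/ true)?;
//
// `mem_copy_repeatedly` additionally writes the source region `num_copies`
// times back-to-back into the destination, which is why the destination is
// bounds-checked for `size * num_copies` bytes.
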
/// Machine pointer introspection.
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Test if this value might be null.
    /// If the machine does not support ptr-to-int casts, this is conservative.
    pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
        Ok(match scalar.try_to_int() {
            Ok(int) => int.is_null(),
            Err(_) => {
                // Can only happen during CTFE.
                let ptr = scalar.to_pointer(self)?;
                match self.ptr_try_get_alloc_id(ptr) {
                    Ok((alloc_id, offset, _)) => {
                        let (size, _align, _kind) = self.get_alloc_info(alloc_id);
                        // If the pointer is out-of-bounds, it may be null.
                        // Note that one-past-the-end (offset == size) is still inbounds, and never null.
                        offset > size
                    }
                    Err(_offset) => bug!("a non-int scalar is always a pointer"),
                }
            }
        })
    }

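    // Worked example (hypothetical allocation of size 8): a pointer at
    // `offset == 8` is one-past-the-end, hence still inbounds and known to be
    // non-null, so this returns `false`. At `offset == 9` the pointer is
    // out-of-bounds; since the allocation's base address is not known during
    // CTFE, it could end up at any address, including 0, so this returns `true`.
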
    /// Turning a "maybe pointer" into a proper pointer (and some information
    /// about where it points), or an absolute address.
    pub fn ptr_try_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> Result<(AllocId, Size, M::ProvenanceExtra), u64> {
        match ptr.into_pointer_or_addr() {
            Ok(ptr) => match M::ptr_get_alloc(self, ptr) {
                Some((alloc_id, offset, extra)) => Ok((alloc_id, offset, extra)),
                None => {
                    assert!(M::Provenance::OFFSET_IS_ADDR);
                    let (_, addr) = ptr.into_parts();
                    Err(addr.bytes())
                }
            },
            Err(addr) => Err(addr.bytes()),
        }
    }

    /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
    #[inline(always)]
    pub fn ptr_get_alloc_id(
        &self,
        ptr: Pointer<Option<M::Provenance>>,
    ) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
        self.ptr_try_get_alloc_id(ptr).map_err(|offset| {
            err_ub!(DanglingIntPointer(offset, CheckInAllocMsg::InboundsTest)).into()
        })
    }
}