// compiler/rustc_mir/src/interpret/machine.rs (rustc 1.55.0)
//! This module contains everything needed to instantiate an interpreter.
//! This separation exists to ensure that no fancy miri features like
//! interpreting common C functions leak into CTFE.

use std::borrow::{Borrow, Cow};
use std::fmt::Debug;
use std::hash::Hash;

use rustc_middle::mir;
use rustc_middle::ty::{self, Ty};
use rustc_span::def_id::DefId;
use rustc_target::abi::Size;
use rustc_target::spec::abi::Abi;

use super::{
    AllocId, AllocRange, Allocation, Frame, ImmTy, InterpCx, InterpResult, LocalValue, MemPlace,
    Memory, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
};

/// Data returned by `Machine::after_stack_pop` to provide further control
/// over the popping of the stack frame.
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub enum StackPopJump {
    /// Indicates that no special handling should be
    /// done - we'll either return normally or unwind
    /// based on the terminator for the function
    /// we're leaving.
    Normal,

    /// Indicates that we should *not* jump to the return/unwind address, as the callback already
    /// took care of everything.
    NoJump,
}

/// Whether this kind of memory is allowed to leak.
pub trait MayLeak: Copy {
    fn may_leak(self) -> bool;
}
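
// Illustrative sketch, not part of the original file: a machine-defined memory
// kind with a hypothetical `Heap` variant whose allocations are reported as
// leaked if they are still live when interpretation finishes.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum ExampleHeapKind {
    Heap,
}

impl MayLeak for ExampleHeapKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        // Machine-allocated heap memory must be freed before evaluation ends.
        false
    }
}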

/// The functionality needed by memory to manage its allocations.
pub trait AllocMap<K: Hash + Eq, V> {
    /// Tests if the map contains the given key.
    /// Deliberately takes `&mut` because that is sufficient, and some implementations
    /// can be more efficient that way (using `RefCell::get_mut`).
    fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
    where
        K: Borrow<Q>;

    /// Inserts a new entry into the map.
    fn insert(&mut self, k: K, v: V) -> Option<V>;

    /// Removes an entry from the map.
    fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
    where
        K: Borrow<Q>;

    /// Returns data based on the keys and values in the map.
    fn filter_map_collect<T>(&self, f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T>;

    /// Returns a reference to entry `k`. If no such entry exists, calls
    /// `vacant` and either forwards its error, or adds its result to the map
    /// and returns a reference to *that*.
    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E>;

    /// Returns a mutable reference to entry `k`. If no such entry exists, calls
    /// `vacant` and either forwards its error, or adds its result to the map
    /// and returns a reference to *that*.
    fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E>;

    /// Read-only lookup.
    fn get(&self, k: K) -> Option<&V> {
        self.get_or(k, || Err(())).ok()
    }

    /// Mutable lookup.
    fn get_mut(&mut self, k: K) -> Option<&mut V> {
        self.get_mut_or(k, || Err(())).ok()
    }
}
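
// Illustrative sketch, not part of the original file: one way the `AllocMap`
// interface above can be satisfied by a plain `std::collections::HashMap`. The
// choice of `HashMap` is an assumption made for the example; the real
// compile-time machines use `rustc_data_structures::fx::FxHashMap` for their
// `MemoryMap` (see `compile_time_machine!` below). Note that `get_or` takes
// `&self`, so without interior mutability a miss can only forward `vacant`'s
// error.
mod alloc_map_example {
    use std::borrow::Borrow;
    use std::collections::hash_map::{Entry, HashMap};
    use std::hash::Hash;

    use super::AllocMap;

    impl<K: Hash + Eq, V> AllocMap<K, V> for HashMap<K, V> {
        fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
        where
            K: Borrow<Q>,
        {
            HashMap::contains_key(self, k)
        }

        fn insert(&mut self, k: K, v: V) -> Option<V> {
            HashMap::insert(self, k, v)
        }

        fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
        where
            K: Borrow<Q>,
        {
            HashMap::remove(self, k)
        }

        fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
            self.iter().filter_map(|(k, v)| f(k, v)).collect()
        }

        fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
            match self.get(&k) {
                Some(v) => Ok(v),
                None => {
                    // Forward the error if `vacant` fails; a shared map without
                    // interior mutability cannot cache a successful result.
                    vacant()?;
                    panic!("this example map cannot be extended through `&self`")
                }
            }
        }

        fn get_mut_or<E>(
            &mut self,
            k: K,
            vacant: impl FnOnce() -> Result<V, E>,
        ) -> Result<&mut V, E> {
            match self.entry(k) {
                Entry::Occupied(e) => Ok(e.into_mut()),
                Entry::Vacant(e) => Ok(e.insert(vacant()?)),
            }
        }
    }
}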

/// Methods of this trait signify a point where CTFE evaluation would fail
/// and some use-case-dependent behaviour can instead be applied.
pub trait Machine<'mir, 'tcx>: Sized {
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones.
    type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;

    /// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
    type PointerTag: Provenance + Eq + Hash + 'static;

    /// Machines can define extra (non-instance) things that represent values of function pointers.
    /// For example, Miri uses this to return a function pointer from `dlsym`
    /// that can later be called to execute the right thing.
    type ExtraFnVal: Debug + Copy;

    /// Extra data stored in every call frame.
    type FrameExtra;

    /// Extra data stored in memory. A reference to this is available when `AllocExtra`
    /// gets initialized, so you can, e.g., have an `Rc` here if there is global state you
    /// need access to in the `AllocExtra` hooks.
    type MemoryExtra;

    /// Extra data stored in every allocation.
    type AllocExtra: Debug + Clone + 'static;

    /// Memory's allocation map.
    type MemoryMap: AllocMap<
        AllocId,
        (MemoryKind<Self::MemoryKind>, Allocation<Self::PointerTag, Self::AllocExtra>),
    > + Default
        + Clone;

    /// The memory kind to use for copied global memory (held in `tcx`) --
    /// or `None` if such memory should not be mutated and thus any such attempt will cause
    /// a `ModifiedStatic` error to be raised.
    /// Statics are copied under two circumstances: when they are mutated, and when
    /// `init_allocation_extra` (see below) returns an owned allocation
    /// that is added to the memory so that the work is not done twice.
    const GLOBAL_KIND: Option<Self::MemoryKind>;

    /// Should the machine panic on allocation failures?
    const PANIC_ON_ALLOC_FAIL: bool;

    /// Whether memory accesses should be alignment-checked.
    fn enforce_alignment(memory_extra: &Self::MemoryExtra) -> bool;

    /// Whether, when checking alignment, we should `force_int` and thus support
    /// custom alignment logic based on whatever the integer address happens to be.
    fn force_int_for_alignment_check(memory_extra: &Self::MemoryExtra) -> bool;

    /// Whether to enforce the validity invariant.
    fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;

    /// Whether function calls should be [ABI](Abi)-checked.
    fn enforce_abi(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
        true
    }

    /// Entry point for obtaining the MIR of anything that should get evaluated.
    /// So not just functions and shims, but also const/static initializers, anonymous
    /// constants, ...
    fn load_mir(
        ecx: &InterpCx<'mir, 'tcx, Self>,
        instance: ty::InstanceDef<'tcx>,
    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
        Ok(ecx.tcx.instance_mir(instance))
    }

    /// Entry point to all function calls.
    ///
    /// Returns either the MIR to use for the call, or `None` if execution should
    /// just proceed (which usually means this hook did all the work that the
    /// called function should usually have done). In the latter case, it is
    /// this hook's responsibility to advance the instruction pointer!
    /// (This is to support functions like `__rust_maybe_catch_panic` that neither find a MIR
    /// nor just jump to `ret`, but instead push their own stack frame.)
    /// Passing `dest` and `ret` in the same `Option` proved very annoying when only one of them
    /// was used.
    fn find_mir_or_eval_fn(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        instance: ty::Instance<'tcx>,
        abi: Abi,
        args: &[OpTy<'tcx, Self::PointerTag>],
        ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
        unwind: StackPopUnwind,
    ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>>;

    /// Execute `fn_val`. It is the hook's responsibility to advance the instruction
    /// pointer as appropriate.
    fn call_extra_fn(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        fn_val: Self::ExtraFnVal,
        abi: Abi,
        args: &[OpTy<'tcx, Self::PointerTag>],
        ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
        unwind: StackPopUnwind,
    ) -> InterpResult<'tcx>;

    /// Directly process an intrinsic without pushing a stack frame. It is the hook's
    /// responsibility to advance the instruction pointer as appropriate.
    fn call_intrinsic(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Self::PointerTag>],
        ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
        unwind: StackPopUnwind,
    ) -> InterpResult<'tcx>;

    /// Called to evaluate `Assert` MIR terminators that trigger a panic.
    fn assert_panic(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        msg: &mir::AssertMessage<'tcx>,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx>;

    /// Called to evaluate the `Abort` MIR terminator.
    fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _msg: String) -> InterpResult<'tcx, !> {
        throw_unsup_format!("aborting execution is not supported")
    }

    /// Called for all binary operations where the LHS has pointer type.
    ///
    /// Returns a (value, overflowed) pair and the result type if the operation succeeded.
    fn binary_ptr_op(
        ecx: &InterpCx<'mir, 'tcx, Self>,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, Self::PointerTag>,
        right: &ImmTy<'tcx, Self::PointerTag>,
    ) -> InterpResult<'tcx, (Scalar<Self::PointerTag>, bool, Ty<'tcx>)>;

    /// Heap allocations via the `box` keyword.
    fn box_alloc(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        dest: &PlaceTy<'tcx, Self::PointerTag>,
    ) -> InterpResult<'tcx>;

    /// Called to read the specified `local` from the `frame`.
    /// Since reading a ZST is not actually accessing memory or locals, this is never invoked
    /// for ZST reads.
    #[inline]
    fn access_local(
        _ecx: &InterpCx<'mir, 'tcx, Self>,
        frame: &Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
        local: mir::Local,
    ) -> InterpResult<'tcx, Operand<Self::PointerTag>> {
        frame.locals[local].access()
    }

    /// Called to write the specified `local` of the `frame`.
    /// Since writing a ZST is not actually accessing memory or locals, this is never invoked
    /// for ZST writes.
    #[inline]
    fn access_local_mut<'a>(
        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
        frame: usize,
        local: mir::Local,
    ) -> InterpResult<'tcx, Result<&'a mut LocalValue<Self::PointerTag>, MemPlace<Self::PointerTag>>>
    where
        'tcx: 'mir,
    {
        ecx.stack_mut()[frame].locals[local].access_mut()
    }

    /// Called before a basic block terminator is executed.
    /// You can use this to detect endlessly running programs.
    #[inline]
    fn before_terminator(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Called before a global allocation is accessed.
    /// `static_def_id` is `Some` if this is the "lazy" allocation of a static.
    #[inline]
    fn before_access_global(
        _memory_extra: &Self::MemoryExtra,
        _alloc_id: AllocId,
        _allocation: &Allocation,
        _static_def_id: Option<DefId>,
        _is_write: bool,
    ) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Return the root pointer for the given thread-local static in the current thread.
    fn thread_local_static_base_pointer(
        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
        def_id: DefId,
    ) -> InterpResult<'tcx, Pointer<Self::PointerTag>> {
        throw_unsup!(ThreadLocalStatic(def_id))
    }

    /// Return the root pointer for the given `extern static`.
    fn extern_static_base_pointer(
        mem: &Memory<'mir, 'tcx, Self>,
        def_id: DefId,
    ) -> InterpResult<'tcx, Pointer<Self::PointerTag>>;

    /// Return a "base" pointer for the given allocation: the one that is used for direct
    /// accesses to this static/const/fn allocation, or the one returned from the heap allocator.
    ///
    /// Not called on `extern` or thread-local statics (those use the methods above).
    fn tag_alloc_base_pointer(
        mem: &Memory<'mir, 'tcx, Self>,
        ptr: Pointer,
    ) -> Pointer<Self::PointerTag>;

    /// "Int-to-pointer cast"
    fn ptr_from_addr(
        mem: &Memory<'mir, 'tcx, Self>,
        addr: u64,
    ) -> Pointer<Option<Self::PointerTag>>;

    /// Convert a pointer with provenance into an allocation-offset pair.
    fn ptr_get_alloc(
        mem: &Memory<'mir, 'tcx, Self>,
        ptr: Pointer<Self::PointerTag>,
    ) -> (AllocId, Size);

    /// Called to initialize the "extra" state of an allocation and make the pointers
    /// it contains (in relocations) tagged. The way we construct allocations is
    /// to always first construct them without extra and then add the extra.
    /// This keeps uniform code paths for handling both allocations created by CTFE
    /// for globals, and allocations created by Miri during evaluation.
    ///
    /// `kind` is the kind of the allocation being tagged; it can be `None` when
    /// it's a global and `GLOBAL_KIND` is `None`.
    ///
    /// This should avoid copying if no work has to be done! If this returns an owned
    /// allocation (because a copy had to be done to add tags or metadata), machine memory will
    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
    /// owned allocation to the map even when the map is shared.)
    fn init_allocation_extra<'b>(
        mem: &Memory<'mir, 'tcx, Self>,
        id: AllocId,
        alloc: Cow<'b, Allocation>,
        kind: Option<MemoryKind<Self::MemoryKind>>,
    ) -> Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>>;

    /// Hook for performing extra checks on a memory read access.
    ///
    /// Takes read-only access to the allocation so that all memory read
    /// operations can keep taking `&self`. Use a `RefCell` in `AllocExtra` if you
    /// need to mutate.
    #[inline(always)]
    fn memory_read(
        _memory_extra: &Self::MemoryExtra,
        _alloc_extra: &Self::AllocExtra,
        _tag: Self::PointerTag,
        _range: AllocRange,
    ) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Hook for performing extra checks on a memory write access.
    #[inline(always)]
    fn memory_written(
        _memory_extra: &mut Self::MemoryExtra,
        _alloc_extra: &mut Self::AllocExtra,
        _tag: Self::PointerTag,
        _range: AllocRange,
    ) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Hook for performing extra operations on a memory deallocation.
    #[inline(always)]
    fn memory_deallocated(
        _memory_extra: &mut Self::MemoryExtra,
        _alloc_extra: &mut Self::AllocExtra,
        _tag: Self::PointerTag,
        _range: AllocRange,
    ) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Executes a retagging operation.
    #[inline]
    fn retag(
        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
        _kind: mir::RetagKind,
        _place: &PlaceTy<'tcx, Self::PointerTag>,
    ) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Called immediately before a new stack frame gets pushed.
    fn init_frame_extra(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        frame: Frame<'mir, 'tcx, Self::PointerTag>,
    ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>>;

    /// Borrow the current thread's stack.
    fn stack(
        ecx: &'a InterpCx<'mir, 'tcx, Self>,
    ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>];

    /// Mutably borrow the current thread's stack.
    fn stack_mut(
        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>>;

    /// Called immediately after a stack frame got pushed and its locals got initialized.
    fn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Called immediately after a stack frame got popped, but before jumping back to the caller.
    fn after_stack_pop(
        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
        _frame: Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
        _unwinding: bool,
    ) -> InterpResult<'tcx, StackPopJump> {
        // By default, we do not support unwinding from panics
        Ok(StackPopJump::Normal)
    }
}

// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
// (CTFE and ConstProp) use the same instance. Here, we share that code.
pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
    type PointerTag = AllocId;
    type ExtraFnVal = !;

    type MemoryMap =
        rustc_data_structures::fx::FxHashMap<AllocId, (MemoryKind<Self::MemoryKind>, Allocation)>;
    const GLOBAL_KIND: Option<Self::MemoryKind> = None; // no copying of globals from `tcx` to machine memory

    type AllocExtra = ();
    type FrameExtra = ();

    #[inline(always)]
    fn enforce_alignment(_memory_extra: &Self::MemoryExtra) -> bool {
        // We do not check for alignment to avoid having to carry an `Align`
        // in `ConstValue::ByRef`.
        false
    }

    #[inline(always)]
    fn force_int_for_alignment_check(_memory_extra: &Self::MemoryExtra) -> bool {
        // We do not support `force_int`.
        false
    }

    #[inline(always)]
    fn enforce_validity(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
        false // for now, we don't enforce validity
    }

    #[inline(always)]
    fn call_extra_fn(
        _ecx: &mut InterpCx<$mir, $tcx, Self>,
        fn_val: !,
        _abi: Abi,
        _args: &[OpTy<$tcx>],
        _ret: Option<(&PlaceTy<$tcx>, mir::BasicBlock)>,
        _unwind: StackPopUnwind,
    ) -> InterpResult<$tcx> {
        match fn_val {}
    }

    #[inline(always)]
    fn init_allocation_extra<'b>(
        _mem: &Memory<$mir, $tcx, Self>,
        _id: AllocId,
        alloc: Cow<'b, Allocation>,
        _kind: Option<MemoryKind<Self::MemoryKind>>,
    ) -> Cow<'b, Allocation<Self::PointerTag>> {
        // We do not use a tag, so we can just cheaply forward the allocation.
        alloc
    }

    fn extern_static_base_pointer(
        mem: &Memory<$mir, $tcx, Self>,
        def_id: DefId,
    ) -> InterpResult<$tcx, Pointer> {
        // Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
        Ok(Pointer::new(mem.tcx.create_static_alloc(def_id), Size::ZERO))
    }

    #[inline(always)]
    fn tag_alloc_base_pointer(
        _mem: &Memory<$mir, $tcx, Self>,
        ptr: Pointer<AllocId>,
    ) -> Pointer<AllocId> {
        ptr
    }

    #[inline(always)]
    fn ptr_from_addr(_mem: &Memory<$mir, $tcx, Self>, addr: u64) -> Pointer<Option<AllocId>> {
        Pointer::new(None, Size::from_bytes(addr))
    }

    #[inline(always)]
    fn ptr_get_alloc(_mem: &Memory<$mir, $tcx, Self>, ptr: Pointer<AllocId>) -> (AllocId, Size) {
        // We know `offset` is relative to the allocation, so we can use `into_parts`.
        let (alloc_id, offset) = ptr.into_parts();
        (alloc_id, offset)
    }
}
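
// What follows is an illustrative sketch, not part of the original file: it shows how a
// compile-time machine might use `compile_time_machine!`. The macro fills in the pointer-tag,
// function-pointer and memory-map plumbing; the implementor supplies the remaining items.
// `SketchConstMachine`, `NoMemoryKind` and every method body below are hypothetical; the real
// implementations are the CTFE machine in `const_eval` and the ConstProp machine.
mod compile_time_machine_example {
    use super::*;

    /// A hypothetical machine that keeps no state beyond its stack of frames.
    pub struct SketchConstMachine<'mir, 'tcx> {
        pub stack: Vec<Frame<'mir, 'tcx>>,
    }

    /// This sketch never allocates machine-private memory, so an uninhabited kind suffices.
    #[derive(Debug, PartialEq, Eq, Copy, Clone)]
    pub enum NoMemoryKind {}

    impl std::fmt::Display for NoMemoryKind {
        fn fmt(&self, _: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match *self {} }
    }

    impl MayLeak for NoMemoryKind {
        fn may_leak(self) -> bool { match self {} }
    }

    impl<'mir, 'tcx> Machine<'mir, 'tcx> for SketchConstMachine<'mir, 'tcx> {
        compile_time_machine!(<'mir, 'tcx>);

        type MemoryKind = NoMemoryKind;
        type MemoryExtra = ();
        const PANIC_ON_ALLOC_FAIL: bool = false;

        fn find_mir_or_eval_fn(
            _ecx: &mut InterpCx<'mir, 'tcx, Self>,
            _instance: ty::Instance<'tcx>,
            _abi: Abi,
            _args: &[OpTy<'tcx, Self::PointerTag>],
            _ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
            _unwind: StackPopUnwind,
        ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
            throw_unsup_format!("this sketch does not evaluate function calls")
        }

        fn call_intrinsic(
            _ecx: &mut InterpCx<'mir, 'tcx, Self>,
            _instance: ty::Instance<'tcx>,
            _args: &[OpTy<'tcx, Self::PointerTag>],
            _ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
            _unwind: StackPopUnwind,
        ) -> InterpResult<'tcx> {
            throw_unsup_format!("this sketch does not evaluate intrinsics")
        }

        fn assert_panic(
            _ecx: &mut InterpCx<'mir, 'tcx, Self>,
            _msg: &mir::AssertMessage<'tcx>,
            _unwind: Option<mir::BasicBlock>,
        ) -> InterpResult<'tcx> {
            throw_unsup_format!("this sketch does not evaluate panics")
        }

        fn binary_ptr_op(
            _ecx: &InterpCx<'mir, 'tcx, Self>,
            _bin_op: mir::BinOp,
            _left: &ImmTy<'tcx, Self::PointerTag>,
            _right: &ImmTy<'tcx, Self::PointerTag>,
        ) -> InterpResult<'tcx, (Scalar<Self::PointerTag>, bool, Ty<'tcx>)> {
            throw_unsup_format!("this sketch does not support pointer arithmetic")
        }

        fn box_alloc(
            _ecx: &mut InterpCx<'mir, 'tcx, Self>,
            _dest: &PlaceTy<'tcx, Self::PointerTag>,
        ) -> InterpResult<'tcx> {
            throw_unsup_format!("this sketch does not support `box` allocations")
        }

        fn init_frame_extra(
            _ecx: &mut InterpCx<'mir, 'tcx, Self>,
            frame: Frame<'mir, 'tcx, Self::PointerTag>,
        ) -> InterpResult<'tcx, Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
            Ok(frame)
        }

        fn stack(
            ecx: &'a InterpCx<'mir, 'tcx, Self>,
        ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] {
            &ecx.machine.stack
        }

        fn stack_mut(
            ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
        ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
            &mut ecx.machine.stack
        }
    }
}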