//! The virtual memory representation of the MIR interpreter.

use std::borrow::Cow;
use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::iter;
use std::ops::{Deref, Range};
use std::ptr;

use rustc_ast::Mutability;
use rustc_data_structures::intern::Interned;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Align, HasDataLayout, Size};

use crate::ty;

use super::{
    read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
    ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, ScalarSizeMismatch, UndefinedBehaviorInfo,
    UninitBytesAccess, UnsupportedOpInfo,
};

/// This type represents an Allocation in the Miri/CTFE core engine.
///
/// Its public API is rather low-level, working directly with allocation offsets and a custom error
/// type to account for the lack of an `AllocId` on this level. The Miri/CTFE core engine `memory`
/// module provides higher-level access.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
pub struct Allocation<Tag = AllocId, Extra = ()> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    bytes: Box<[u8]>,
    /// Maps from byte addresses to extra data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    relocations: Relocations<Tag>,
    /// Denotes which part of this allocation is initialized.
    init_mask: InitMask,
    /// The alignment of the allocation to detect unaligned reads.
    /// (`Align` guarantees that this is a power of two.)
    pub align: Align,
    /// `true` if the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}

/// Interned types generally have an `Outer` type and an `Inner` type, where
/// `Outer` is a newtype around `Interned<Inner>`, and all the operations are
/// done on `Outer`, because all occurrences are interned. E.g. `Ty` is an
/// outer type and `TyS` is its inner type.
///
/// Here things are different because only const allocations are interned. This
/// means that both the inner type (`Allocation`) and the outer type
/// (`ConstAllocation`) are used quite a bit.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
#[rustc_pass_by_value]
pub struct ConstAllocation<'tcx, Tag = AllocId, Extra = ()>(
    pub Interned<'tcx, Allocation<Tag, Extra>>,
);

impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This matches how `Allocation` is printed. We print it like this to
        // avoid having to update expected output in a lot of tests.
        write!(f, "{:?}", self.inner())
    }
}

impl<'tcx, Tag, Extra> ConstAllocation<'tcx, Tag, Extra> {
    pub fn inner(self) -> &'tcx Allocation<Tag, Extra> {
        self.0.0
    }
}

/// We have our own error type that does not know about the `AllocId`; that information
/// is added when converting to `InterpError`.
#[derive(Debug)]
pub enum AllocError {
    /// A scalar had the wrong size.
    ScalarSizeMismatch(ScalarSizeMismatch),
    /// Encountered a pointer where we needed raw bytes.
    ReadPointerAsBytes,
    /// Partially overwriting a pointer.
    PartialPointerOverwrite(Size),
    /// Using uninitialized data where it is not allowed.
    InvalidUninitBytes(Option<UninitBytesAccess>),
}

pub type AllocResult<T = ()> = Result<T, AllocError>;

impl From<ScalarSizeMismatch> for AllocError {
    fn from(s: ScalarSizeMismatch) -> Self {
        AllocError::ScalarSizeMismatch(s)
    }
}

impl AllocError {
    pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
        use AllocError::*;
        match self {
            ScalarSizeMismatch(s) => {
                InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ScalarSizeMismatch(s))
            }
            ReadPointerAsBytes => InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes),
            PartialPointerOverwrite(offset) => InterpError::Unsupported(
                UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
            ),
            InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
                UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
            ),
        }
    }
}

/// The information that makes up a memory access: offset and size.
#[derive(Copy, Clone, Debug)]
pub struct AllocRange {
    pub start: Size,
    pub size: Size,
}

/// Free-starting constructor for less syntactic overhead.
pub fn alloc_range(start: Size, size: Size) -> AllocRange {
    AllocRange { start, size }
}

impl AllocRange {
    pub fn end(self) -> Size {
        self.start + self.size // This does overflow checking.
    }

    /// Returns the `subrange` within this range; panics if it is not a subrange.
    pub fn subrange(self, subrange: AllocRange) -> AllocRange {
        let sub_start = self.start + subrange.start;
        let range = alloc_range(sub_start, subrange.size);
        assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
        range
    }
}

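// Illustrative example (not part of the original source): `alloc_range(Size::from_bytes(4),
// Size::from_bytes(8))` covers bytes 4..12, so `.end()` is 12. Taking
// `.subrange(alloc_range(Size::from_bytes(2), Size::from_bytes(3)))` yields the absolute range
// 6..9; a subrange reaching past offset 12 would trip the assertion above.
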
// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Tag> Allocation<Tag> {
    /// Creates an allocation initialized by the given bytes.
    pub fn from_bytes<'a>(
        slice: impl Into<Cow<'a, [u8]>>,
        align: Align,
        mutability: Mutability,
    ) -> Self {
        let bytes = Box::<[u8]>::from(slice.into());
        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            relocations: Relocations::new(),
            init_mask: InitMask::new(size, true),
            align,
            mutability,
            extra: (),
        }
    }

    pub fn from_bytes_byte_aligned_immutable<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::ONE, Mutability::Not)
    }

    /// Try to create an Allocation of `size` bytes, failing if there is not enough memory
    /// available to the compiler to do so.
    pub fn uninit(size: Size, align: Align, panic_on_fail: bool) -> InterpResult<'static, Self> {
        let bytes = Box::<[u8]>::try_new_zeroed_slice(size.bytes_usize()).map_err(|_| {
            // This results in an error that can happen non-deterministically, since the memory
            // available to the compiler can change between runs. Normally queries are always
            // deterministic. However, we can be non-deterministic here because all uses of const
            // evaluation (including ConstProp!) will make compilation fail (via hard error
            // or ICE) upon encountering a `MemoryExhausted` error.
            if panic_on_fail {
                panic!("Allocation::uninit called with panic_on_fail had allocation failure")
            }
            ty::tls::with(|tcx| {
                tcx.sess.delay_span_bug(DUMMY_SP, "exhausted memory during interpretation")
            });
            InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
        })?;
        // SAFETY: the box was zero-allocated, which is a valid initial value for `Box<[u8]>`.
        let bytes = unsafe { bytes.assume_init() };
        Ok(Allocation {
            bytes,
            relocations: Relocations::new(),
            init_mask: InitMask::new(size, false),
            align,
            mutability: Mutability::Mut,
            extra: (),
        })
    }
}

impl Allocation {
    /// Convert `Tag` and add `Extra` fields.
    pub fn convert_tag_add_extra<Tag, Extra>(
        self,
        cx: &impl HasDataLayout,
        extra: Extra,
        mut tagger: impl FnMut(Pointer<AllocId>) -> Pointer<Tag>,
    ) -> Allocation<Tag, Extra> {
        // Compute new pointer tags, which also adjusts the bytes.
        let mut bytes = self.bytes;
        let mut new_relocations = Vec::with_capacity(self.relocations.0.len());
        let ptr_size = cx.data_layout().pointer_size.bytes_usize();
        let endian = cx.data_layout().endian;
        for &(offset, alloc_id) in self.relocations.iter() {
            let idx = offset.bytes_usize();
            let ptr_bytes = &mut bytes[idx..idx + ptr_size];
            let bits = read_target_uint(endian, ptr_bytes).unwrap();
            let (ptr_tag, ptr_offset) =
                tagger(Pointer::new(alloc_id, Size::from_bytes(bits))).into_parts();
            write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
            new_relocations.push((offset, ptr_tag));
        }
        // Create allocation.
        Allocation {
            bytes,
            relocations: Relocations::from_presorted(new_relocations),
            init_mask: self.init_mask,
            align: self.align,
            mutability: self.mutability,
            extra,
        }
    }
}

/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
    pub fn len(&self) -> usize {
        self.bytes.len()
    }

    pub fn size(&self) -> Size {
        Size::from_bytes(self.len())
    }

    /// Looks at a slice which may contain uninitialized bytes or relocations. This differs
    /// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
    /// edges) at all.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
        &self.bytes[range]
    }

    /// Returns the mask indicating which bytes are initialized.
    pub fn init_mask(&self) -> &InitMask {
        &self.init_mask
    }

    /// Returns the relocation list.
    pub fn relocations(&self) -> &Relocations<Tag> {
        &self.relocations
    }
}

impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
    /// The last argument controls whether we error out when there are uninitialized
    /// or pointer bytes. You should never call this, call `get_bytes` or
    /// `get_bytes_with_uninit_and_ptr` instead.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
    /// on that.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    fn get_bytes_internal(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        check_init_and_ptr: bool,
    ) -> AllocResult<&[u8]> {
        if check_init_and_ptr {
            self.check_init(range)?;
            self.check_relocations(cx, range)?;
        } else {
            // We still don't want relocations on the *edges*.
            self.check_relocation_edges(cx, range)?;
        }

        Ok(&self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
    }

    /// Checks that these bytes are initialized and not pointer bytes, and then returns them
    /// as a slice.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult<&[u8]> {
        self.get_bytes_internal(cx, range, true)
    }

    /// It is the caller's responsibility to handle uninitialized and pointer bytes.
    /// However, this still checks that there are no relocations on the *edges*.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn get_bytes_with_uninit_and_ptr(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&[u8]> {
        self.get_bytes_internal(cx, range, false)
    }

    /// Just calling this already marks everything as defined and removes relocations,
    /// so be sure to actually put data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes_mut(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&mut [u8]> {
        self.mark_init(range, true);
        self.clear_relocations(cx, range)?;

        Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
    }

    /// A raw pointer variant of `get_bytes_mut` that avoids invalidating existing aliases into this memory.
    pub fn get_bytes_mut_ptr(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<*mut [u8]> {
        self.mark_init(range, true);
        self.clear_relocations(cx, range)?;

        assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
        let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
        let len = range.end().bytes_usize() - range.start.bytes_usize();
        Ok(ptr::slice_from_raw_parts_mut(begin_ptr, len))
    }
}

/// Reading and writing.
impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
    /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
    /// relocation. If `allow_uninit_and_ptr` is `false`, also enforces that the memory in the
    /// given range contains neither relocations nor uninitialized bytes.
    pub fn check_bytes(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        allow_uninit_and_ptr: bool,
    ) -> AllocResult {
        // Check bounds and relocations on the edges.
        self.get_bytes_with_uninit_and_ptr(cx, range)?;
        // Check uninit and ptr.
        if !allow_uninit_and_ptr {
            self.check_init(range)?;
            self.check_relocations(cx, range)?;
        }
        Ok(())
    }

    /// Reads a *non-ZST* scalar.
    ///
    /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<ScalarMaybeUninit<Tag>> {
        // `get_bytes_with_uninit_and_ptr` tests relocation edges.
        // We deliberately error when loading data that partially has provenance, or partially
        // initialized data (that's the check below), into a scalar. The LLVM semantics of this are
        // unclear so we are conservative. See <https://github.com/rust-lang/rust/issues/69488> for
        // further discussion.
        let bytes = self.get_bytes_with_uninit_and_ptr(cx, range)?;
        // Uninit check happens *after* we established that the alignment is correct.
        // We must not return `Ok()` for unaligned pointers!
        if self.is_init(range).is_err() {
            // This inflates uninitialized bytes to the entire scalar, even if only a few
            // bytes are uninitialized.
            return Ok(ScalarMaybeUninit::Uninit);
        }
        // Now we do the actual reading.
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
        // See if we got a pointer.
        if range.size != cx.data_layout().pointer_size {
            // Not a pointer.
            // *Now*, we better make sure that the inside is free of relocations too.
            self.check_relocations(cx, range)?;
        } else {
            // Maybe a pointer.
            if let Some(&prov) = self.relocations.get(&range.start) {
                let ptr = Pointer::new(prov, Size::from_bytes(bits));
                return Ok(ScalarMaybeUninit::from_pointer(ptr, cx));
            }
        }
        // We don't. Just return the bits.
        Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)))
    }

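    // Illustrative example (not in the original source): on a target with 8-byte pointers,
    // reading an 8-byte range whose first byte carries a relocation returns
    // `ScalarMaybeUninit::from_pointer`, with the stored bytes interpreted as the pointer's
    // offset; a smaller read over those bytes fails the `check_relocations` call above instead.
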
    /// Writes a *non-ZST* scalar.
    ///
    /// ZSTs can't be written because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
    #[instrument(skip(self, cx), level = "debug")]
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        val: ScalarMaybeUninit<Tag>,
    ) -> AllocResult {
        assert!(self.mutability == Mutability::Mut);

        let val = match val {
            ScalarMaybeUninit::Scalar(scalar) => scalar,
            ScalarMaybeUninit::Uninit => {
                return self.write_uninit(cx, range);
            }
        };

        // `to_bits_or_ptr_internal` is the right method because we just want to store this data
        // as-is into memory.
        let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
            Err(val) => {
                let (provenance, offset) = val.into_parts();
                (u128::from(offset.bytes()), Some(provenance))
            }
            Ok(data) => (data, None),
        };

        let endian = cx.data_layout().endian;
        let dst = self.get_bytes_mut(cx, range)?;
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also write a relocation.
        if let Some(provenance) = provenance {
            self.relocations.0.insert(range.start, provenance);
        }

        Ok(())
    }

    /// Write "uninit" to the given memory range.
    pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        self.mark_init(range, false);
        self.clear_relocations(cx, range)?;
        Ok(())
    }
}

impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Returns all relocations overlapping with the given pointer-offset pair.
    pub fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Tag)] {
        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
        // the beginning of this range.
        let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
        self.relocations.range(Size::from_bytes(start)..range.end())
    }

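    // Illustrative example (not in the original source): with 8-byte pointers, a relocation
    // stored at offset 16 covers bytes 16..24. A query for a range starting at offset 20 must
    // therefore look back to 20 - (8 - 1) = 13 so that the map lookup above still yields the
    // entry keyed at 16.
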
    /// Checks that there are no relocations overlapping with the given range.
    fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        if self.get_relocations(cx, range).is_empty() {
            Ok(())
        } else {
            Err(AllocError::ReadPointerAsBytes)
        }
    }

    /// Removes all relocations inside the given range.
    /// If there are relocations overlapping with the edges, they
    /// are removed as well *and* the bytes they cover are marked as
    /// uninitialized. This is a somewhat odd "spooky action at a distance",
    /// but it allows strictly more code to run than if we would just error
    /// immediately in that case.
    fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
    where
        Tag: Provenance,
    {
        // Find the start and end of the given range and its outermost relocations.
        let (first, last) = {
            // Find all relocations overlapping the given range.
            let relocations = self.get_relocations(cx, range);
            if relocations.is_empty() {
                return Ok(());
            }

            (
                relocations.first().unwrap().0,
                relocations.last().unwrap().0 + cx.data_layout().pointer_size,
            )
        };
        let start = range.start;
        let end = range.end();

        // We need to handle clearing the relocations from parts of a pointer. See
        // <https://github.com/rust-lang/rust/issues/87184> for details.
        if first < start {
            if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
                return Err(AllocError::PartialPointerOverwrite(first));
            }
            warn!(
                "Partial pointer overwrite! De-initializing memory at offsets {first:?}..{start:?}."
            );
            self.init_mask.set_range(first, start, false);
        }
        if last > end {
            if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
                return Err(AllocError::PartialPointerOverwrite(
                    last - cx.data_layout().pointer_size,
                ));
            }
            warn!(
                "Partial pointer overwrite! De-initializing memory at offsets {end:?}..{last:?}."
            );
            self.init_mask.set_range(end, last, false);
        }

        // Forget all the relocations.
        // Since relocations do not overlap, we know that removing until `last` (exclusive) is fine,
        // i.e., this will not remove any other relocations just after the ones we care about.
        self.relocations.0.remove_range(first..last);

        Ok(())
    }

    /// Errors if there are relocations overlapping with the edges of the
    /// given memory range.
    fn check_relocation_edges(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        self.check_relocations(cx, alloc_range(range.start, Size::ZERO))?;
        self.check_relocations(cx, alloc_range(range.end(), Size::ZERO))?;
        Ok(())
    }
}

554 /// "Relocations" stores the provenance information of pointers stored in memory.
555 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
556 pub struct Relocations
<Tag
= AllocId
>(SortedMap
<Size
, Tag
>);
558 impl<Tag
> Relocations
<Tag
> {
559 pub fn new() -> Self {
560 Relocations(SortedMap
::new())
563 // The caller must guarantee that the given relocations are already sorted
564 // by address and contain no duplicates.
565 pub fn from_presorted(r
: Vec
<(Size
, Tag
)>) -> Self {
566 Relocations(SortedMap
::from_presorted_elements(r
))
impl<Tag> Deref for Relocations<Tag> {
    type Target = SortedMap<Size, Tag>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

/// A partial, owned list of relocations to transfer into another allocation.
///
/// Offsets are already adjusted to the destination allocation.
pub struct AllocationRelocations<Tag> {
    dest_relocations: Vec<(Size, Tag)>,
}

impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    pub fn prepare_relocation_copy(
        &self,
        cx: &impl HasDataLayout,
        src: AllocRange,
        dest: Size,
        count: u64,
    ) -> AllocationRelocations<Tag> {
        let relocations = self.get_relocations(cx, src);
        if relocations.is_empty() {
            return AllocationRelocations { dest_relocations: Vec::new() };
        }

        let size = src.size;
        let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));

        // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
        // is mostly filled with redundant information since it's just N copies of the same `Tag`s
        // at slightly adjusted offsets. The reason we do this is so that in `mark_relocation_range`
        // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
        // the right sequence of relocations for all N copies.
        for i in 0..count {
            new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                // Compute the offset for the current repetition.
                let dest_offset = dest + size * i; // `Size` operations
                (
                    // Shift offsets from the source allocation to the destination allocation.
                    (offset + dest_offset) - src.start, // `Size` operations
                    reloc,
                )
            }));
        }

        AllocationRelocations { dest_relocations: new_relocations }
    }

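    // Illustrative example (not in the original source): copying `src = 4..12` (so `size = 8`)
    // to `dest = 100` with `count = 2` maps a relocation at source offset 6 to destination
    // offsets (6 + 100) - 4 = 102 and (6 + 108) - 4 = 110, one entry per repetition.
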
    /// Applies a relocation copy.
    /// The affected range, as defined in the parameters to `prepare_relocation_copy`, is expected
    /// to be clear of relocations.
    ///
    /// This is dangerous to use as it can violate internal `Allocation` invariants!
    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
    pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
        self.relocations.0.insert_presorted(relocations.dest_relocations);
    }
}

////////////////////////////////////////////////////////////////////////////////
// Uninitialized byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;

/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is initialized. If it is `false` the byte is uninitialized.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct InitMask {
    blocks: Vec<Block>,
    len: Size,
}

impl InitMask {
    pub const BLOCK_SIZE: u64 = 64;

    fn bit_index(bits: Size) -> (usize, usize) {
        // BLOCK_SIZE is the number of bits that can fit in a `Block`.
        // Each bit in a `Block` represents the initialization state of one byte of an allocation,
        // so we use `.bytes()` here.
        let bits = bits.bytes();
        let a = bits / InitMask::BLOCK_SIZE;
        let b = bits % InitMask::BLOCK_SIZE;
        (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
    }

    fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
        let block = block.try_into().ok().unwrap();
        let bit = bit.try_into().ok().unwrap();
        Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
    }

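    // Illustrative example (not in the original source): byte offset 70 lives in block
    // 70 / 64 = 1 at bit 70 % 64 = 6, so `bit_index(Size::from_bytes(70)) == (1, 6)`, and
    // `size_from_bit_index(1, 6)` maps back to 1 * 64 + 6 = 70.
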
    pub fn new(size: Size, state: bool) -> Self {
        let mut m = InitMask { blocks: vec![], len: Size::ZERO };
        m.grow(size, state);
        m
    }

    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
        let len = self.len;
        if end > len {
            self.grow(end - len, new_state);
            self.len = end;
        }
        self.set_range_inbounds(start, end, new_state);
    }

    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
        let (blocka, bita) = Self::bit_index(start);
        let (blockb, bitb) = Self::bit_index(end);
        if blocka == blockb {
            // First set all bits except the first `bita`,
            // then unset the last `64 - bitb` bits.
            let range = if bitb == 0 {
                u64::MAX << bita
            } else {
                (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
            };
            if new_state {
                self.blocks[blocka] |= range;
            } else {
                self.blocks[blocka] &= !range;
            }
            return;
        }
        // across block boundaries
        if new_state {
            // Set `bita..64` to `1`.
            self.blocks[blocka] |= u64::MAX << bita;
            // Set `0..bitb` to `1`.
            if bitb != 0 {
                self.blocks[blockb] |= u64::MAX >> (64 - bitb);
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = u64::MAX;
            }
        } else {
            // Set `bita..64` to `0`.
            self.blocks[blocka] &= !(u64::MAX << bita);
            // Set `0..bitb` to `0`.
            if bitb != 0 {
                self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = 0;
            }
        }
    }

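    // Illustrative example (not in the original source): setting bytes 2..5 within a single
    // block gives `bita = 2`, `bitb = 5`, so the mask is
    // (u64::MAX << 2) & (u64::MAX >> 59) == 0b11100, i.e. exactly bits 2, 3, and 4.
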
    pub fn get(&self, i: Size) -> bool {
        let (block, bit) = Self::bit_index(i);
        (self.blocks[block] & (1 << bit)) != 0
    }

    pub fn set(&mut self, i: Size, new_state: bool) {
        let (block, bit) = Self::bit_index(i);
        self.set_bit(block, bit, new_state);
    }

    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }

    pub fn grow(&mut self, amount: Size, new_state: bool) {
        if amount.bytes() == 0 {
            return;
        }
        let unused_trailing_bits =
            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
            self.blocks.extend(
                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
            );
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
    }

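    // Illustrative example (not in the original source): with one 64-bit block and `len = 60`
    // bytes, there are 64 - 60 = 4 unused trailing bits. Growing by 10 bytes exceeds that, so
    // 10 / 64 + 1 = 1 additional block is appended before the new range 60..70 is set via
    // `set_range_inbounds`.
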
    /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to `is_init`.
    fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> {
        /// A fast implementation of `find_bit`,
        /// which skips over an entire block at a time if it's all 0s (resp. 1s),
        /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
        ///
        /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
        /// and with the least significant bit (and lowest block) first:
        ///
        ///        00000000|00000000
        ///        ^      ^ ^      ^
        /// index: 0      7 8      15
        ///
        /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
        fn find_bit_fast(
            init_mask: &InitMask,
            start: Size,
            end: Size,
            is_init: bool,
        ) -> Option<Size> {
            /// Search one block, returning the index of the first bit equal to `is_init`.
            fn search_block(
                bits: Block,
                block: usize,
                start_bit: usize,
                is_init: bool,
            ) -> Option<Size> {
                // For the following examples, assume this function was called with:
                //   bits = 0b00111011
                //   start_bit = 3
                //   is_init = false
                // Note that, for the examples in this function, the most significant bit is written first,
                // which is backwards compared to the comments in `find_bit`/`find_bit_fast`.

                // Invert bits so we're always looking for the first set bit.
                //        ! 0b00111011
                //   bits = 0b11000100
                let bits = if is_init { bits } else { !bits };
                // Mask off unused start bits.
                //          0b11000100
                //        & 0b11111000
                //   bits = 0b11000000
                let bits = bits & (!0 << start_bit);
                // Find set bit, if any.
                //   bit = trailing_zeros(0b11000000)
                //   bit = 6
                if bits == 0 {
                    None
                } else {
                    let bit = bits.trailing_zeros();
                    Some(InitMask::size_from_bit_index(block, bit))
                }
            }

            if start >= end {
                return None;
            }

            // Convert `start` and `end` to block indexes and bit indexes within each block.
            // We must convert `end` to an inclusive bound to handle block boundaries correctly.
            //
            // For example:
            //
            //   (a) 00000000|00000000    (b) 00000000|
            //       ^~~~~~~~~~~^             ^~~~~~~~~^
            //     start       end          start     end
            //
            // In both cases, the block index of `end` is 1.
            // But we do want to search block 1 in (a), and we don't in (b).
            //
            // We subtract 1 from both end positions to make them inclusive:
            //
            //   (a) 00000000|00000000    (b) 00000000|
            //       ^~~~~~~~~~^              ^~~~~~~^
            //     start      end_inclusive start   end_inclusive
            //
            // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
            // This provides the desired behavior of searching blocks 0 and 1 for (a),
            // and searching only block 0 for (b).
            // There is no concern of overflows since we checked for `start >= end` above.
            let (start_block, start_bit) = InitMask::bit_index(start);
            let end_inclusive = Size::from_bytes(end.bytes() - 1);
            let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);

            // Handle first block: need to skip `start_bit` bits.
            //
            // We need to handle the first block separately,
            // because there may be bits earlier in the block that should be ignored,
            // such as the bit marked (1) in this example:
            //
            //       (1)
            //       -|------
            //   (c) 01000000|00000000|00000001
            //       ^~~~~~~~~~~~~~~~~~^
            //     start              end
            if let Some(i) =
                search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
            {
                // If the range is less than a block, we may find a matching bit after `end`.
                //
                // For example, we shouldn't successfully find bit (2), because it's after `end`:
                //
                //             (2)
                //       -------|
                //   (d) 00000001|00000000|00000001
                //       ^~~~~^
                //     start end
                //
                // An alternative would be to mask off end bits in the same way as we do for start bits,
                // but performing this check afterwards is faster and simpler to implement.
                if i < end {
                    return Some(i);
                } else {
                    return None;
                }
            }

            // Handle remaining blocks.
            //
            // We can skip over an entire block at once if it's all 0s (resp. 1s).
            // The block marked (3) in this example is the first block that will be handled by this loop,
            // and it will be skipped for that reason:
            //
            //                   (3)
            //                --------
            //   (e) 01000000|00000000|00000001
            //       ^~~~~~~~~~~~~~~~~~^
            //     start              end
            if start_block < end_block_inclusive {
                // This loop is written in a specific way for performance.
                // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
                // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
                // because both alternatives result in significantly worse codegen.
                // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
                // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
                for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
                    .iter()
                    .zip(start_block + 1..)
                {
                    if let Some(i) = search_block(bits, block, 0, is_init) {
                        // If this is the last block, we may find a matching bit after `end`.
                        //
                        // For example, we shouldn't successfully find bit (4), because it's after `end`:
                        //
                        //                               (4)
                        //                         -------|
                        //   (f) 00000001|00000000|00000001
                        //       ^~~~~~~~~~~~~~~~~~^
                        //     start              end
                        //
                        // As above with example (d), we could handle the end block separately and mask off end bits,
                        // but unconditionally searching an entire block at once and performing this check afterwards
                        // is faster and much simpler to implement.
                        if i < end {
                            return Some(i);
                        } else {
                            return None;
                        }
                    }
                }
            }

            None
        }

        #[cfg_attr(not(debug_assertions), allow(dead_code))]
        fn find_bit_slow(
            init_mask: &InitMask,
            start: Size,
            end: Size,
            is_init: bool,
        ) -> Option<Size> {
            (start..end).find(|&i| init_mask.get(i) == is_init)
        }

        let result = find_bit_fast(self, start, end, is_init);

        debug_assert_eq!(
            result,
            find_bit_slow(self, start, end, is_init),
            "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
            start,
            end,
            is_init,
            self
        );

        result
    }
}

/// A contiguous chunk of initialized or uninitialized memory.
pub enum InitChunk {
    Init(Range<Size>),
    Uninit(Range<Size>),
}

impl InitChunk {
    pub fn is_init(&self) -> bool {
        match self {
            Self::Init(_) => true,
            Self::Uninit(_) => false,
        }
    }

    pub fn range(&self) -> Range<Size> {
        match self {
            Self::Init(r) => r.clone(),
            Self::Uninit(r) => r.clone(),
        }
    }
}

impl InitMask {
    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
    /// indexes for the first contiguous span of the uninitialized access.
    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
        if end > self.len {
            return Err(self.len..end);
        }

        let uninit_start = self.find_bit(start, end, false);

        match uninit_start {
            Some(uninit_start) => {
                let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
                Err(uninit_start..uninit_end)
            }
            None => Ok(()),
        }
    }

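    // Illustrative example (not in the original source): for a mask where bytes 0..4 are
    // initialized and 4..8 are not, `is_range_initialized(Size::ZERO, Size::from_bytes(8))`
    // finds the first uninit bit at 4 and no init bit after it before the end, so it returns
    // `Err(4..8)`.
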
    /// Returns an iterator, yielding a range of byte indexes for each contiguous region
    /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
    ///
    /// The iterator guarantees the following:
    /// - Chunks are nonempty.
    /// - Chunks are adjacent (each range's start is equal to the previous range's end).
    /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
    /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
    pub fn range_as_init_chunks(&self, start: Size, end: Size) -> InitChunkIter<'_> {
        assert!(end <= self.len);

        let is_init = if start < end {
            self.get(start)
        } else {
            // `start..end` is empty: there are no chunks, so use some arbitrary value
            false
        };

        InitChunkIter { init_mask: self, is_init, start, end }
    }
}

/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`].
pub struct InitChunkIter<'a> {
    init_mask: &'a InitMask,
    /// Whether the next chunk we will return is initialized.
    /// If there are no more chunks, contains some arbitrary value.
    is_init: bool,
    /// The current byte index into `init_mask`.
    start: Size,
    /// The end byte index into `init_mask`.
    end: Size,
}

impl<'a> Iterator for InitChunkIter<'a> {
    type Item = InitChunk;

    fn next(&mut self) -> Option<Self::Item> {
        if self.start >= self.end {
            return None;
        }

        let end_of_chunk =
            self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
        let range = self.start..end_of_chunk;

        let ret =
            Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });

        self.is_init = !self.is_init;
        self.start = end_of_chunk;

        ret
    }
}

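// Illustrative example (not in the original source): for a mask where bytes 0..3 are
// initialized and 3..5 are not, `range_as_init_chunks(Size::ZERO, Size::from_bytes(5))`
// yields `InitChunk::Init(0..3)` followed by `InitChunk::Uninit(3..5)` and then `None`.
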
/// Uninitialized bytes.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Checks whether the given range is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
    /// indexes of the first contiguous uninitialized access.
    fn is_init(&self, range: AllocRange) -> Result<(), Range<Size>> {
        self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
    }

    /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
    /// error which will report the first range of bytes which is uninitialized.
    fn check_init(&self, range: AllocRange) -> AllocResult {
        self.is_init(range).map_err(|idx_range| {
            AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
                access_offset: range.start,
                access_size: range.size,
                uninit_offset: idx_range.start,
                uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
            }))
        })
    }

    fn mark_init(&mut self, range: AllocRange, is_init: bool) {
        if range.size.bytes() == 0 {
            return;
        }
        assert!(self.mutability == Mutability::Mut);
        self.init_mask.set_range(range.start, range.end(), is_init);
    }
}

/// Run-length encoding of the uninit mask.
/// Used to copy parts of a mask multiple times to another allocation.
pub struct InitMaskCompressed {
    /// Whether the first range is initialized.
    initial: bool,
    /// The lengths of ranges that are run-length encoded.
    /// The initialization state of the ranges alternate starting with `initial`.
    ranges: smallvec::SmallVec<[u64; 1]>,
}

impl InitMaskCompressed {
    pub fn no_bytes_init(&self) -> bool {
        // The `ranges` are run-length encoded and of alternating initialization state.
        // So if `ranges.len() > 1` then the second block is an initialized range.
        !self.initial && self.ranges.len() == 1
    }
}

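// Illustrative example (not in the original source): a fully uninitialized range of 16 bytes
// compresses to `initial = false` with `ranges = [16]`, so `no_bytes_init()` is `true`.
// A range that starts uninitialized but contains any initialized bytes has `ranges.len() > 1`
// and returns `false`.
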
/// Transferring the initialization mask to other allocations.
impl<Tag, Extra> Allocation<Tag, Extra> {
    /// Creates a run-length encoding of the initialization mask; panics if range is empty.
    ///
    /// This is essentially a more space-efficient version of
    /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`.
    pub fn compress_uninit_range(&self, range: AllocRange) -> InitMaskCompressed {
        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
        // the source and write it to the destination. Even if we optimized the memory accesses,
        // we'd be doing all of this `repeat` times.
        // Therefore we precompute a compressed version of the initialization mask of the source value and
        // then write it back `repeat` times without computing any more information from the source.

        // A precomputed cache for ranges of initialized / uninitialized bits:
        // 0000010010001110 will become
        // `[5, 1, 2, 1, 3, 3, 1]`,
        // where each element toggles the state.

        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();

        let mut chunks = self.init_mask.range_as_init_chunks(range.start, range.end()).peekable();

        let initial = chunks.peek().expect("range should be nonempty").is_init();

        // Here we rely on `range_as_init_chunks` to yield alternating init/uninit chunks.
        for chunk in chunks {
            let len = chunk.range().end.bytes() - chunk.range().start.bytes();
            ranges.push(len);
        }

        InitMaskCompressed { ranges, initial }
    }

    /// Applies multiple instances of the run-length encoding to the initialization mask.
    ///
    /// This is dangerous to use as it can violate internal `Allocation` invariants!
    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
    pub fn mark_compressed_init_range(
        &mut self,
        defined: &InitMaskCompressed,
        range: AllocRange,
        repeat: u64,
    ) {
        // An optimization where we can just overwrite an entire range of initialization
        // bits if they are going to be uniformly `1` or `0`.
        if defined.ranges.len() <= 1 {
            self.init_mask.set_range_inbounds(
                range.start,
                range.start + range.size * repeat, // `Size` operations
                defined.initial,
            );
            return;
        }

        for mut j in 0..repeat {
            j *= range.size.bytes();
            j += range.start.bytes();
            let mut cur = defined.initial;
            for range in &defined.ranges {
                let old_j = j;
                j += range;
                self.init_mask.set_range_inbounds(
                    Size::from_bytes(old_j),
                    Size::from_bytes(j),