//! The virtual memory representation of the MIR interpreter.

use std::borrow::Cow;
use std::convert::{TryFrom, TryInto};
use std::fmt;
use std::iter;
use std::ops::{Deref, Range};
use std::ptr;

use rustc_ast::Mutability;
use rustc_data_structures::intern::Interned;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Align, HasDataLayout, Size};

use super::{
    read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
    ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, UndefinedBehaviorInfo, UninitBytesAccess,
    UnsupportedOpInfo,
};
use crate::ty;

/// This type represents an Allocation in the Miri/CTFE core engine.
///
/// Its public API is rather low-level, working directly with allocation offsets and a custom error
/// type to account for the lack of an AllocId on this level. The Miri/CTFE core engine `memory`
/// module provides higher-level access.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct Allocation<Tag = AllocId, Extra = ()> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    bytes: Box<[u8]>,
    /// Maps from byte addresses to extra data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    relocations: Relocations<Tag>,
    /// Denotes which part of this allocation is initialized.
    init_mask: InitMask,
    /// The alignment of the allocation to detect unaligned reads.
    /// (`Align` guarantees that this is a power of two.)
    pub align: Align,
    /// `true` if the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}

/// Interned types generally have an `Outer` type and an `Inner` type, where
/// `Outer` is a newtype around `Interned<Inner>`, and all the operations are
/// done on `Outer`, because all occurrences are interned. E.g. `Ty` is an
/// outer type and `TyS` is its inner type.
///
/// Here things are different because only const allocations are interned. This
/// means that both the inner type (`Allocation`) and the outer type
/// (`ConstAllocation`) are used quite a bit.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
#[cfg_attr(not(bootstrap), rustc_pass_by_value)]
pub struct ConstAllocation<'tcx, Tag = AllocId, Extra = ()>(
    pub Interned<'tcx, Allocation<Tag, Extra>>,
);

impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This matches how `Allocation` is printed. We print it like this to
        // avoid having to update expected output in a lot of tests.
        write!(f, "{:?}", self.inner())
    }
}

impl<'tcx, Tag, Extra> ConstAllocation<'tcx, Tag, Extra> {
    pub fn inner(self) -> &'tcx Allocation<Tag, Extra> {
        self.0.0
    }
}

/// We have our own error type that does not know about the `AllocId`; that information
/// is added when converting to `InterpError`.
#[derive(Debug)]
pub enum AllocError {
    /// Encountered a pointer where we needed raw bytes.
    ReadPointerAsBytes,
    /// Partially overwriting a pointer.
    PartialPointerOverwrite(Size),
    /// Using uninitialized data where it is not allowed.
    InvalidUninitBytes(Option<UninitBytesAccess>),
}

pub type AllocResult<T = ()> = Result<T, AllocError>;

impl AllocError {
    pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
        use AllocError::*;
        match self {
            ReadPointerAsBytes => InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes),
            PartialPointerOverwrite(offset) => InterpError::Unsupported(
                UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
            ),
            InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
                UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
            ),
        }
    }
}

/// The information that makes up a memory access: offset and size.
#[derive(Copy, Clone, Debug)]
pub struct AllocRange {
    pub start: Size,
    pub size: Size,
}

/// Free-starting constructor for less syntactic overhead.
pub fn alloc_range(start: Size, size: Size) -> AllocRange {
    AllocRange { start, size }
}

impl AllocRange {
    #[inline(always)]
    pub fn end(self) -> Size {
        self.start + self.size // This does overflow checking.
    }

    /// Returns the `subrange` within this range; panics if it is not a subrange.
    #[inline]
    pub fn subrange(self, subrange: AllocRange) -> AllocRange {
        let sub_start = self.start + subrange.start;
        let range = alloc_range(sub_start, subrange.size);
        assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
        range
    }
}
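
// Illustrative sketch (not part of the original source) of how `alloc_range`,
// `end`, and `subrange` compose; the byte values are made up:
//
//     let outer = alloc_range(Size::from_bytes(8), Size::from_bytes(16)); // bytes 8..24
//     assert_eq!(outer.end(), Size::from_bytes(24));
//     // A subrange is given relative to `outer.start`:
//     let inner = outer.subrange(alloc_range(Size::from_bytes(4), Size::from_bytes(2)));
//     assert_eq!(inner.start, Size::from_bytes(12)); // bytes 12..14
//     assert_eq!(inner.size, Size::from_bytes(2));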

// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Tag> Allocation<Tag> {
    /// Creates an allocation initialized by the given bytes.
    pub fn from_bytes<'a>(
        slice: impl Into<Cow<'a, [u8]>>,
        align: Align,
        mutability: Mutability,
    ) -> Self {
        let bytes = Box::<[u8]>::from(slice.into());
        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            relocations: Relocations::new(),
            init_mask: InitMask::new(size, true),
            align,
            mutability,
            extra: (),
        }
    }

    pub fn from_bytes_byte_aligned_immutable<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::ONE, Mutability::Not)
    }
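
    // Illustrative sketch (not part of the original source): building a small,
    // fully initialized, read-only allocation from literal bytes.
    //
    //     let alloc: Allocation = Allocation::from_bytes_byte_aligned_immutable(&[1u8, 2, 3][..]);
    //     assert_eq!(alloc.len(), 3);
    //     assert_eq!(alloc.align, Align::ONE);
    //     assert_eq!(alloc.mutability, Mutability::Not);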

    /// Try to create an Allocation of `size` bytes, failing if there is not enough memory
    /// available to the compiler to do so.
    pub fn uninit(size: Size, align: Align, panic_on_fail: bool) -> InterpResult<'static, Self> {
        let bytes = Box::<[u8]>::try_new_zeroed_slice(size.bytes_usize()).map_err(|_| {
            // This results in an error that can happen non-deterministically, since the memory
            // available to the compiler can change between runs. Normally queries are always
            // deterministic. However, we can be non-deterministic here because all uses of const
            // evaluation (including ConstProp!) will make compilation fail (via hard error
            // or ICE) upon encountering a `MemoryExhausted` error.
            if panic_on_fail {
                panic!("Allocation::uninit called with panic_on_fail had allocation failure")
            }
            ty::tls::with(|tcx| {
                tcx.sess.delay_span_bug(DUMMY_SP, "exhausted memory during interpretation")
            });
            InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
        })?;
        // SAFETY: the box was zero-allocated, which is a valid initial value for Box<[u8]>
        let bytes = unsafe { bytes.assume_init() };
        Ok(Allocation {
            bytes,
            relocations: Relocations::new(),
            init_mask: InitMask::new(size, false),
            align,
            mutability: Mutability::Mut,
            extra: (),
        })
    }
}

impl Allocation {
    /// Convert Tag and add Extra fields.
    pub fn convert_tag_add_extra<Tag, Extra>(
        self,
        cx: &impl HasDataLayout,
        extra: Extra,
        mut tagger: impl FnMut(Pointer<AllocId>) -> Pointer<Tag>,
    ) -> Allocation<Tag, Extra> {
        // Compute new pointer tags, which also adjusts the bytes.
        let mut bytes = self.bytes;
        let mut new_relocations = Vec::with_capacity(self.relocations.0.len());
        let ptr_size = cx.data_layout().pointer_size.bytes_usize();
        let endian = cx.data_layout().endian;
        for &(offset, alloc_id) in self.relocations.iter() {
            let idx = offset.bytes_usize();
            let ptr_bytes = &mut bytes[idx..idx + ptr_size];
            let bits = read_target_uint(endian, ptr_bytes).unwrap();
            let (ptr_tag, ptr_offset) =
                tagger(Pointer::new(alloc_id, Size::from_bytes(bits))).into_parts();
            write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
            new_relocations.push((offset, ptr_tag));
        }

        // Create allocation.
        Allocation {
            bytes,
            relocations: Relocations::from_presorted(new_relocations),
            init_mask: self.init_mask,
            align: self.align,
            mutability: self.mutability,
            extra,
        }
    }
}
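
// Illustrative sketch (not part of the original source): a machine could call
// `convert_tag_add_extra` with a tagger that simply wraps the untagged
// provenance in its own type. `MyTag`, `MyExtra`, and `my_extra` below are
// hypothetical names, not part of this module:
//
//     let tagged: Allocation<MyTag, MyExtra> = alloc.convert_tag_add_extra(
//         &cx,
//         my_extra,
//         |ptr| {
//             let (alloc_id, offset) = ptr.into_parts();
//             Pointer::new(MyTag::from(alloc_id), offset)
//         },
//     );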

/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
    pub fn len(&self) -> usize {
        self.bytes.len()
    }

    pub fn size(&self) -> Size {
        Size::from_bytes(self.len())
    }

    /// Looks at a slice which may describe uninitialized bytes or describe a relocation. This differs
    /// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
    /// edges) at all.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
        &self.bytes[range]
    }

    /// Returns the mask indicating which bytes are initialized.
    pub fn init_mask(&self) -> &InitMask {
        &self.init_mask
    }

    /// Returns the relocation list.
    pub fn relocations(&self) -> &Relocations<Tag> {
        &self.relocations
    }
}

/// Byte accessors.
impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
    /// The last argument controls whether we error out when there are uninitialized
    /// or pointer bytes. You should never call this, call `get_bytes` or
    /// `get_bytes_with_uninit_and_ptr` instead.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
    /// on that.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    fn get_bytes_internal(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        check_init_and_ptr: bool,
    ) -> AllocResult<&[u8]> {
        if check_init_and_ptr {
            self.check_init(range)?;
            self.check_relocations(cx, range)?;
        } else {
            // We still don't want relocations on the *edges*.
            self.check_relocation_edges(cx, range)?;
        }

        Ok(&self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
    }

    /// Checks that these bytes are initialized and not pointer bytes, and then returns them
    /// as a slice.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    #[inline]
    pub fn get_bytes(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult<&[u8]> {
        self.get_bytes_internal(cx, range, true)
    }

    /// It is the caller's responsibility to handle uninitialized and pointer bytes.
    /// However, this still checks that there are no relocations on the *edges*.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    #[inline]
    pub fn get_bytes_with_uninit_and_ptr(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&[u8]> {
        self.get_bytes_internal(cx, range, false)
    }

    /// Just calling this already marks everything as defined and removes relocations,
    /// so be sure to actually put data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes_mut(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&mut [u8]> {
        self.mark_init(range, true);
        self.clear_relocations(cx, range)?;

        Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
    }

    /// A raw pointer variant of `get_bytes_mut` that avoids invalidating existing aliases into this memory.
    pub fn get_bytes_mut_ptr(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<*mut [u8]> {
        self.mark_init(range, true);
        self.clear_relocations(cx, range)?;

        assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
        let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
        let len = range.end().bytes_usize() - range.start.bytes_usize();
        Ok(ptr::slice_from_raw_parts_mut(begin_ptr, len))
    }
}

/// Reading and writing.
impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
    /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
    /// relocation. If `allow_uninit_and_ptr` is `false`, also enforces that the memory in the
    /// given range contains neither relocations nor uninitialized bytes.
    pub fn check_bytes(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        allow_uninit_and_ptr: bool,
    ) -> AllocResult {
        // Check bounds and relocations on the edges.
        self.get_bytes_with_uninit_and_ptr(cx, range)?;
        // Check uninit and ptr.
        if !allow_uninit_and_ptr {
            self.check_init(range)?;
            self.check_relocations(cx, range)?;
        }
        Ok(())
    }

    /// Reads a *non-ZST* scalar.
    ///
    /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<ScalarMaybeUninit<Tag>> {
        // `get_bytes_with_uninit_and_ptr` tests relocation edges.
        // We deliberately error when loading data that partially has provenance, or partially
        // initialized data (that's the check below), into a scalar. The LLVM semantics of this are
        // unclear so we are conservative. See <https://github.com/rust-lang/rust/issues/69488> for
        // further discussion.
        let bytes = self.get_bytes_with_uninit_and_ptr(cx, range)?;
        // Uninit check happens *after* we established that the alignment is correct.
        // We must not return `Ok()` for unaligned pointers!
        if self.is_init(range).is_err() {
            // This inflates uninitialized bytes to the entire scalar, even if only a few
            // bytes are uninitialized.
            return Ok(ScalarMaybeUninit::Uninit);
        }

        // Now we do the actual reading.
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
        // See if we got a pointer.
        if range.size != cx.data_layout().pointer_size {
            // *Now*, we better make sure that the inside is free of relocations too.
            self.check_relocations(cx, range)?;
        } else {
            if let Some(&prov) = self.relocations.get(&range.start) {
                let ptr = Pointer::new(prov, Size::from_bytes(bits));
                return Ok(ScalarMaybeUninit::from_pointer(ptr, cx));
            }
        }
        // We don't. Just return the bits.
        Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)))
    }

    /// Writes a *non-ZST* scalar.
    ///
    /// ZSTs can't be written because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        val: ScalarMaybeUninit<Tag>,
    ) -> AllocResult {
        assert!(self.mutability == Mutability::Mut);

        let val = match val {
            ScalarMaybeUninit::Scalar(scalar) => scalar,
            ScalarMaybeUninit::Uninit => {
                self.mark_init(range, false);
                return Ok(());
            }
        };

        // `to_bits_or_ptr_internal` is the right method because we just want to store this data
        // as-is into memory.
        let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size) {
            Err(val) => {
                let (provenance, offset) = val.into_parts();
                (u128::from(offset.bytes()), Some(provenance))
            }
            Ok(data) => (data, None),
        };

        let endian = cx.data_layout().endian;
        let dst = self.get_bytes_mut(cx, range)?;
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also write a relocation.
        if let Some(provenance) = provenance {
            self.relocations.0.insert(range.start, provenance);
        }

        Ok(())
    }
}
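
// Illustrative sketch (not part of the original source): writing and then
// reading back a scalar at the same range round-trips, assuming `cx` is some
// `HasDataLayout` and `alloc` is a mutable `Allocation` large enough for the
// access:
//
//     let range = alloc_range(Size::ZERO, Size::from_bytes(4));
//     alloc.write_scalar(&cx, range, ScalarMaybeUninit::Scalar(Scalar::from_u32(42)))?;
//     // Reads the same 4 bytes back as a (possibly uninitialized) scalar.
//     let val = alloc.read_scalar(&cx, range)?;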

/// Relocations.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Returns all relocations overlapping with the given pointer-offset pair.
    pub fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Tag)] {
        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
        // the beginning of this range.
        let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
        self.relocations.range(Size::from_bytes(start)..range.end())
    }
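
    // Illustrative sketch (not part of the original source): with an 8-byte
    // pointer size, a query for bytes `10..12` must also consider a relocation
    // entry stored at offset 3, because that pointer covers bytes `3..11`.
    // Hence the search starts at `10 - (8 - 1) = 3`.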

    /// Checks that there are no relocations overlapping with the given range.
    #[inline]
    fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        if self.get_relocations(cx, range).is_empty() {
            Ok(())
        } else {
            Err(AllocError::ReadPointerAsBytes)
        }
    }

    /// Removes all relocations inside the given range.
    /// If there are relocations overlapping with the edges, they
    /// are removed as well *and* the bytes they cover are marked as
    /// uninitialized. This is a somewhat odd "spooky action at a distance",
    /// but it allows strictly more code to run than if we would just error
    /// immediately in that case.
    fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
    where
        Tag: Provenance,
    {
        // Find the start and end of the given range and its outermost relocations.
        let (first, last) = {
            // Find all relocations overlapping the given range.
            let relocations = self.get_relocations(cx, range);
            if relocations.is_empty() {
                return Ok(());
            }

            (
                relocations.first().unwrap().0,
                relocations.last().unwrap().0 + cx.data_layout().pointer_size,
            )
        };
        let start = range.start;
        let end = range.end();

        // We need to handle clearing the relocations from parts of a pointer. See
        // <https://github.com/rust-lang/rust/issues/87184> for details.
        if first < start {
            if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
                return Err(AllocError::PartialPointerOverwrite(first));
            }
            self.init_mask.set_range(first, start, false);
        }
        if last > end {
            if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
                return Err(AllocError::PartialPointerOverwrite(
                    last - cx.data_layout().pointer_size,
                ));
            }
            self.init_mask.set_range(end, last, false);
        }

        // Forget all the relocations.
        self.relocations.0.remove_range(first..last);

        Ok(())
    }
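
    // Illustrative sketch (not part of the original source): with an 8-byte
    // pointer stored at offsets `0..8`, clearing relocations for the range
    // `4..6` removes that relocation entirely and marks bytes `0..4` and
    // `6..8` as uninitialized, unless the provenance type opts into
    // `ERR_ON_PARTIAL_PTR_OVERWRITE`, in which case this is an error.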

    /// Errors if there are relocations overlapping with the edges of the
    /// given memory range.
    #[inline]
    fn check_relocation_edges(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        self.check_relocations(cx, alloc_range(range.start, Size::ZERO))?;
        self.check_relocations(cx, alloc_range(range.end(), Size::ZERO))?;
        Ok(())
    }
}
528 /// "Relocations" stores the provenance information of pointers stored in memory.
529 #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
530 pub struct Relocations
<Tag
= AllocId
>(SortedMap
<Size
, Tag
>);
532 impl<Tag
> Relocations
<Tag
> {
533 pub fn new() -> Self {
534 Relocations(SortedMap
::new())
537 // The caller must guarantee that the given relocations are already sorted
538 // by address and contain no duplicates.
539 pub fn from_presorted(r
: Vec
<(Size
, Tag
)>) -> Self {
540 Relocations(SortedMap
::from_presorted_elements(r
))

impl<Tag> Deref for Relocations<Tag> {
    type Target = SortedMap<Size, Tag>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

/// A partial, owned list of relocations to transfer into another allocation.
pub struct AllocationRelocations<Tag> {
    relative_relocations: Vec<(Size, Tag)>,
}

impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    pub fn prepare_relocation_copy(
        &self,
        cx: &impl HasDataLayout,
        src: AllocRange,
        dest: Size,
        count: u64,
    ) -> AllocationRelocations<Tag> {
        let relocations = self.get_relocations(cx, src);
        if relocations.is_empty() {
            return AllocationRelocations { relative_relocations: Vec::new() };
        }

        let size = src.size;
        let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));

        for i in 0..count {
            new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                // compute offset for current repetition
                let dest_offset = dest + size * i; // `Size` operations
                (
                    // shift offsets from source allocation to destination allocation
                    (offset + dest_offset) - src.start, // `Size` operations
                    reloc,
                )
            }));
        }

        AllocationRelocations { relative_relocations: new_relocations }
    }

    /// Applies a relocation copy.
    /// The affected range, as defined in the parameters to `prepare_relocation_copy`, is expected
    /// to be clear of relocations.
    pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
        self.relocations.0.insert_presorted(relocations.relative_relocations);
    }
}

////////////////////////////////////////////////////////////////////////////////
// Uninitialized byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;

/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is initialized. If it is `false` the byte is uninitialized.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct InitMask {
    blocks: Vec<Block>,
    len: Size,
}

impl InitMask {
    pub const BLOCK_SIZE: u64 = 64;

    #[inline]
    fn bit_index(bits: Size) -> (usize, usize) {
        // BLOCK_SIZE is the number of bits that can fit in a `Block`.
        // Each bit in a `Block` represents the initialization state of one byte of an allocation,
        // so we use `.bytes()` here.
        let bits = bits.bytes();
        let a = bits / InitMask::BLOCK_SIZE;
        let b = bits % InitMask::BLOCK_SIZE;
        (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
    }

    #[inline]
    fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
        let block = block.try_into().ok().unwrap();
        let bit = bit.try_into().ok().unwrap();
        Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
    }

    pub fn new(size: Size, state: bool) -> Self {
        let mut m = InitMask { blocks: vec![], len: Size::ZERO };
        m.grow(size, state);
        m
    }
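
    // Illustrative sketch (not part of the original source): with 64-bit
    // blocks, byte 200 of an allocation lives in block `200 / 64 = 3` at bit
    // `200 % 64 = 8`, so `bit_index(Size::from_bytes(200)) == (3, 8)`, and
    // `size_from_bit_index(3, 8)` maps back to byte 200.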

    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
        let len = self.len;
        if end > len {
            self.grow(end - len, new_state);
        }
        self.set_range_inbounds(start, end, new_state);
    }

    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
        let (blocka, bita) = Self::bit_index(start);
        let (blockb, bitb) = Self::bit_index(end);
        if blocka == blockb {
            // First set all bits except the first `bita`,
            // then unset the last `64 - bitb` bits.
            let range = if bitb == 0 {
                u64::MAX << bita
            } else {
                (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
            };
            if new_state {
                self.blocks[blocka] |= range;
            } else {
                self.blocks[blocka] &= !range;
            }
            return;
        }
        // across block boundaries
        if new_state {
            // Set `bita..64` to `1`.
            self.blocks[blocka] |= u64::MAX << bita;
            // Set `0..bitb` to `1`.
            if bitb != 0 {
                self.blocks[blockb] |= u64::MAX >> (64 - bitb);
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = u64::MAX;
            }
        } else {
            // Set `bita..64` to `0`.
            self.blocks[blocka] &= !(u64::MAX << bita);
            // Set `0..bitb` to `0`.
            if bitb != 0 {
                self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = 0;
            }
        }
    }
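
    // Illustrative sketch (not part of the original source): within one block,
    // setting bytes 2..5 builds the mask
    //     (u64::MAX << 2) & (u64::MAX >> (64 - 5)) == 0b11100,
    // i.e. exactly bits 2, 3, and 4, which is then OR-ed in or AND-ed out
    // depending on `new_state`.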

    #[inline]
    pub fn get(&self, i: Size) -> bool {
        let (block, bit) = Self::bit_index(i);
        (self.blocks[block] & (1 << bit)) != 0
    }

    #[inline]
    pub fn set(&mut self, i: Size, new_state: bool) {
        let (block, bit) = Self::bit_index(i);
        self.set_bit(block, bit, new_state);
    }

    #[inline]
    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }

    pub fn grow(&mut self, amount: Size, new_state: bool) {
        if amount.bytes() == 0 {
            return;
        }
        let unused_trailing_bits =
            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
            self.blocks.extend(
                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
            );
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
    }
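
    // Illustrative sketch (not part of the original source): a mask tracking
    // 100 bytes uses two 64-bit blocks and has `2 * 64 - 100 = 28` unused
    // trailing bits, so growing by up to 28 bytes reuses the existing blocks,
    // while a larger growth first appends `amount / 64 + 1` zeroed blocks.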

    /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to `is_init`.
    fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> {
        /// A fast implementation of `find_bit`,
        /// which skips over an entire block at a time if it's all 0s (resp. 1s),
        /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
        ///
        /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
        /// and with the least significant bit (and lowest block) first:
        ///
        ///        00000000|00000000
        ///        ^      ^ ^      ^
        /// index: 0      7 8      15
        ///
        /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
        fn find_bit_fast(
            init_mask: &InitMask,
            start: Size,
            end: Size,
            is_init: bool,
        ) -> Option<Size> {
            /// Search one block, returning the index of the first bit equal to `is_init`.
            fn search_block(
                bits: Block,
                block: usize,
                start_bit: usize,
                is_init: bool,
            ) -> Option<Size> {
                // For the following examples, assume this function was called with:
                //   bits = 0b00111011
                //   start_bit = 3
                //   is_init = false
                // Note that, for the examples in this function, the most significant bit is written first,
                // which is backwards compared to the comments in `find_bit`/`find_bit_fast`.

                // Invert bits so we're always looking for the first set bit.
                //        ! 0b00111011
                //   bits = 0b11000100
                let bits = if is_init { bits } else { !bits };
                // Mask off unused start bits.
                //          0b11000100
                //        & 0b11111000
                //   bits = 0b11000000
                let bits = bits & (!0 << start_bit);
                // Find set bit, if any.
                //   bit = trailing_zeros(0b11000000)
                //   bit = 6
                if bits == 0 {
                    None
                } else {
                    let bit = bits.trailing_zeros();
                    Some(InitMask::size_from_bit_index(block, bit))
                }
            }

            if start >= end {
                return None;
            }

            // Convert `start` and `end` to block indexes and bit indexes within each block.
            // We must convert `end` to an inclusive bound to handle block boundaries correctly.
            //
            // For example:
            //
            //   (a) 00000000|00000000    (b) 00000000|
            //       ^~~~~~~~~~~^             ^~~~~~~~~^
            //       start       end          start    end
            //
            // In both cases, the block index of `end` is 1.
            // But we do want to search block 1 in (a), and we don't in (b).
            //
            // We subtract 1 from both end positions to make them inclusive:
            //
            //   (a) 00000000|00000000    (b) 00000000|
            //       ^~~~~~~~~~^              ^~~~~~~^
            //       start      end_inclusive start  end_inclusive
            //
            // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
            // This provides the desired behavior of searching blocks 0 and 1 for (a),
            // and searching only block 0 for (b).
            // There is no concern of overflows since we checked for `start >= end` above.
            let (start_block, start_bit) = InitMask::bit_index(start);
            let end_inclusive = Size::from_bytes(end.bytes() - 1);
            let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);

            // Handle first block: need to skip `start_bit` bits.
            //
            // We need to handle the first block separately,
            // because there may be bits earlier in the block that should be ignored,
            // such as the bit marked (1) in this example:
            //
            //       (1)
            //   (c) 01000000|00000000|00000001
            //       ^~~~~~~~~~~~~~~~~~^
            //       start              end
            if let Some(i) =
                search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
            {
                // If the range is less than a block, we may find a matching bit after `end`.
                //
                // For example, we shouldn't successfully find bit (2), because it's after `end`:
                //
                //             (2)
                //   (d) 00000001|00000000|00000001
                //       ^~~~~^
                //       start end
                //
                // An alternative would be to mask off end bits in the same way as we do for start bits,
                // but performing this check afterwards is faster and simpler to implement.
                if i < end {
                    return Some(i);
                } else {
                    return None;
                }
            }

            // Handle remaining blocks.
            //
            // We can skip over an entire block at once if it's all 0s (resp. 1s).
            // The block marked (3) in this example is the first block that will be handled by this loop,
            // and it will be skipped for that reason:
            //
            //                   (3)
            //   (e) 01000000|00000000|00000001
            //       ^~~~~~~~~~~~~~~~~~^
            //       start              end
            if start_block < end_block_inclusive {
                // This loop is written in a specific way for performance.
                // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
                // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
                // because both alternatives result in significantly worse codegen.
                // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
                // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
                for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
                    .iter()
                    .zip(start_block + 1..)
                {
                    if let Some(i) = search_block(bits, block, 0, is_init) {
                        // If this is the last block, we may find a matching bit after `end`.
                        //
                        // For example, we shouldn't successfully find bit (4), because it's after `end`:
                        //
                        //                               (4)
                        //   (f) 00000001|00000000|00000001
                        //       ^~~~~~~~~~~~~~~~~~^
                        //       start              end
                        //
                        // As above with example (d), we could handle the end block separately and mask off end bits,
                        // but unconditionally searching an entire block at once and performing this check afterwards
                        // is faster and much simpler to implement.
                        if i < end {
                            return Some(i);
                        } else {
                            return None;
                        }
                    }
                }
            }

            None
        }

        #[cfg_attr(not(debug_assertions), allow(dead_code))]
        fn find_bit_slow(
            init_mask: &InitMask,
            start: Size,
            end: Size,
            is_init: bool,
        ) -> Option<Size> {
            (start..end).find(|&i| init_mask.get(i) == is_init)
        }

        let result = find_bit_fast(self, start, end, is_init);

        debug_assert_eq!(
            result,
            find_bit_slow(self, start, end, is_init),
            "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
            start,
            end,
            is_init,
            self
        );

        result
    }
}

/// A contiguous chunk of initialized or uninitialized memory.
pub enum InitChunk {
    Init(Range<Size>),
    Uninit(Range<Size>),
}

impl InitChunk {
    #[inline]
    pub fn is_init(&self) -> bool {
        match self {
            Self::Init(_) => true,
            Self::Uninit(_) => false,
        }
    }

    #[inline]
    pub fn range(&self) -> Range<Size> {
        match self {
            Self::Init(r) => r.clone(),
            Self::Uninit(r) => r.clone(),
        }
    }
}

impl InitMask {
    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
    /// indexes for the first contiguous span of the uninitialized access.
    #[inline]
    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
        if end > self.len {
            return Err(self.len..end);
        }

        let uninit_start = self.find_bit(start, end, false);

        match uninit_start {
            Some(uninit_start) => {
                let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
                Err(uninit_start..uninit_end)
            }
            None => Ok(()),
        }
    }

    /// Returns an iterator, yielding a range of byte indexes for each contiguous region
    /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
    ///
    /// The iterator guarantees the following:
    /// - Chunks are nonempty.
    /// - Chunks are adjacent (each range's start is equal to the previous range's end).
    /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
    /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
    #[inline]
    pub fn range_as_init_chunks(&self, start: Size, end: Size) -> InitChunkIter<'_> {
        assert!(end <= self.len);

        let is_init = if start < end {
            self.get(start)
        } else {
            // `start..end` is empty: there are no chunks, so use some arbitrary value
            false
        };

        InitChunkIter { init_mask: self, is_init, start, end }
    }
}

/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`].
#[derive(Clone)]
pub struct InitChunkIter<'a> {
    init_mask: &'a InitMask,
    /// Whether the next chunk we will return is initialized.
    /// If there are no more chunks, contains some arbitrary value.
    is_init: bool,
    /// The current byte index into `init_mask`.
    start: Size,
    /// The end byte index into `init_mask`.
    end: Size,
}

impl<'a> Iterator for InitChunkIter<'a> {
    type Item = InitChunk;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        if self.start >= self.end {
            return None;
        }

        let end_of_chunk =
            self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
        let range = self.start..end_of_chunk;

        let ret =
            Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });

        self.is_init = !self.is_init;
        self.start = end_of_chunk;

        ret
    }
}
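
// Illustrative sketch (not part of the original source), with byte indexes
// written as plain integers for brevity: for a 16-byte mask where bytes 0..4
// and 12..16 are initialized, `range_as_init_chunks(0, 16)` yields
// `Init(0..4)`, `Uninit(4..12)`, `Init(12..16)`: nonempty, adjacent,
// alternating chunks covering exactly the requested range.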

/// Uninitialized bytes.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Checks whether the given range is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
    /// indexes of the first contiguous uninitialized access.
    fn is_init(&self, range: AllocRange) -> Result<(), Range<Size>> {
        self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
    }

    /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
    /// error which will report the first range of bytes which is uninitialized.
    fn check_init(&self, range: AllocRange) -> AllocResult {
        self.is_init(range).map_err(|idx_range| {
            AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
                access_offset: range.start,
                access_size: range.size,
                uninit_offset: idx_range.start,
                uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
            }))
        })
    }

    pub fn mark_init(&mut self, range: AllocRange, is_init: bool) {
        if range.size.bytes() == 0 {
            return;
        }
        assert!(self.mutability == Mutability::Mut);
        self.init_mask.set_range(range.start, range.end(), is_init);
    }
}

/// Run-length encoding of the uninit mask.
/// Used to copy parts of a mask multiple times to another allocation.
pub struct InitMaskCompressed {
    /// Whether the first range is initialized.
    initial: bool,
    /// The lengths of ranges that are run-length encoded.
    /// The initialization state of the ranges alternate starting with `initial`.
    ranges: smallvec::SmallVec<[u64; 1]>,
}

impl InitMaskCompressed {
    pub fn no_bytes_init(&self) -> bool {
        // The `ranges` are run-length encoded and of alternating initialization state.
        // So if `ranges.len() > 1` then the second block is an initialized range.
        !self.initial && self.ranges.len() == 1
    }
}
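
// Illustrative sketch (not part of the original source): a run-length encoding
// of `[5]` with `initial = false` means "5 uninitialized bytes and nothing
// else", so `no_bytes_init()` is true; `[5, 3]` with `initial = false` has an
// initialized second run, so it is false.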

/// Transferring the initialization mask to other allocations.
impl<Tag, Extra> Allocation<Tag, Extra> {
    /// Creates a run-length encoding of the initialization mask; panics if range is empty.
    ///
    /// This is essentially a more space-efficient version of
    /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`.
    pub fn compress_uninit_range(&self, range: AllocRange) -> InitMaskCompressed {
        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
        // the source and write it to the destination. Even if we optimized the memory accesses,
        // we'd be doing all of this `repeat` times.
        // Therefore we precompute a compressed version of the initialization mask of the source value and
        // then write it back `repeat` times without computing any more information from the source.

        // A precomputed cache for ranges of initialized / uninitialized bits
        // 0000010010001110 will become
        // `[5, 1, 2, 1, 3, 3, 1]`,
        // where each element toggles the state.

        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();

        let mut chunks = self.init_mask.range_as_init_chunks(range.start, range.end()).peekable();

        let initial = chunks.peek().expect("range should be nonempty").is_init();

        // Here we rely on `range_as_init_chunks` to yield alternating init/uninit chunks.
        for chunk in chunks {
            let len = chunk.range().end.bytes() - chunk.range().start.bytes();
            ranges.push(len);
        }

        InitMaskCompressed { ranges, initial }
    }

    /// Applies multiple instances of the run-length encoding to the initialization mask.
    pub fn mark_compressed_init_range(
        &mut self,
        defined: &InitMaskCompressed,
        range: AllocRange,
        repeat: u64,
    ) {
        // An optimization where we can just overwrite an entire range of initialization
        // bits if they are going to be uniformly `1` or `0`.
        if defined.ranges.len() <= 1 {
            self.init_mask.set_range_inbounds(
                range.start,
                range.start + range.size * repeat, // `Size` operations
                defined.initial,
            );
            return;
        }

        for mut j in 0..repeat {
            j *= range.size.bytes();
            j += range.start.bytes();
            let mut cur = defined.initial;
            for range in &defined.ranges {
                let old_j = j;
                j += range;
                self.init_mask.set_range_inbounds(
                    Size::from_bytes(old_j),
                    Size::from_bytes(j),
                    cur,
                );
                cur = !cur;
            }
        }
    }
}