//! The virtual memory representation of the MIR interpreter.

use std::borrow::Cow;
use std::convert::TryFrom;
use std::iter;
use std::ops::{Deref, DerefMut, Range};

use rustc_ast::Mutability;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_target::abi::{Align, HasDataLayout, Size};

use super::{
    read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar,
    ScalarMaybeUninit, UninitBytesAccess,
};
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct Allocation<Tag = (), Extra = ()> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    bytes: Vec<u8>,
    /// Maps from byte addresses to extra data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    relocations: Relocations<Tag>,
    /// Denotes which part of this allocation is initialized.
    init_mask: InitMask,
    /// The size of the allocation. Currently, must always equal `bytes.len()`.
    pub size: Size,
    /// The alignment of the allocation to detect unaligned reads.
    /// (`Align` guarantees that this is a power of two.)
    pub align: Align,
    /// `true` if the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}
pub trait AllocationExtra<Tag>: std::fmt::Debug + Clone {
    // There is no constructor in here because the constructor's type depends
    // on `MemoryKind`, and making things sufficiently generic leads to painful
    // inference failures in the users of this trait.

    /// Hook for performing extra checks on a memory read access.
    ///
    /// Takes read-only access to the allocation so that all memory read
    /// operations can keep taking `&self`. Use a `RefCell` in `AllocExtra` if you
    /// need to mutate.
    #[inline(always)]
    fn memory_read<'tcx>(
        _alloc: &Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Hook for performing extra checks on a memory write access.
    #[inline(always)]
    fn memory_written<'tcx>(
        _alloc: &mut Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Hook for performing extra checks on a memory deallocation.
    /// `size` will be the size of the allocation.
    #[inline(always)]
    fn memory_deallocated<'tcx>(
        _alloc: &mut Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> InterpResult<'tcx> {
        Ok(())
    }
}

// For `Tag = ()` and no extra state, we have a trivial implementation.
impl AllocationExtra<()> for () {}
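// Illustrative sketch (assumption, not part of the original source): a machine that wants
// per-allocation state can implement the hooks above. The `ReadCounter` type and the idea
// of counting read bytes are made up for this example; note the `Cell`, since
// `memory_read` only receives `&Allocation`, exactly as the doc comment suggests.
//
//     #[derive(Debug, Clone, Default)]
//     struct ReadCounter(std::cell::Cell<u64>);
//
//     impl<Tag> AllocationExtra<Tag> for ReadCounter {
//         fn memory_read<'tcx>(
//             alloc: &Allocation<Tag, Self>,
//             _ptr: Pointer<Tag>,
//             size: Size,
//         ) -> InterpResult<'tcx> {
//             // Record how many bytes were read from this allocation.
//             alloc.extra.0.set(alloc.extra.0.get() + size.bytes());
//             Ok(())
//         }
//     }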
// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Tag> Allocation<Tag> {
    /// Creates a read-only allocation initialized by the given bytes.
    pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
        let bytes = slice.into().into_owned();
        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            relocations: Relocations::new(),
            init_mask: InitMask::new(size, true),
            size,
            align,
            mutability: Mutability::Not,
            extra: (),
        }
    }

    pub fn from_byte_aligned_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::from_bytes(1).unwrap())
    }

    pub fn uninit(size: Size, align: Align) -> Self {
        Allocation {
            bytes: vec![0; size.bytes_usize()],
            relocations: Relocations::new(),
            init_mask: InitMask::new(size, false),
            size,
            align,
            mutability: Mutability::Mut,
            extra: (),
        }
    }
}
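// Illustrative sketch (not from the original source): how the two kinds of constructor
// differ. `from_byte_aligned_bytes` yields a fully initialized, immutable allocation,
// while `uninit` yields a mutable one whose init mask starts out all-false.
//
//     let ro = Allocation::<()>::from_byte_aligned_bytes(&b"hello"[..]);
//     assert_eq!(ro.len(), 5);
//
//     let scratch = Allocation::<()>::uninit(Size::from_bytes(16u64), Align::from_bytes(8).unwrap());
//     assert_eq!(scratch.len(), 16);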
impl Allocation<(), ()> {
    /// Add Tag and Extra fields
    pub fn with_tags_and_extra<T, E>(
        self,
        mut tagger: impl FnMut(AllocId) -> T,
        extra: E,
    ) -> Allocation<T, E> {
        Allocation {
            bytes: self.bytes,
            size: self.size,
            relocations: Relocations::from_presorted(
                self.relocations
                    .iter()
                    // The allocations in the relocations (pointers stored *inside* this allocation)
                    // all get the base pointer tag.
                    .map(|&(offset, ((), alloc))| {
                        let tag = tagger(alloc);
                        (offset, (tag, alloc))
                    })
                    .collect(),
            ),
            init_mask: self.init_mask,
            align: self.align,
            mutability: self.mutability,
            extra,
        }
    }
}
/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
    pub fn len(&self) -> usize {
        self.size.bytes_usize()
    }

    /// Looks at a slice which may describe uninitialized bytes or describe a relocation. This differs
    /// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
    /// edges) at all. It further ignores `AllocationExtra` callbacks.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
        &self.bytes[range]
    }

    /// Returns the mask indicating which bytes are initialized.
    pub fn init_mask(&self) -> &InitMask {
        &self.init_mask
    }

    /// Returns the relocation list.
    pub fn relocations(&self) -> &Relocations<Tag> {
        &self.relocations
    }
}
/// Byte accessors.
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
    /// Just a small local helper function to avoid a bit of code repetition.
    /// Returns the range of this allocation that was meant.
    fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
        let end = offset + size; // This does overflow checking.
        let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
        assert!(
            end <= self.len(),
            "Out-of-bounds access at offset {}, size {} in allocation of size {}",
            offset.bytes(),
            size.bytes(),
            self.len()
        );
        offset.bytes_usize()..end
    }

    /// The last argument controls whether we error out when there are uninitialized
    /// or pointer bytes. You should never call this, call `get_bytes` or
    /// `get_bytes_with_uninit_and_ptr` instead.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
    /// on that.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    fn get_bytes_internal(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
        check_init_and_ptr: bool,
    ) -> InterpResult<'tcx, &[u8]> {
        let range = self.check_bounds(ptr.offset, size);

        if check_init_and_ptr {
            self.check_init(ptr, size)?;
            self.check_relocations(cx, ptr, size)?;
        } else {
            // We still don't want relocations on the *edges*.
            self.check_relocation_edges(cx, ptr, size)?;
        }

        AllocationExtra::memory_read(self, ptr, size)?;

        Ok(&self.bytes[range])
    }

    /// Checks that these bytes are initialized and not pointer bytes, and then returns them
    /// as a slice.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        self.get_bytes_internal(cx, ptr, size, true)
    }

    /// It is the caller's responsibility to handle uninitialized and pointer bytes.
    /// However, this still checks that there are no relocations on the *edges*.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn get_bytes_with_uninit_and_ptr(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        self.get_bytes_internal(cx, ptr, size, false)
    }

    /// Just calling this already marks everything as defined and removes relocations,
    /// so be sure to actually put data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes_mut(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, &mut [u8]> {
        let range = self.check_bounds(ptr.offset, size);

        self.mark_init(ptr, size, true);
        self.clear_relocations(cx, ptr, size);

        AllocationExtra::memory_written(self, ptr, size)?;

        Ok(&mut self.bytes[range])
    }
}
/// Reading and writing.
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
    /// Reads bytes until a `0` is encountered. Will error if the end of the allocation is reached
    /// before a `0` is found.
    ///
    /// Most likely, you want to call `Memory::read_c_str` instead of this method.
    pub fn read_c_str(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
    ) -> InterpResult<'tcx, &[u8]> {
        let offset = ptr.offset.bytes_usize();
        Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
            Some(size) => {
                let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
                // Go through `get_bytes` for checks and AllocationExtra hooks.
                // We read the null, so we include it in the request, but we want it removed
                // from the result, so we do subslicing.
                &self.get_bytes(cx, ptr, size_with_null)?[..size]
            }
            // This includes the case where `offset` is out-of-bounds to begin with.
            None => throw_ub!(UnterminatedCString(ptr.erase_tag())),
        })
    }

    /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
    /// relocation. If `allow_uninit_and_ptr` is `false`, also enforces that the memory in the
    /// given range contains neither relocations nor uninitialized bytes.
    pub fn check_bytes(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
        allow_uninit_and_ptr: bool,
    ) -> InterpResult<'tcx> {
        // Check bounds and relocations on the edges.
        self.get_bytes_with_uninit_and_ptr(cx, ptr, size)?;
        // Check uninit and ptr.
        if !allow_uninit_and_ptr {
            self.check_init(ptr, size)?;
            self.check_relocations(cx, ptr, size)?;
        }
        Ok(())
    }

    /// Writes `src` to the memory starting at `ptr.offset`.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `Memory::write_bytes` instead of this method.
    pub fn write_bytes(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");
        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len))?;
        // `zip` would stop when the first iterator ends; we want to definitely
        // cover all of `bytes`.
        for dest in bytes {
            *dest = src.next().expect("iterator was shorter than it said it would be");
        }
        assert_matches!(src.next(), None, "iterator was longer than it said it would be");
        Ok(())
    }

    /// Reads a *non-ZST* scalar.
    ///
    /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        // `get_bytes_with_uninit_and_ptr` tests relocation edges.
        let bytes = self.get_bytes_with_uninit_and_ptr(cx, ptr, size)?;
        // Uninit check happens *after* we established that the alignment is correct.
        // We must not return `Ok()` for unaligned pointers!
        if self.is_init(ptr, size).is_err() {
            // This inflates uninitialized bytes to the entire scalar, even if only a few
            // bytes are uninitialized.
            return Ok(ScalarMaybeUninit::Uninit);
        }
        // Now we do the actual reading.
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
        // See if we got a pointer.
        if size != cx.data_layout().pointer_size {
            // *Now*, we better make sure that the inside is free of relocations too.
            self.check_relocations(cx, ptr, size)?;
        } else {
            if let Some(&(tag, alloc_id)) = self.relocations.get(&ptr.offset) {
                let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
                return Ok(ScalarMaybeUninit::Scalar(ptr.into()));
            }
        }
        // We don't. Just return the bits.
        Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, size)))
    }
    /// Reads a pointer-sized scalar.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
    pub fn read_ptr_sized(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
    ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
        self.read_scalar(cx, ptr, cx.data_layout().pointer_size)
    }

    /// Writes a *non-ZST* scalar.
    ///
    /// ZSTs can't be written because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        val: ScalarMaybeUninit<Tag>,
        type_size: Size,
    ) -> InterpResult<'tcx> {
        let val = match val {
            ScalarMaybeUninit::Scalar(scalar) => scalar,
            ScalarMaybeUninit::Uninit => {
                self.mark_init(ptr, type_size, false);
                return Ok(());
            }
        };

        let bytes = match val.to_bits_or_ptr(type_size, cx) {
            Err(val) => u128::from(val.offset.bytes()),
            Ok(data) => data,
        };

        let endian = cx.data_layout().endian;
        let dst = self.get_bytes_mut(cx, ptr, type_size)?;
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also write a relocation.
        if let Scalar::Ptr(val) = val {
            self.relocations.insert(ptr.offset, (val.tag, val.alloc_id));
        }

        Ok(())
    }

    /// Writes a pointer-sized scalar.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
    pub fn write_ptr_sized(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        val: ScalarMaybeUninit<Tag>,
    ) -> InterpResult<'tcx> {
        let ptr_size = cx.data_layout().pointer_size;
        self.write_scalar(cx, ptr, val, ptr_size)
    }
}
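// Illustrative sketch (not from the original source): a pointer-sized round-trip using the
// methods above, assuming 8-byte pointers, some `cx: &impl HasDataLayout`, and a
// `ptr: Pointer<()>` pointing at offset 0 of the allocation.
//
//     let mut alloc = Allocation::<(), ()>::uninit(Size::from_bytes(8u64), Align::from_bytes(8).unwrap());
//     let val = ScalarMaybeUninit::Scalar(Scalar::from_uint(42u128, cx.data_layout().pointer_size));
//     alloc.write_ptr_sized(cx, ptr, val)?;
//     let back = alloc.read_ptr_sized(cx, ptr)?;
//     // `back` is again `Scalar(42)`; reading uninitialized bytes would instead yield `Uninit`.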
impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Returns all relocations overlapping with the given pointer-offset pair.
    pub fn get_relocations(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> &[(Size, (Tag, AllocId))] {
        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
        // the beginning of this range.
        let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
        let end = ptr.offset + size; // This does overflow checking.
        self.relocations.range(Size::from_bytes(start)..end)
    }
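    // Worked example (not from the original source): with 8-byte pointers, a relocation
    // entry keyed at offset 16 covers bytes 16..24. A query starting at offset 20 must
    // therefore search keys from 20 - (8 - 1) = 13 onwards, which is exactly what the
    // `saturating_sub` above computes, so the entry at 16 is still found.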
    /// Checks that there are no relocations overlapping with the given range.
    #[inline(always)]
    fn check_relocations(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        if self.get_relocations(cx, ptr, size).is_empty() {
            Ok(())
        } else {
            throw_unsup!(ReadPointerAsBytes)
        }
    }

    /// Removes all relocations inside the given range.
    /// If there are relocations overlapping with the edges, they
    /// are removed as well *and* the bytes they cover are marked as
    /// uninitialized. This is a somewhat odd "spooky action at a distance",
    /// but it allows strictly more code to run than if we would just error
    /// immediately in that case.
    fn clear_relocations(&mut self, cx: &impl HasDataLayout, ptr: Pointer<Tag>, size: Size) {
        // Find the start and end of the given range and its outermost relocations.
        let (first, last) = {
            // Find all relocations overlapping the given range.
            let relocations = self.get_relocations(cx, ptr, size);
            if relocations.is_empty() {
                return;
            }

            (
                relocations.first().unwrap().0,
                relocations.last().unwrap().0 + cx.data_layout().pointer_size,
            )
        };
        let start = ptr.offset;
        let end = start + size; // `Size` addition

        // Mark parts of the outermost relocations as uninitialized if they partially fall outside the
        // given range.
        if first < start {
            self.init_mask.set_range(first, start, false);
        }
        if last > end {
            self.init_mask.set_range(end, last, false);
        }

        // Forget all the relocations.
        self.relocations.remove_range(first..last);
    }

    /// Errors if there are relocations overlapping with the edges of the
    /// given memory range.
    #[inline]
    fn check_relocation_edges(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        self.check_relocations(cx, ptr, Size::ZERO)?;
        self.check_relocations(cx, ptr.offset(size, cx)?, Size::ZERO)?;
        Ok(())
    }
}
/// Uninitialized bytes.
impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Checks whether the given range is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
    /// indexes of the first contiguous uninitialized access.
    fn is_init(&self, ptr: Pointer<Tag>, size: Size) -> Result<(), Range<Size>> {
        self.init_mask.is_range_initialized(ptr.offset, ptr.offset + size) // `Size` addition
    }

    /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
    /// error which will report the first range of bytes which is uninitialized.
    fn check_init(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
        self.is_init(ptr, size).or_else(|idx_range| {
            throw_ub!(InvalidUninitBytes(Some(UninitBytesAccess {
                access_ptr: ptr.erase_tag(),
                access_size: size,
                uninit_ptr: Pointer::new(ptr.alloc_id, idx_range.start),
                uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
            })))
        })
    }

    pub fn mark_init(&mut self, ptr: Pointer<Tag>, size: Size, is_init: bool) {
        if size.bytes() == 0 {
            return;
        }
        self.init_mask.set_range(ptr.offset, ptr.offset + size, is_init);
    }
}
/// Run-length encoding of the uninit mask.
/// Used to copy parts of a mask multiple times to another allocation.
pub struct InitMaskCompressed {
    /// Whether the first range is initialized.
    initial: bool,
    /// The lengths of ranges that are run-length encoded.
    /// The initialization state of the ranges alternates starting with `initial`.
    ranges: smallvec::SmallVec<[u64; 1]>,
}

impl InitMaskCompressed {
    pub fn no_bytes_init(&self) -> bool {
        // The `ranges` are run-length encoded and of alternating initialization state.
        // So if `ranges.len() > 1` then the second block is an initialized range.
        !self.initial && self.ranges.len() == 1
    }
}
/// Transferring the initialization mask to other allocations.
impl<Tag, Extra> Allocation<Tag, Extra> {
    /// Creates a run-length encoding of the initialization mask.
    pub fn compress_uninit_range(&self, src: Pointer<Tag>, size: Size) -> InitMaskCompressed {
        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
        // the source and write it to the destination. Even if we optimized the memory accesses,
        // we'd be doing all of this `repeat` times.
        // Therefore we precompute a compressed version of the initialization mask of the source value and
        // then write it back `repeat` times without computing any more information from the source.

        // A precomputed cache for ranges of initialized / uninitialized bits
        // 0000010010001110 will become
        // `[5, 1, 2, 1, 3, 3, 1]`,
        // where each element toggles the state.

        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
        let initial = self.init_mask.get(src.offset);
        let mut cur_len = 1;
        let mut cur = initial;

        for i in 1..size.bytes() {
            // FIXME: optimize to bitshift the current uninitialized block's bits and read the top bit.
            if self.init_mask.get(src.offset + Size::from_bytes(i)) == cur {
                cur_len += 1;
            } else {
                ranges.push(cur_len);
                cur_len = 1;
                cur = !cur;
            }
        }

        ranges.push(cur_len);

        InitMaskCompressed { ranges, initial }
    }
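    // Worked example (not from the original source): for the mask sketched in the comment
    // above, `0000010010001110`, the first bit is clear, so `initial` is `false` and the
    // run lengths come out as `[5, 1, 2, 1, 3, 3, 1]`. A range that is uninitialized
    // throughout compresses to `initial == false` with a single run, which is exactly the
    // case `InitMaskCompressed::no_bytes_init` tests for.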
    /// Applies multiple instances of the run-length encoding to the initialization mask.
    pub fn mark_compressed_init_range(
        &mut self,
        defined: &InitMaskCompressed,
        dest: Pointer<Tag>,
        size: Size,
        repeat: u64,
    ) {
        // An optimization where we can just overwrite an entire range of initialization
        // bits if they are going to be uniformly `1` or `0`.
        if defined.ranges.len() <= 1 {
            self.init_mask.set_range_inbounds(
                dest.offset,
                dest.offset + size * repeat, // `Size` operations
                defined.initial,
            );
            return;
        }

        for mut j in 0..repeat {
            j *= size.bytes();
            j += dest.offset.bytes();
            let mut cur = defined.initial;
            for range in &defined.ranges {
                let old_j = j;
                j += range;
                self.init_mask.set_range_inbounds(
                    Size::from_bytes(old_j),
                    Size::from_bytes(j),
                    cur,
                );
                cur = !cur;
            }
        }
    }
}
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
pub struct Relocations<Tag = (), Id = AllocId>(SortedMap<Size, (Tag, Id)>);

impl<Tag, Id> Relocations<Tag, Id> {
    pub fn new() -> Self {
        Relocations(SortedMap::new())
    }

    // The caller must guarantee that the given relocations are already sorted
    // by address and contain no duplicates.
    pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
        Relocations(SortedMap::from_presorted_elements(r))
    }
}

impl<Tag> Deref for Relocations<Tag> {
    type Target = SortedMap<Size, (Tag, AllocId)>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<Tag> DerefMut for Relocations<Tag> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
/// A partial, owned list of relocations to transfer into another allocation.
pub struct AllocationRelocations<Tag> {
    relative_relocations: Vec<(Size, (Tag, AllocId))>,
}

impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    pub fn prepare_relocation_copy(
        &self,
        cx: &impl HasDataLayout,
        src: Pointer<Tag>,
        size: Size,
        dest: Pointer<Tag>,
        length: u64,
    ) -> AllocationRelocations<Tag> {
        let relocations = self.get_relocations(cx, src, size);
        if relocations.is_empty() {
            return AllocationRelocations { relative_relocations: Vec::new() };
        }

        let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));

        for i in 0..length {
            new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                // compute offset for current repetition
                let dest_offset = dest.offset + size * i; // `Size` operations
                (
                    // shift offsets from source allocation to destination allocation
                    (offset + dest_offset) - src.offset, // `Size` operations
                    reloc,
                )
            }));
        }

        AllocationRelocations { relative_relocations: new_relocations }
    }

    /// Applies a relocation copy.
    /// The affected range, as defined in the parameters to `prepare_relocation_copy`, is expected
    /// to be clear of relocations.
    pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
        self.relocations.insert_presorted(relocations.relative_relocations);
    }
}
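// Illustrative sketch (not from the original source): the intended call sequence when
// copying `length` repetitions of a `size`-byte range from one allocation to another.
// The variable names are made up for the example.
//
//     let relocs = src_alloc.prepare_relocation_copy(cx, src_ptr, size, dest_ptr, length);
//     // ... copy the raw bytes (and the init mask, see `compress_uninit_range`) ...
//     dest_alloc.mark_relocation_range(relocs);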
////////////////////////////////////////////////////////////////////////////////
// Uninitialized byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;

/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is initialized. If it is `false` the byte is uninitialized.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct InitMask {
    blocks: Vec<Block>,
    len: Size,
}

impl InitMask {
    pub const BLOCK_SIZE: u64 = 64;

    pub fn new(size: Size, state: bool) -> Self {
        let mut m = InitMask { blocks: vec![], len: Size::ZERO };
        m.grow(size, state);
        m
    }
    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
    /// indexes for the first contiguous span of the uninitialized access.
    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
        if end > self.len {
            return Err(self.len..end);
        }

        // FIXME(oli-obk): optimize this for allocations larger than a block.
        let idx = (start.bytes()..end.bytes()).map(Size::from_bytes).find(|&i| !self.get(i));

        match idx {
            Some(idx) => {
                let uninit_end = (idx.bytes()..end.bytes())
                    .map(Size::from_bytes)
                    .find(|&i| self.get(i))
                    .unwrap_or(end);
                Err(idx..uninit_end)
            }
            None => Ok(()),
        }
    }

    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
        let len = self.len;
        if end > len {
            self.grow(end - len, new_state);
        }
        self.set_range_inbounds(start, end, new_state);
    }
    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
        let (blocka, bita) = bit_index(start);
        let (blockb, bitb) = bit_index(end);
        if blocka == blockb {
            // First set all bits except the first `bita`,
            // then unset the last `64 - bitb` bits.
            let range = if bitb == 0 {
                u64::MAX << bita
            } else {
                (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
            };
            if new_state {
                self.blocks[blocka] |= range;
            } else {
                self.blocks[blocka] &= !range;
            }
            return;
        }
        // across block boundaries
        if new_state {
            // Set `bita..64` to `1`.
            self.blocks[blocka] |= u64::MAX << bita;
            // Set `0..bitb` to `1`.
            if bitb != 0 {
                self.blocks[blockb] |= u64::MAX >> (64 - bitb);
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = u64::MAX;
            }
        } else {
            // Set `bita..64` to `0`.
            self.blocks[blocka] &= !(u64::MAX << bita);
            // Set `0..bitb` to `0`.
            if bitb != 0 {
                self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = 0;
            }
        }
    }
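    // Worked example (not from the original source): `start = 3`, `end = 10` stays within
    // one block; `bit_index` yields `(0, 3)` and `(0, 10)`, so the mask is
    // `(u64::MAX << 3) & (u64::MAX >> (64 - 10))`, i.e. bits 3..10 (0x3F8). For
    // `start = 60`, `end = 70` the range crosses a block boundary: bits 60..64 of block 0,
    // bits 0..6 of block 1, and any whole blocks in between get written.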
    pub fn get(&self, i: Size) -> bool {
        let (block, bit) = bit_index(i);
        (self.blocks[block] & (1 << bit)) != 0
    }

    pub fn set(&mut self, i: Size, new_state: bool) {
        let (block, bit) = bit_index(i);
        self.set_bit(block, bit, new_state);
    }

    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }

    pub fn grow(&mut self, amount: Size, new_state: bool) {
        if amount.bytes() == 0 {
            return;
        }
        let unused_trailing_bits =
            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
            self.blocks.extend(
                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
            );
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
    }
}
fn bit_index(bits: Size) -> (usize, usize) {
    let bits = bits.bytes();
    let a = bits / InitMask::BLOCK_SIZE;
    let b = bits % InitMask::BLOCK_SIZE;
    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
}
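// Illustrative sketch (not part of the original source): a few sanity checks for the
// bit-index arithmetic and the `InitMask` accessors defined above.
#[cfg(test)]
mod init_mask_tests {
    use super::*;

    #[test]
    fn bit_index_splits_into_block_and_bit() {
        // Byte 70 lives in block 70 / 64 = 1, at bit 70 % 64 = 6.
        assert_eq!(bit_index(Size::from_bytes(70u64)), (1, 6));
    }

    #[test]
    fn set_and_get_roundtrip() {
        let mut mask = InitMask::new(Size::from_bytes(16u64), false);
        assert!(!mask.get(Size::from_bytes(3u64)));
        mask.set(Size::from_bytes(3u64), true);
        assert!(mask.get(Size::from_bytes(3u64)));

        // `set_range` grows the mask on demand, so marking bytes past the current
        // length is fine.
        mask.set_range(Size::from_bytes(8u64), Size::from_bytes(24u64), true);
        assert_eq!(
            mask.is_range_initialized(Size::from_bytes(8u64), Size::from_bytes(24u64)),
            Ok(())
        );
    }
}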