//! The virtual memory representation of the MIR interpreter.

use std::borrow::Cow;
use std::convert::TryFrom;
use std::iter;
use std::ops::{Deref, DerefMut, Range};

use rustc_ast::ast::Mutability;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_macros::HashStable;
use rustc_target::abi::{Align, HasDataLayout, Size};

use super::{
    read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar,
    ScalarMaybeUndef,
};
// NOTE: When adding new fields, make sure to adjust the `Snapshot` impl in
// `src/librustc_mir/interpret/snapshot.rs`.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
pub struct Allocation<Tag = (), Extra = ()> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    bytes: Vec<u8>,
    /// Maps from byte addresses to extra data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    relocations: Relocations<Tag>,
    /// Denotes which part of this allocation is initialized.
    undef_mask: UndefMask,
    /// The size of the allocation. Currently, must always equal `bytes.len()`.
    pub size: Size,
    /// The alignment of the allocation to detect unaligned reads.
    /// (`Align` guarantees that this is a power of two.)
    pub align: Align,
    /// `true` if the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}
pub trait AllocationExtra<Tag>: ::std::fmt::Debug + Clone {
    // There is no constructor in here because the constructor's type depends
    // on `MemoryKind`, and making things sufficiently generic leads to painful
    // inference failure.

    /// Hook for performing extra checks on a memory read access.
    ///
    /// Takes read-only access to the allocation so that all memory read
    /// operations can keep taking `&self`. Use a `RefCell` in `AllocExtra` if you
    /// need to mutate.
    fn memory_read(
        _alloc: &Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Hook for performing extra checks on a memory write access.
    fn memory_written(
        _alloc: &mut Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> InterpResult<'tcx> {
        Ok(())
    }

    /// Hook for performing extra checks on a memory deallocation.
    /// `size` will be the size of the allocation.
    fn memory_deallocated(
        _alloc: &mut Allocation<Tag, Self>,
        _ptr: Pointer<Tag>,
        _size: Size,
    ) -> InterpResult<'tcx> {
        Ok(())
    }
}
// For `Tag = ()` and no extra state, we have a trivial implementation.
impl AllocationExtra<()> for () {}

// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Tag> Allocation<Tag> {
    /// Creates a read-only allocation initialized by the given bytes.
    pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
        let bytes = slice.into().into_owned();
        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            relocations: Relocations::new(),
            undef_mask: UndefMask::new(size, true),
            size,
            align,
            mutability: Mutability::Not,
            extra: (),
        }
    }
    pub fn from_byte_aligned_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::from_bytes(1).unwrap())
    }
    pub fn undef(size: Size, align: Align) -> Self {
        Allocation {
            bytes: vec![0; size.bytes_usize()],
            relocations: Relocations::new(),
            undef_mask: UndefMask::new(size, false),
            size,
            align,
            mutability: Mutability::Mut,
            extra: (),
        }
    }
}
impl Allocation<(), ()> {
    /// Adds the `Tag` and `Extra` fields.
    pub fn with_tags_and_extra<T, E>(
        self,
        mut tagger: impl FnMut(AllocId) -> T,
        extra: E,
    ) -> Allocation<T, E> {
        Allocation {
            bytes: self.bytes,
            relocations: Relocations::from_presorted(
                self.relocations
                    .iter()
                    // The allocations in the relocations (pointers stored *inside* this allocation)
                    // all get the base pointer tag.
                    .map(|&(offset, ((), alloc))| {
                        let tag = tagger(alloc);
                        (offset, (tag, alloc))
                    })
                    .collect(),
            ),
            undef_mask: self.undef_mask,
            size: self.size,
            align: self.align,
            mutability: self.mutability,
            extra,
        }
    }
}
/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
    pub fn len(&self) -> usize {
        self.size.bytes_usize()
    }

    /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
    /// from `get_bytes_with_undef_and_ptr` in that it does no relocation checks (even on the
    /// edges) at all. It further ignores `AllocationExtra` callbacks.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_undef_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
        &self.bytes[range]
    }

    /// Returns the undef mask.
    pub fn undef_mask(&self) -> &UndefMask {
        &self.undef_mask
    }

    /// Returns the relocation list.
    pub fn relocations(&self) -> &Relocations<Tag> {
        &self.relocations
    }
}
impl<'tcx> rustc_serialize::UseSpecializedDecodable for &'tcx Allocation {}
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
    /// Just a small local helper function to avoid a bit of code repetition.
    /// Returns the range of this allocation that the given `offset` and `size` refer to.
    fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
        let end = offset + size; // This does overflow checking.
        let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
        assert!(
            end <= self.len(),
            "Out-of-bounds access at offset {}, size {} in allocation of size {}",
            offset.bytes(),
            size.bytes(),
            self.len()
        );
        offset.bytes_usize()..end
    }
    /// The last argument controls whether we error out when there are undefined
    /// or pointer bytes. You should never call this; call `get_bytes` or
    /// `get_bytes_with_undef_and_ptr` instead.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
    /// on that.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    fn get_bytes_internal(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
        check_defined_and_ptr: bool,
    ) -> InterpResult<'tcx, &[u8]> {
        let range = self.check_bounds(ptr.offset, size);

        if check_defined_and_ptr {
            self.check_defined(ptr, size)?;
            self.check_relocations(cx, ptr, size)?;
        } else {
            // We still don't want relocations on the *edges*.
            self.check_relocation_edges(cx, ptr, size)?;
        }

        AllocationExtra::memory_read(self, ptr, size)?;

        Ok(&self.bytes[range])
    }
    /// Checks that these bytes are initialized and not pointer bytes, and then returns them.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        self.get_bytes_internal(cx, ptr, size, true)
    }
    /// It is the caller's responsibility to handle undefined and pointer bytes.
    /// However, this still checks that there are no relocations on the *edges*.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    pub fn get_bytes_with_undef_and_ptr(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, &[u8]> {
        self.get_bytes_internal(cx, ptr, size, false)
    }
    /// Just calling this already marks everything as defined and removes relocations,
    /// so be sure to actually put data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes_mut(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, &mut [u8]> {
        let range = self.check_bounds(ptr.offset, size);

        self.mark_definedness(ptr, size, true);
        self.clear_relocations(cx, ptr, size)?;

        AllocationExtra::memory_written(self, ptr, size)?;

        Ok(&mut self.bytes[range])
    }
}
/// Reading and writing.
impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
    /// Reads bytes until a `0` is encountered. Will error if the end of the allocation is reached
    /// before a `0` is found.
    ///
    /// Most likely, you want to call `Memory::read_c_str` instead of this method.
    pub fn read_c_str(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
    ) -> InterpResult<'tcx, &[u8]> {
        let offset = ptr.offset.bytes_usize();
        Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
            Some(size) => {
                let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
                // Go through `get_bytes` for checks and AllocationExtra hooks.
                // We read the null, so we include it in the request, but we want it removed
                // from the result, so we do subslicing.
                &self.get_bytes(cx, ptr, size_with_null)?[..size]
            }
            // This includes the case where `offset` is out-of-bounds to begin with.
            None => throw_ub!(UnterminatedCString(ptr.erase_tag())),
        })
    }
    /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
    /// relocation. If `allow_ptr_and_undef` is `false`, also enforces that the memory in the
    /// given range contains neither relocations nor undef bytes.
    pub fn check_bytes(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
        allow_ptr_and_undef: bool,
    ) -> InterpResult<'tcx> {
        // Check bounds and relocations on the edges.
        self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
        // Check undef and ptr.
        if !allow_ptr_and_undef {
            self.check_defined(ptr, size)?;
            self.check_relocations(cx, ptr, size)?;
        }
        Ok(())
    }
    /// Writes `src` to the memory starting at `ptr.offset`.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `Memory::write_bytes` instead of this method.
    pub fn write_bytes(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        src: impl IntoIterator<Item = u8>,
    ) -> InterpResult<'tcx> {
        let mut src = src.into_iter();
        let (lower, upper) = src.size_hint();
        let len = upper.expect("can only write bounded iterators");
        assert_eq!(lower, len, "can only write iterators with a precise length");
        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len))?;
        // `zip` would stop when the first iterator ends; we want to definitely
        // cover all of `bytes`.
        for dest in bytes {
            *dest = src.next().expect("iterator was shorter than it said it would be");
        }
        src.next().expect_none("iterator was longer than it said it would be");
        Ok(())
    }
    /// Reads a *non-ZST* scalar.
    ///
    /// ZSTs can't be read for two reasons:
    /// * byte-order cannot work with zero-element buffers;
    /// * in order to obtain a `Pointer`, we need to check for ZSTness anyway due to integer
    ///   pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
        // `get_bytes_with_undef_and_ptr` tests relocation edges.
        let bytes = self.get_bytes_with_undef_and_ptr(cx, ptr, size)?;
        // Undef check happens *after* we established that the alignment is correct.
        // We must not return `Ok()` for unaligned pointers!
        if self.is_defined(ptr, size).is_err() {
            // This inflates undefined bytes to the entire scalar, even if only a few
            // bytes are undefined.
            return Ok(ScalarMaybeUndef::Undef);
        }
        // Now we do the actual reading.
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
        // See if we got a pointer.
        if size != cx.data_layout().pointer_size {
            // *Now*, we better make sure that the inside is free of relocations too.
            self.check_relocations(cx, ptr, size)?;
        } else if let Some(&(tag, alloc_id)) = self.relocations.get(&ptr.offset) {
            let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
            return Ok(ScalarMaybeUndef::Scalar(ptr.into()));
        }
        // We don't. Just return the bits.
        Ok(ScalarMaybeUndef::Scalar(Scalar::from_uint(bits, size)))
    }
    /// Reads a pointer-sized scalar.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
    pub fn read_ptr_sized(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
    ) -> InterpResult<'tcx, ScalarMaybeUndef<Tag>> {
        self.read_scalar(cx, ptr, cx.data_layout().pointer_size)
    }
    /// Writes a *non-ZST* scalar.
    ///
    /// ZSTs can't be written for two reasons:
    /// * byte-order cannot work with zero-element buffers;
    /// * in order to obtain a `Pointer`, we need to check for ZSTness anyway due to integer
    ///   pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        val: ScalarMaybeUndef<Tag>,
        type_size: Size,
    ) -> InterpResult<'tcx> {
        let val = match val {
            ScalarMaybeUndef::Scalar(scalar) => scalar,
            ScalarMaybeUndef::Undef => {
                self.mark_definedness(ptr, type_size, false);
                return Ok(());
            }
        };

        let bytes = match val.to_bits_or_ptr(type_size, cx) {
            Err(val) => u128::from(val.offset.bytes()),
            Ok(data) => data,
        };

        let endian = cx.data_layout().endian;
        let dst = self.get_bytes_mut(cx, ptr, type_size)?;
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also write a relocation.
        if let Scalar::Ptr(val) = val {
            self.relocations.insert(ptr.offset, (val.tag, val.alloc_id));
        }

        Ok(())
    }
    /// Writes a pointer-sized scalar.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
    pub fn write_ptr_sized(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        val: ScalarMaybeUndef<Tag>,
    ) -> InterpResult<'tcx> {
        let ptr_size = cx.data_layout().pointer_size;
        self.write_scalar(cx, ptr, val, ptr_size)
    }
}
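
// A minimal, self-contained sketch (not part of the interpreter) of how a pointer is stored in
// an allocation, per the field docs and `write_scalar`/`read_scalar` above: the data bytes hold
// only the pointer's *offset*, and a relocation entry keyed by the first byte records which
// allocation it points into. Plain standalone types; all names here are hypothetical.
#[cfg(test)]
mod pointer_bytes_sketch {
    use std::collections::BTreeMap;

    #[derive(Debug, PartialEq, Clone, Copy)]
    struct Ptr {
        alloc_id: u32,
        offset: u64,
    }

    struct Alloc {
        bytes: Vec<u8>,
        relocations: BTreeMap<usize, u32>, // first byte of the pointer -> target allocation
    }

    fn write_ptr(alloc: &mut Alloc, at: usize, ptr: Ptr) {
        // Only the offset goes into the data bytes.
        alloc.bytes[at..at + 8].copy_from_slice(&ptr.offset.to_le_bytes());
        // The target allocation is remembered on the side.
        alloc.relocations.insert(at, ptr.alloc_id);
    }

    fn read_ptr(alloc: &Alloc, at: usize) -> Option<Ptr> {
        let mut buf = [0u8; 8];
        buf.copy_from_slice(&alloc.bytes[at..at + 8]);
        let offset = u64::from_le_bytes(buf);
        alloc.relocations.get(&at).map(|&alloc_id| Ptr { alloc_id, offset })
    }

    #[test]
    fn round_trips() {
        let mut alloc = Alloc { bytes: vec![0; 16], relocations: BTreeMap::new() };
        let ptr = Ptr { alloc_id: 7, offset: 42 };
        write_ptr(&mut alloc, 8, ptr);
        assert_eq!(read_ptr(&alloc, 8), Some(ptr));
    }
}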
impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Returns all relocations overlapping with the given pointer-offset pair.
    pub fn get_relocations(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> &[(Size, (Tag, AllocId))] {
        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
        // the beginning of this range.
        let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
        let end = ptr.offset + size; // This does overflow checking.
        self.relocations.range(Size::from_bytes(start)..end)
    }
    /// Checks that there are no relocations overlapping with the given range.
    fn check_relocations(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        if self.get_relocations(cx, ptr, size).is_empty() {
            Ok(())
        } else {
            throw_unsup!(ReadPointerAsBytes)
        }
    }
    /// Removes all relocations inside the given range.
    /// If there are relocations overlapping with the edges, they
    /// are removed as well *and* the bytes they cover are marked as
    /// uninitialized. This is a somewhat odd "spooky action at a distance",
    /// but it allows strictly more code to run than if we would just error
    /// immediately in that case.
    fn clear_relocations(
        &mut self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        // Find the start and end of the given range and its outermost relocations.
        let (first, last) = {
            // Find all relocations overlapping the given range.
            let relocations = self.get_relocations(cx, ptr, size);
            if relocations.is_empty() {
                return Ok(());
            }

            (
                relocations.first().unwrap().0,
                relocations.last().unwrap().0 + cx.data_layout().pointer_size,
            )
        };
        let start = ptr.offset;
        let end = start + size; // `Size` addition

        // Mark parts of the outermost relocations as undefined if they partially fall outside the
        // given range.
        if first < start {
            self.undef_mask.set_range(first, start, false);
        }
        if last > end {
            self.undef_mask.set_range(end, last, false);
        }

        // Forget all the relocations.
        self.relocations.remove_range(first..last);

        Ok(())
    }
    /// Errors if there are relocations overlapping with the edges of the
    /// given memory range.
    fn check_relocation_edges(
        &self,
        cx: &impl HasDataLayout,
        ptr: Pointer<Tag>,
        size: Size,
    ) -> InterpResult<'tcx> {
        self.check_relocations(cx, ptr, Size::ZERO)?;
        self.check_relocations(cx, ptr.offset(size, cx)?, Size::ZERO)?;
        Ok(())
    }
}
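
// A minimal, self-contained sketch (not part of the interpreter) of the relocation bookkeeping
// above: entries are keyed by the *first* byte of a pointer and cover `ptr_size` consecutive
// bytes, so overlap queries have to look back `ptr_size - 1` bytes, and clearing a range removes
// straddling entries while marking their out-of-range bytes as undefined. `BTreeMap` stands in
// for `SortedMap`; all names here are hypothetical.
#[cfg(test)]
mod relocation_sketch {
    use std::collections::BTreeMap;

    const PTR_SIZE: u64 = 4; // hypothetical pointer size in bytes

    /// Keys of all entries overlapping `offset..offset + len`.
    fn overlapping(map: &BTreeMap<u64, &'static str>, offset: u64, len: u64) -> Vec<u64> {
        let start = offset.saturating_sub(PTR_SIZE - 1);
        map.range(start..offset + len).map(|(&k, _)| k).collect()
    }

    /// Clears `start..end`: removes overlapping entries and marks the bytes they covered
    /// *outside* the cleared range as undefined (the range itself is about to be overwritten).
    fn clear(map: &mut BTreeMap<u64, &'static str>, defined: &mut [bool], start: u64, end: u64) {
        for key in overlapping(map, start, end - start) {
            for byte in key..key + PTR_SIZE {
                if byte < start || byte >= end {
                    defined[byte as usize] = false;
                }
            }
            map.remove(&key);
        }
    }

    #[test]
    fn lookback_finds_straddling_entries() {
        let map = BTreeMap::from([(16, "covers bytes 16..20")]);
        // A query for byte 18 starts inside that entry, so its key (16) is reported.
        assert_eq!(overlapping(&map, 18, 1), vec![16]);
        // A query for bytes 20..24 starts right after it ends: no overlap.
        assert!(overlapping(&map, 20, 4).is_empty());
    }

    #[test]
    fn clearing_trims_the_edges() {
        let mut map = BTreeMap::from([(2, "covers bytes 2..6")]);
        let mut defined = vec![true; 8];
        clear(&mut map, &mut defined, 4, 6);
        assert!(map.is_empty());
        // Bytes 2 and 3 belonged to the removed relocation but are not being overwritten,
        // so they become undefined; the cleared range 4..6 is left to the caller.
        assert_eq!(defined, [true, true, false, false, true, true, true, true]);
    }
}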
impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Checks whether the given range is entirely defined.
    ///
    /// Returns `Ok(())` if it's defined. Otherwise returns the index of the byte
    /// at which the first undefined access begins.
    fn is_defined(&self, ptr: Pointer<Tag>, size: Size) -> Result<(), Size> {
        self.undef_mask.is_range_defined(ptr.offset, ptr.offset + size) // `Size` addition
    }

    /// Checks that a range of bytes is defined. If not, returns the `InvalidUndefBytes`
    /// error which will report the first byte which is undefined.
    fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
        self.is_defined(ptr, size)
            .or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
    }
    pub fn mark_definedness(&mut self, ptr: Pointer<Tag>, size: Size, new_state: bool) {
        if size.bytes() == 0 {
            return;
        }
        self.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
    }
}
/// Run-length encoding of the undef mask.
/// Used to copy parts of a mask multiple times to another allocation.
pub struct AllocationDefinedness {
    /// The definedness of the first range.
    initial: bool,
    /// The lengths of ranges that are run-length encoded.
    /// The definedness of the ranges alternate starting with `initial`.
    ranges: smallvec::SmallVec<[u64; 1]>,
}

impl AllocationDefinedness {
    pub fn all_bytes_undef(&self) -> bool {
        // The `ranges` are run-length encoded and of alternating definedness.
        // So if `ranges.len() > 1` then the second block is a range of defined.
        !self.initial && self.ranges.len() == 1
    }
}
/// Transferring the definedness mask to other allocations.
impl<Tag, Extra> Allocation<Tag, Extra> {
    /// Creates a run-length encoding of the undef mask.
    pub fn compress_undef_range(&self, src: Pointer<Tag>, size: Size) -> AllocationDefinedness {
        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
        // a naive undef mask copying algorithm would repeatedly have to read the undef mask from
        // the source and write it to the destination. Even if we optimized the memory accesses,
        // we'd be doing all of this `repeat` times.
        // Therefore we precompute a compressed version of the undef mask of the source value and
        // then write it back `repeat` times without computing any more information from the source.

        // A precomputed cache for ranges of defined/undefined bits:
        // 0000010010001110 will become
        // `[5, 1, 2, 1, 3, 3, 1]`,
        // where each element toggles the state.

        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
        let initial = self.undef_mask.get(src.offset);
        let mut cur_len = 1;
        let mut cur = initial;

        for i in 1..size.bytes() {
            // FIXME: optimize to bitshift the current undef block's bits and read the top bit.
            if self.undef_mask.get(src.offset + Size::from_bytes(i)) == cur {
                cur_len += 1;
            } else {
                ranges.push(cur_len);
                cur_len = 1;
                cur = !cur;
            }
        }

        ranges.push(cur_len);

        AllocationDefinedness { ranges, initial }
    }
    /// Applies multiple instances of the run-length encoding to the undef mask.
    pub fn mark_compressed_undef_range(
        &mut self,
        defined: &AllocationDefinedness,
        dest: Pointer<Tag>,
        size: Size,
        repeat: u64,
    ) {
        // An optimization where we can just overwrite an entire range of definedness bits if
        // they are going to be uniformly `1` or `0`.
        if defined.ranges.len() <= 1 {
            self.undef_mask.set_range_inbounds(
                dest.offset,
                dest.offset + size * repeat, // `Size` operations
                defined.initial,
            );
            return;
        }

        for mut j in 0..repeat {
            j *= size.bytes();
            j += dest.offset.bytes();
            let mut cur = defined.initial;
            for range in &defined.ranges {
                let old_j = j;
                j += range;
                self.undef_mask.set_range_inbounds(
                    Size::from_bytes(old_j),
                    Size::from_bytes(j),
                    cur,
                );
                cur = !cur;
            }
        }
    }
}
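
// A standalone sketch (not part of the interpreter) of the run-length encoding used by
// `compress_undef_range` and replayed by `mark_compressed_undef_range` above, on a plain
// `&[bool]` mask instead of `UndefMask`/`Size`; all names here are hypothetical. It reproduces
// the example from the comment: the mask 0000010010001110 compresses to `[5, 1, 2, 1, 3, 3, 1]`,
// with `initial` recording the state of the first run.
#[cfg(test)]
mod undef_rle_sketch {
    /// Compresses a definedness mask into (initial state, alternating run lengths).
    fn compress(mask: &[bool]) -> (bool, Vec<u64>) {
        let initial = mask[0];
        let mut ranges = Vec::new();
        let mut cur = initial;
        let mut cur_len = 1u64;
        for &bit in &mask[1..] {
            if bit == cur {
                cur_len += 1;
            } else {
                ranges.push(cur_len);
                cur_len = 1;
                cur = !cur;
            }
        }
        ranges.push(cur_len);
        (initial, ranges)
    }

    /// Appends `repeat` decoded copies of the mask to `dest`, mirroring the
    /// "write it back `repeat` times" idea from the comments above.
    fn apply(dest: &mut Vec<bool>, initial: bool, ranges: &[u64], repeat: u64) {
        for _ in 0..repeat {
            let mut cur = initial;
            for &len in ranges {
                dest.extend(std::iter::repeat(cur).take(len as usize));
                cur = !cur;
            }
        }
    }

    #[test]
    fn matches_the_documented_example() {
        let mask: Vec<bool> = "0000010010001110".chars().map(|c| c == '1').collect();
        assert_eq!(compress(&mask), (false, vec![5, 1, 2, 1, 3, 3, 1]));
    }

    #[test]
    fn round_trips_with_repetition() {
        let mask = [false, false, true];
        let (initial, ranges) = compress(&mask);
        let mut dest = Vec::new();
        apply(&mut dest, initial, &ranges, 2);
        assert_eq!(dest, [false, false, true, false, false, true]);
    }
}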
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct Relocations<Tag = (), Id = AllocId>(SortedMap<Size, (Tag, Id)>);

impl<Tag, Id> Relocations<Tag, Id> {
    pub fn new() -> Self {
        Relocations(SortedMap::new())
    }

    // The caller must guarantee that the given relocations are already sorted
    // by address and contain no duplicates.
    pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self {
        Relocations(SortedMap::from_presorted_elements(r))
    }
}

impl<Tag> Deref for Relocations<Tag> {
    type Target = SortedMap<Size, (Tag, AllocId)>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<Tag> DerefMut for Relocations<Tag> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
/// A partial, owned list of relocations to transfer into another allocation.
pub struct AllocationRelocations<Tag> {
    relative_relocations: Vec<(Size, (Tag, AllocId))>,
}
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    pub fn prepare_relocation_copy(
        &self,
        cx: &impl HasDataLayout,
        src: Pointer<Tag>,
        size: Size,
        dest: Pointer<Tag>,
        length: u64,
    ) -> AllocationRelocations<Tag> {
        let relocations = self.get_relocations(cx, src, size);
        if relocations.is_empty() {
            return AllocationRelocations { relative_relocations: Vec::new() };
        }

        let mut new_relocations = Vec::with_capacity(relocations.len() * (length as usize));

        for i in 0..length {
            new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                // Compute the offset for the current repetition.
                let dest_offset = dest.offset + size * i; // `Size` operations
                (
                    // Shift offsets from the source allocation to the destination allocation.
                    (offset + dest_offset) - src.offset, // `Size` operations
                    reloc,
                )
            }));
        }

        AllocationRelocations { relative_relocations: new_relocations }
    }
    /// Applies a relocation copy.
    /// The affected range, as defined in the parameters to `prepare_relocation_copy`, is expected
    /// to be clear of relocations.
    pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
        self.relocations.insert_presorted(relocations.relative_relocations);
    }
}
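
// A small standalone sketch (not part of the interpreter) of the offset arithmetic in
// `prepare_relocation_copy` above: a relocation at `offset` inside the source range
// `src..src + size` is replayed once per repetition `i`, landing at
// `(offset + dest + size * i) - src` in the destination. Plain integers; the helper name is
// hypothetical.
#[cfg(test)]
mod relocation_copy_offsets_sketch {
    fn shifted(offset: u64, src: u64, dest: u64, size: u64, length: u64) -> Vec<u64> {
        (0..length).map(|i| (offset + (dest + size * i)) - src).collect()
    }

    #[test]
    fn repeats_relative_to_each_copy() {
        // A relocation 2 bytes into the source range, copied 3 times into a
        // destination starting at offset 16, 8 bytes per copy.
        assert_eq!(shifted(10, 8, 16, 8, 3), vec![18, 26, 34]);
    }
}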
////////////////////////////////////////////////////////////////////////////////
// Undefined byte tracking
////////////////////////////////////////////////////////////////////////////////

/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is defined. If it is `false`, the byte is undefined.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
#[derive(HashStable)]
pub struct UndefMask {
    blocks: Vec<u64>,
    len: Size,
}

impl UndefMask {
    pub const BLOCK_SIZE: u64 = 64;
    pub fn new(size: Size, state: bool) -> Self {
        let mut m = UndefMask { blocks: vec![], len: Size::ZERO };
        m.grow(size, state);
        m
    }
    /// Checks whether the range `start..end` (end-exclusive) is entirely defined.
    ///
    /// Returns `Ok(())` if it's defined. Otherwise returns the index of the byte
    /// at which the first undefined access begins.
    pub fn is_range_defined(&self, start: Size, end: Size) -> Result<(), Size> {
        if end > self.len {
            return Err(self.len);
        }

        // FIXME(oli-obk): optimize this for allocations larger than a block.
        let idx = (start.bytes()..end.bytes()).map(Size::from_bytes).find(|&i| !self.get(i));

        match idx {
            Some(idx) => Err(idx),
            None => Ok(()),
        }
    }
    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
        let len = self.len;
        if end > len {
            self.grow(end - len, new_state);
        }
        self.set_range_inbounds(start, end, new_state);
    }
    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
        let (blocka, bita) = bit_index(start);
        let (blockb, bitb) = bit_index(end);
        if blocka == blockb {
            // First set all bits except the first `bita`,
            // then unset the last `64 - bitb` bits.
            let range = if bitb == 0 {
                u64::MAX << bita
            } else {
                (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
            };
            if new_state {
                self.blocks[blocka] |= range;
            } else {
                self.blocks[blocka] &= !range;
            }
            return;
        }
        // Across block boundaries.
        if new_state {
            // Set `bita..64` to `1`.
            self.blocks[blocka] |= u64::MAX << bita;
            // Set `0..bitb` to `1`.
            if bitb != 0 {
                self.blocks[blockb] |= u64::MAX >> (64 - bitb);
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = u64::MAX;
            }
        } else {
            // Set `bita..64` to `0`.
            self.blocks[blocka] &= !(u64::MAX << bita);
            // Set `0..bitb` to `0`.
            if bitb != 0 {
                self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = 0;
            }
        }
    }
    pub fn get(&self, i: Size) -> bool {
        let (block, bit) = bit_index(i);
        (self.blocks[block] & (1 << bit)) != 0
    }
    pub fn set(&mut self, i: Size, new_state: bool) {
        let (block, bit) = bit_index(i);
        self.set_bit(block, bit, new_state);
    }
    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }
    pub fn grow(&mut self, amount: Size, new_state: bool) {
        if amount.bytes() == 0 {
            return;
        }
        let unused_trailing_bits =
            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
            self.blocks.extend(
                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
            );
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
    }
}
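
// A small standalone sketch (not part of the interpreter) of the single-block mask construction
// used in `set_range_inbounds` above: for bit indices `bita..bitb` within one 64-bit block,
// `(u64::MAX << bita) & (u64::MAX >> (64 - bitb))` selects exactly those bits, with `bitb == 0`
// meaning "up to the end of the block" (the shift by 64 that the special case avoids would
// overflow). The helper name is hypothetical.
#[cfg(test)]
mod block_mask_sketch {
    fn mask(bita: u32, bitb: u32) -> u64 {
        if bitb == 0 {
            u64::MAX << bita
        } else {
            (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
        }
    }

    #[test]
    fn selects_the_requested_bits() {
        // Bits 3..10 set, everything else clear.
        assert_eq!(mask(3, 10), 0b11_1111_1000);
        // `bitb == 0` covers the whole tail of the block.
        assert_eq!(mask(60, 0), 0b1111 << 60);
        assert_eq!(mask(0, 0), u64::MAX);
    }
}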
fn bit_index(bits: Size) -> (usize, usize) {
    let bits = bits.bytes();
    let a = bits / UndefMask::BLOCK_SIZE;
    let b = bits % UndefMask::BLOCK_SIZE;
    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
}
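
// A small standalone sketch (not part of the interpreter) of `bit_index` above: a byte index is
// split into a block index and a bit index within that 64-bit block by division and remainder.
// The helper name is hypothetical.
#[cfg(test)]
mod bit_index_sketch {
    const BLOCK_SIZE: u64 = 64;

    fn bit_index(byte: u64) -> (usize, usize) {
        ((byte / BLOCK_SIZE) as usize, (byte % BLOCK_SIZE) as usize)
    }

    #[test]
    fn splits_byte_indices_into_block_and_bit() {
        assert_eq!(bit_index(0), (0, 0));
        assert_eq!(bit_index(63), (0, 63));
        assert_eq!(bit_index(64), (1, 0));
        assert_eq!(bit_index(200), (3, 8)); // 200 = 3 * 64 + 8
    }
}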