//! The virtual memory representation of the MIR interpreter.

use std::borrow::Cow;
use std::convert::{TryFrom, TryInto};
use std::iter;
use std::ops::{Deref, Range};
use std::ptr;

use rustc_ast::Mutability;
use rustc_data_structures::sorted_map::SortedMap;
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Align, HasDataLayout, Size};

use super::{
    read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
    ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, UndefinedBehaviorInfo, UninitBytesAccess,
    UnsupportedOpInfo,
};
use crate::ty;

/// This type represents an Allocation in the Miri/CTFE core engine.
///
/// Its public API is rather low-level, working directly with allocation offsets and a custom error
/// type to account for the lack of an AllocId on this level. The Miri/CTFE core engine `memory`
/// module provides higher-level access.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct Allocation<Tag = AllocId, Extra = ()> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    bytes: Box<[u8]>,
    /// Maps from byte addresses to extra data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    relocations: Relocations<Tag>,
    /// Denotes which part of this allocation is initialized.
    init_mask: InitMask,
    /// The alignment of the allocation to detect unaligned reads.
    /// (`Align` guarantees that this is a power of two.)
    pub align: Align,
    /// `true` if the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}

/// We have our own error type that does not know about the `AllocId`; that information
/// is added when converting to `InterpError`.
#[derive(Debug)]
pub enum AllocError {
    /// Encountered a pointer where we needed raw bytes.
    ReadPointerAsBytes,
    /// Partially overwriting a pointer.
    PartialPointerOverwrite(Size),
    /// Using uninitialized data where it is not allowed.
    InvalidUninitBytes(Option<UninitBytesAccess>),
}
pub type AllocResult<T = ()> = Result<T, AllocError>;

impl AllocError {
    pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
        use AllocError::*;
        match self {
            ReadPointerAsBytes => InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes),
            PartialPointerOverwrite(offset) => InterpError::Unsupported(
                UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
            ),
            InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
                UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
            ),
        }
    }
}

/// The information that makes up a memory access: offset and size.
#[derive(Copy, Clone, Debug)]
pub struct AllocRange {
    pub start: Size,
    pub size: Size,
}

/// Free-starting constructor for less syntactic overhead.
#[inline(always)]
pub fn alloc_range(start: Size, size: Size) -> AllocRange {
    AllocRange { start, size }
}

impl AllocRange {
    #[inline(always)]
    pub fn end(self) -> Size {
        self.start + self.size // This does overflow checking.
    }

    /// Returns the `subrange` within this range; panics if it is not a subrange.
    #[inline]
    pub fn subrange(self, subrange: AllocRange) -> AllocRange {
        let sub_start = self.start + subrange.start;
        let range = alloc_range(sub_start, subrange.size);
        assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
        range
    }
}

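// Illustrative sketch (not part of the upstream source): how `alloc_range` and
// `AllocRange::subrange` compose. The values are arbitrary.
//
//     let outer = alloc_range(Size::from_bytes(4), Size::from_bytes(8));
//     assert_eq!(outer.end(), Size::from_bytes(12)); // overflow-checked `Size` addition
//     // A 2-byte access at offset 1 *within* `outer` covers absolute offsets 5..7.
//     let inner = outer.subrange(alloc_range(Size::from_bytes(1), Size::from_bytes(2)));
//     assert_eq!((inner.start, inner.end()), (Size::from_bytes(5), Size::from_bytes(7)));
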
// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Tag> Allocation<Tag> {
    /// Creates an allocation initialized by the given bytes.
    pub fn from_bytes<'a>(
        slice: impl Into<Cow<'a, [u8]>>,
        align: Align,
        mutability: Mutability,
    ) -> Self {
        let bytes = Box::<[u8]>::from(slice.into());
        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            relocations: Relocations::new(),
            init_mask: InitMask::new(size, true),
            align,
            mutability,
            extra: (),
        }
    }

    pub fn from_bytes_byte_aligned_immutable<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::ONE, Mutability::Not)
    }

    /// Tries to create an Allocation of `size` bytes, failing if there is not enough memory
    /// available to the compiler to do so.
    pub fn uninit(size: Size, align: Align, panic_on_fail: bool) -> InterpResult<'static, Self> {
        let bytes = Box::<[u8]>::try_new_zeroed_slice(size.bytes_usize()).map_err(|_| {
            // This results in an error that can happen non-deterministically, since the memory
            // available to the compiler can change between runs. Normally queries are always
            // deterministic. However, we can be non-deterministic here because all uses of const
            // evaluation (including ConstProp!) will make compilation fail (via hard error
            // or ICE) upon encountering a `MemoryExhausted` error.
            if panic_on_fail {
                panic!("Allocation::uninit called with panic_on_fail had allocation failure")
            }
            ty::tls::with(|tcx| {
                tcx.sess.delay_span_bug(DUMMY_SP, "exhausted memory during interpretation")
            });
            InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
        })?;
        // SAFETY: the box was zero-allocated, which is a valid initial value for Box<[u8]>
        let bytes = unsafe { bytes.assume_init() };
        Ok(Allocation {
            bytes,
            relocations: Relocations::new(),
            init_mask: InitMask::new(size, false),
            align,
            mutability: Mutability::Mut,
            extra: (),
        })
    }
}
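
// Illustrative sketch (not part of the upstream source): the simplest way to obtain a fully
// initialized, immutable allocation, e.g. for the bytes of a string constant.
//
//     let alloc: Allocation = Allocation::from_bytes_byte_aligned_immutable(&b"hello"[..]);
//     assert_eq!(alloc.len(), 5);
//     assert_eq!(alloc.align, Align::ONE);
//     assert_eq!(alloc.mutability, Mutability::Not);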
160
136023e0
XL
161impl Allocation {
162 /// Convert Tag and add Extra fields
163 pub fn convert_tag_add_extra<Tag, Extra>(
e1599b0c 164 self,
136023e0
XL
165 cx: &impl HasDataLayout,
166 extra: Extra,
167 mut tagger: impl FnMut(Pointer<AllocId>) -> Pointer<Tag>,
168 ) -> Allocation<Tag, Extra> {
169 // Compute new pointer tags, which also adjusts the bytes.
170 let mut bytes = self.bytes;
171 let mut new_relocations = Vec::with_capacity(self.relocations.0.len());
172 let ptr_size = cx.data_layout().pointer_size.bytes_usize();
173 let endian = cx.data_layout().endian;
174 for &(offset, alloc_id) in self.relocations.iter() {
175 let idx = offset.bytes_usize();
176 let ptr_bytes = &mut bytes[idx..idx + ptr_size];
177 let bits = read_target_uint(endian, ptr_bytes).unwrap();
178 let (ptr_tag, ptr_offset) =
179 tagger(Pointer::new(alloc_id, Size::from_bytes(bits))).into_parts();
180 write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
181 new_relocations.push((offset, ptr_tag));
182 }
183 // Create allocation.
e1599b0c 184 Allocation {
136023e0
XL
185 bytes,
186 relocations: Relocations::from_presorted(new_relocations),
f9f354fc 187 init_mask: self.init_mask,
e1599b0c
XL
188 align: self.align,
189 mutability: self.mutability,
190 extra,
191 }
192 }
193}

/// Raw accessors. Provide access to otherwise private bytes.
impl<Tag, Extra> Allocation<Tag, Extra> {
    pub fn len(&self) -> usize {
        self.bytes.len()
    }

    pub fn size(&self) -> Size {
        Size::from_bytes(self.len())
    }

    /// Looks at a slice which may contain uninitialized bytes or relocations. This differs
    /// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
    /// edges) at all.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
        &self.bytes[range]
    }

    /// Returns the mask indicating which bytes are initialized.
    pub fn init_mask(&self) -> &InitMask {
        &self.init_mask
    }

    /// Returns the relocation list.
    pub fn relocations(&self) -> &Relocations<Tag> {
        &self.relocations
    }
}

/// Byte accessors.
impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
    /// The last argument controls whether we error out when there are uninitialized
    /// or pointer bytes. You should never call this, call `get_bytes` or
    /// `get_bytes_with_uninit_and_ptr` instead.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies
    /// on that.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    fn get_bytes_internal(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        check_init_and_ptr: bool,
    ) -> AllocResult<&[u8]> {
        if check_init_and_ptr {
            self.check_init(range)?;
            self.check_relocations(cx, range)?;
        } else {
            // We still don't want relocations on the *edges*.
            self.check_relocation_edges(cx, range)?;
        }

        Ok(&self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
    }

    /// Checks that these bytes are initialized and not pointer bytes, and then returns them
    /// as a slice.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    #[inline]
    pub fn get_bytes(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult<&[u8]> {
        self.get_bytes_internal(cx, range, true)
    }

    /// It is the caller's responsibility to handle uninitialized and pointer bytes.
    /// However, this still checks that there are no relocations on the *edges*.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    #[inline]
    pub fn get_bytes_with_uninit_and_ptr(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&[u8]> {
        self.get_bytes_internal(cx, range, false)
    }

    /// Just calling this already marks everything as defined and removes relocations,
    /// so be sure to actually put data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes_mut(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&mut [u8]> {
        self.mark_init(range, true);
        self.clear_relocations(cx, range)?;

        Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
    }

    /// A raw pointer variant of `get_bytes_mut` that avoids invalidating existing aliases into this memory.
    pub fn get_bytes_mut_ptr(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<*mut [u8]> {
        self.mark_init(range, true);
        self.clear_relocations(cx, range)?;

        assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
        let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
        let len = range.end().bytes_usize() - range.start.bytes_usize();
        Ok(ptr::slice_from_raw_parts_mut(begin_ptr, len))
    }
}

/// Reading and writing.
impl<Tag: Provenance, Extra> Allocation<Tag, Extra> {
    /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
    /// relocation. If `allow_uninit_and_ptr` is `false`, also enforces that the memory in the
    /// given range contains neither relocations nor uninitialized bytes.
    pub fn check_bytes(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        allow_uninit_and_ptr: bool,
    ) -> AllocResult {
        // Check bounds and relocations on the edges.
        self.get_bytes_with_uninit_and_ptr(cx, range)?;
        // Check uninit and ptr.
        if !allow_uninit_and_ptr {
            self.check_init(range)?;
            self.check_relocations(cx, range)?;
        }
        Ok(())
    }

    /// Reads a *non-ZST* scalar.
    ///
    /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<ScalarMaybeUninit<Tag>> {
        // `get_bytes_with_uninit_and_ptr` tests relocation edges.
        // We deliberately error when loading data that partially has provenance, or partially
        // initialized data (that's the check below), into a scalar. The LLVM semantics of this are
        // unclear so we are conservative. See <https://github.com/rust-lang/rust/issues/69488> for
        // further discussion.
        let bytes = self.get_bytes_with_uninit_and_ptr(cx, range)?;
        // Uninit check happens *after* we established that the alignment is correct.
        // We must not return `Ok()` for unaligned pointers!
        if self.is_init(range).is_err() {
            // This inflates uninitialized bytes to the entire scalar, even if only a few
            // bytes are uninitialized.
            return Ok(ScalarMaybeUninit::Uninit);
        }
        // Now we do the actual reading.
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
        // See if we got a pointer.
        if range.size != cx.data_layout().pointer_size {
            // Not a pointer.
            // *Now*, we better make sure that the inside is free of relocations too.
            self.check_relocations(cx, range)?;
        } else {
            // Maybe a pointer.
            if let Some(&prov) = self.relocations.get(&range.start) {
                let ptr = Pointer::new(prov, Size::from_bytes(bits));
                return Ok(ScalarMaybeUninit::from_pointer(ptr, cx));
            }
        }
        // We don't. Just return the bits.
        Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)))
    }

    /// Writes a *non-ZST* scalar.
    ///
    /// ZSTs can't be written because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        val: ScalarMaybeUninit<Tag>,
    ) -> AllocResult {
        assert!(self.mutability == Mutability::Mut);

        let val = match val {
            ScalarMaybeUninit::Scalar(scalar) => scalar,
            ScalarMaybeUninit::Uninit => {
                self.mark_init(range, false);
                return Ok(());
            }
        };

        // `to_bits_or_ptr_internal` is the right method because we just want to store this data
        // as-is into memory.
        let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size) {
            Err(val) => {
                let (provenance, offset) = val.into_parts();
                (u128::from(offset.bytes()), Some(provenance))
            }
            Ok(data) => (data, None),
        };

        let endian = cx.data_layout().endian;
        let dst = self.get_bytes_mut(cx, range)?;
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also write a relocation.
        if let Some(provenance) = provenance {
            self.relocations.0.insert(range.start, provenance);
        }

        Ok(())
    }
}

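// Illustrative sketch (not part of the upstream source): a write/read round-trip through the
// low-level API, assuming some `cx: &impl HasDataLayout` and a freshly created mutable
// 4-byte allocation `alloc: Allocation<AllocId>` (both hypothetical here).
//
//     let range = alloc_range(Size::ZERO, Size::from_bytes(4));
//     alloc.write_scalar(cx, range, ScalarMaybeUninit::Scalar(Scalar::from_u32(42)))?;
//     // The write marked the bytes as initialized, so reading them back yields the scalar.
//     let val = alloc.read_scalar(cx, range)?;
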
/// Relocations.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Returns all relocations overlapping with the given memory range.
    pub fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Tag)] {
        // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
        // the beginning of this range.
        let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
        self.relocations.range(Size::from_bytes(start)..range.end())
    }

    /// Checks that there are no relocations overlapping with the given range.
    #[inline(always)]
    fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        if self.get_relocations(cx, range).is_empty() {
            Ok(())
        } else {
            Err(AllocError::ReadPointerAsBytes)
        }
    }

    /// Removes all relocations inside the given range.
    /// If there are relocations overlapping with the edges, they
    /// are removed as well *and* the bytes they cover are marked as
    /// uninitialized. This is a somewhat odd "spooky action at a distance",
    /// but it allows strictly more code to run than if we would just error
    /// immediately in that case.
    fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
    where
        Tag: Provenance,
    {
        // Find the start and end of the given range and its outermost relocations.
        let (first, last) = {
            // Find all relocations overlapping the given range.
            let relocations = self.get_relocations(cx, range);
            if relocations.is_empty() {
                return Ok(());
            }

            (
                relocations.first().unwrap().0,
                relocations.last().unwrap().0 + cx.data_layout().pointer_size,
            )
        };
        let start = range.start;
        let end = range.end();

        // We need to handle clearing the relocations from parts of a pointer. See
        // <https://github.com/rust-lang/rust/issues/87184> for details.
        if first < start {
            if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
                return Err(AllocError::PartialPointerOverwrite(first));
            }
            self.init_mask.set_range(first, start, false);
        }
        if last > end {
            if Tag::ERR_ON_PARTIAL_PTR_OVERWRITE {
                return Err(AllocError::PartialPointerOverwrite(
                    last - cx.data_layout().pointer_size,
                ));
            }
            self.init_mask.set_range(end, last, false);
        }

        // Forget all the relocations.
        self.relocations.0.remove_range(first..last);

        Ok(())
    }

    /// Errors if there are relocations overlapping with the edges of the
    /// given memory range.
    #[inline]
    fn check_relocation_edges(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        self.check_relocations(cx, alloc_range(range.start, Size::ZERO))?;
        self.check_relocations(cx, alloc_range(range.end(), Size::ZERO))?;
        Ok(())
    }
}
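
// Illustrative sketch (not part of the upstream source): why `get_relocations` starts its
// lookup `pointer_size - 1` bytes early. On a 64-bit target a relocation recorded at offset 16
// covers bytes 16..24, so a query for the single byte at offset 20 must still find it; the
// hypothetical `alloc`/`cx` below would return that entry.
//
//     let overlapping = alloc.get_relocations(cx, alloc_range(Size::from_bytes(20), Size::from_bytes(1)));
//     // `overlapping` contains the relocation keyed at offset 16, if one is stored there.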

/// "Relocations" stores the provenance information of pointers stored in memory.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
pub struct Relocations<Tag = AllocId>(SortedMap<Size, Tag>);

impl<Tag> Relocations<Tag> {
    pub fn new() -> Self {
        Relocations(SortedMap::new())
    }

    // The caller must guarantee that the given relocations are already sorted
    // by address and contain no duplicates.
    pub fn from_presorted(r: Vec<(Size, Tag)>) -> Self {
        Relocations(SortedMap::from_presorted_elements(r))
    }
}

impl<Tag> Deref for Relocations<Tag> {
    type Target = SortedMap<Size, Tag>;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

/// A partial, owned list of relocations to transfer into another allocation.
pub struct AllocationRelocations<Tag> {
    relative_relocations: Vec<(Size, Tag)>,
}

impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    pub fn prepare_relocation_copy(
        &self,
        cx: &impl HasDataLayout,
        src: AllocRange,
        dest: Size,
        count: u64,
    ) -> AllocationRelocations<Tag> {
        let relocations = self.get_relocations(cx, src);
        if relocations.is_empty() {
            return AllocationRelocations { relative_relocations: Vec::new() };
        }

        let size = src.size;
        let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));

        for i in 0..count {
            new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                // compute offset for current repetition
                let dest_offset = dest + size * i; // `Size` operations
                (
                    // shift offsets from source allocation to destination allocation
                    (offset + dest_offset) - src.start, // `Size` operations
                    reloc,
                )
            }));
        }

        AllocationRelocations { relative_relocations: new_relocations }
    }

    /// Applies a relocation copy.
    /// The affected range, as defined in the parameters to `prepare_relocation_copy`, is expected
    /// to be clear of relocations.
    pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) {
        self.relocations.0.insert_presorted(relocations.relative_relocations);
    }
}
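
// Illustrative sketch (not part of the upstream source): the two-step protocol for copying
// relocations when a source range is repeated `count` times into a destination allocation.
// `src_alloc`, `dest_alloc`, `src_range`, `dest_offset`, and `count` are hypothetical names.
//
//     let prepared = src_alloc.prepare_relocation_copy(cx, src_range, dest_offset, count);
//     // ... copy the bytes and the init mask ...
//     dest_alloc.mark_relocation_range(prepared);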

////////////////////////////////////////////////////////////////////////////////
// Uninitialized byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;

/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
/// is initialized. If it is `false`, the byte is uninitialized.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct InitMask {
    blocks: Vec<Block>,
    len: Size,
}

impl InitMask {
    pub const BLOCK_SIZE: u64 = 64;

    #[inline]
    fn bit_index(bits: Size) -> (usize, usize) {
        // BLOCK_SIZE is the number of bits that can fit in a `Block`.
        // Each bit in a `Block` represents the initialization state of one byte of an allocation,
        // so we use `.bytes()` here.
        let bits = bits.bytes();
        let a = bits / InitMask::BLOCK_SIZE;
        let b = bits % InitMask::BLOCK_SIZE;
        (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
    }

    #[inline]
    fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
        let block = block.try_into().ok().unwrap();
        let bit = bit.try_into().ok().unwrap();
        Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
    }
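
    // Illustrative sketch (not part of the upstream source): the two index mappings are
    // inverses of each other. Byte 70 lands in the second 64-bit block, at bit 6.
    //
    //     assert_eq!(InitMask::bit_index(Size::from_bytes(70)), (1, 6)); // 70 == 1 * 64 + 6
    //     assert_eq!(InitMask::size_from_bit_index(1, 6), Size::from_bytes(70));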

    pub fn new(size: Size, state: bool) -> Self {
        let mut m = InitMask { blocks: vec![], len: Size::ZERO };
        m.grow(size, state);
        m
    }

    pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
        let len = self.len;
        if end > len {
            self.grow(end - len, new_state);
        }
        self.set_range_inbounds(start, end, new_state);
    }

    pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
        let (blocka, bita) = Self::bit_index(start);
        let (blockb, bitb) = Self::bit_index(end);
        if blocka == blockb {
            // First set all bits except the first `bita`,
            // then unset the last `64 - bitb` bits.
            let range = if bitb == 0 {
                u64::MAX << bita
            } else {
                (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
            };
            if new_state {
                self.blocks[blocka] |= range;
            } else {
                self.blocks[blocka] &= !range;
            }
            return;
        }
        // across block boundaries
        if new_state {
            // Set `bita..64` to `1`.
            self.blocks[blocka] |= u64::MAX << bita;
            // Set `0..bitb` to `1`.
            if bitb != 0 {
                self.blocks[blockb] |= u64::MAX >> (64 - bitb);
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = u64::MAX;
            }
        } else {
            // Set `bita..64` to `0`.
            self.blocks[blocka] &= !(u64::MAX << bita);
            // Set `0..bitb` to `0`.
            if bitb != 0 {
                self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
            }
            // Fill in all the other blocks (much faster than one bit at a time).
            for block in (blocka + 1)..blockb {
                self.blocks[block] = 0;
            }
        }
    }

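    // Illustrative sketch (not part of the upstream source): a 128-byte mask exercising the
    // cross-block path of `set_range_inbounds`.
    //
    //     let mut mask = InitMask::new(Size::from_bytes(128), false); // 128 bytes, all uninitialized
    //     mask.set_range_inbounds(Size::from_bytes(60), Size::from_bytes(70), true);
    //     assert!(mask.get(Size::from_bytes(60)) && mask.get(Size::from_bytes(69)));
    //     assert!(!mask.get(Size::from_bytes(59)) && !mask.get(Size::from_bytes(70)));
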
    #[inline]
    pub fn get(&self, i: Size) -> bool {
        let (block, bit) = Self::bit_index(i);
        (self.blocks[block] & (1 << bit)) != 0
    }

    #[inline]
    pub fn set(&mut self, i: Size, new_state: bool) {
        let (block, bit) = Self::bit_index(i);
        self.set_bit(block, bit, new_state);
    }

    #[inline]
    fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }

    pub fn grow(&mut self, amount: Size, new_state: bool) {
        if amount.bytes() == 0 {
            return;
        }
        let unused_trailing_bits =
            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
        if amount.bytes() > unused_trailing_bits {
            let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
            self.blocks.extend(
                // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
            );
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
    }

    /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to `is_init`.
    fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> {
        /// A fast implementation of `find_bit`,
        /// which skips over an entire block at a time if it's all 0s (resp. 1s),
        /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
        ///
        /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
        /// and with the least significant bit (and lowest block) first:
        ///
        ///          00000000|00000000
        ///          ^      ^ ^      ^
        ///   index: 0      7 8      15
        ///
        /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
        fn find_bit_fast(
            init_mask: &InitMask,
            start: Size,
            end: Size,
            is_init: bool,
        ) -> Option<Size> {
            /// Search one block, returning the index of the first bit equal to `is_init`.
            fn search_block(
                bits: Block,
                block: usize,
                start_bit: usize,
                is_init: bool,
            ) -> Option<Size> {
                // For the following examples, assume this function was called with:
                //   bits = 0b00111011
                //   start_bit = 3
                //   is_init = false
                // Note that, for the examples in this function, the most significant bit is written first,
                // which is backwards compared to the comments in `find_bit`/`find_bit_fast`.

                // Invert bits so we're always looking for the first set bit.
                //        ! 0b00111011
                //   bits = 0b11000100
                let bits = if is_init { bits } else { !bits };
                // Mask off unused start bits.
                //          0b11000100
                //        & 0b11111000
                //   bits = 0b11000000
                let bits = bits & (!0 << start_bit);
                // Find set bit, if any.
                //   bit = trailing_zeros(0b11000000)
                //   bit = 6
                if bits == 0 {
                    None
                } else {
                    let bit = bits.trailing_zeros();
                    Some(InitMask::size_from_bit_index(block, bit))
                }
            }

            if start >= end {
                return None;
            }

            // Convert `start` and `end` to block indexes and bit indexes within each block.
            // We must convert `end` to an inclusive bound to handle block boundaries correctly.
            //
            // For example:
            //
            //   (a) 00000000|00000000    (b) 00000000|
            //       ^~~~~~~~~~~^             ^~~~~~~^
            //       start      end           start  end
            //
            // In both cases, the block index of `end` is 1.
            // But we do want to search block 1 in (a), and we don't in (b).
            //
            // We subtract 1 from both end positions to make them inclusive:
            //
            //   (a) 00000000|00000000    (b) 00000000|
            //       ^~~~~~~~~~^              ^~~~~~~^
            //       start     end_inclusive  start end_inclusive
            //
            // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
            // This provides the desired behavior of searching blocks 0 and 1 for (a),
            // and searching only block 0 for (b).
            // There is no concern of overflows since we checked for `start >= end` above.
            let (start_block, start_bit) = InitMask::bit_index(start);
            let end_inclusive = Size::from_bytes(end.bytes() - 1);
            let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);

            // Handle first block: need to skip `start_bit` bits.
            //
            // We need to handle the first block separately,
            // because there may be bits earlier in the block that should be ignored,
            // such as the bit marked (1) in this example:
            //
            //           (1)
            //           -|------
            //       (c) 01000000|00000000|00000001
            //             ^~~~~~~~~~~~~~~~~~^
            //             start             end
            if let Some(i) =
                search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
            {
                // If the range is less than a block, we may find a matching bit after `end`.
                //
                // For example, we shouldn't successfully find bit (2), because it's after `end`:
                //
                //                 (2)
                //           -------|
                //       (d) 00000001|00000000|00000001
                //           ^~~~~^
                //           start end
                //
                // An alternative would be to mask off end bits in the same way as we do for start bits,
                // but performing this check afterwards is faster and simpler to implement.
                if i < end {
                    return Some(i);
                } else {
                    return None;
                }
            }

            // Handle remaining blocks.
            //
            // We can skip over an entire block at once if it's all 0s (resp. 1s).
            // The block marked (3) in this example is the first block that will be handled by this loop,
            // and it will be skipped for that reason:
            //
            //                      (3)
            //                    --------
            //       (e) 01000000|00000000|00000001
            //           ^~~~~~~~~~~~~~~~~~^
            //           start             end
            if start_block < end_block_inclusive {
                // This loop is written in a specific way for performance.
                // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
                // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
                // because both alternatives result in significantly worse codegen.
                // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
                // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
                for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
                    .iter()
                    .zip(start_block + 1..)
                {
                    if let Some(i) = search_block(bits, block, 0, is_init) {
                        // If this is the last block, we may find a matching bit after `end`.
                        //
                        // For example, we shouldn't successfully find bit (4), because it's after `end`:
                        //
                        //                                    (4)
                        //                             -------|
                        //       (f) 00000001|00000000|00000001
                        //           ^~~~~~~~~~~~~~~~~~^
                        //           start             end
                        //
                        // As above with example (d), we could handle the end block separately and mask off end bits,
                        // but unconditionally searching an entire block at once and performing this check afterwards
                        // is faster and much simpler to implement.
                        if i < end {
                            return Some(i);
                        } else {
                            return None;
                        }
                    }
                }
            }

            None
        }

        #[cfg_attr(not(debug_assertions), allow(dead_code))]
        fn find_bit_slow(
            init_mask: &InitMask,
            start: Size,
            end: Size,
            is_init: bool,
        ) -> Option<Size> {
            (start..end).find(|&i| init_mask.get(i) == is_init)
        }

        let result = find_bit_fast(self, start, end, is_init);

        debug_assert_eq!(
            result,
            find_bit_slow(self, start, end, is_init),
            "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
            start,
            end,
            is_init,
            self
        );

        result
    }
}

/// A contiguous chunk of initialized or uninitialized memory.
pub enum InitChunk {
    Init(Range<Size>),
    Uninit(Range<Size>),
}

impl InitChunk {
    #[inline]
    pub fn is_init(&self) -> bool {
        match self {
            Self::Init(_) => true,
            Self::Uninit(_) => false,
        }
    }

    #[inline]
    pub fn range(&self) -> Range<Size> {
        match self {
            Self::Init(r) => r.clone(),
            Self::Uninit(r) => r.clone(),
        }
    }
}

impl InitMask {
    /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
    /// indexes for the first contiguous span of the uninitialized access.
    #[inline]
    pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), Range<Size>> {
        if end > self.len {
            return Err(self.len..end);
        }

        let uninit_start = self.find_bit(start, end, false);

        match uninit_start {
            Some(uninit_start) => {
                let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
                Err(uninit_start..uninit_end)
            }
            None => Ok(()),
        }
    }

    /// Returns an iterator, yielding a range of byte indexes for each contiguous region
    /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
    ///
    /// The iterator guarantees the following:
    /// - Chunks are nonempty.
    /// - Chunks are adjacent (each range's start is equal to the previous range's end).
    /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
    /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
    #[inline]
    pub fn range_as_init_chunks(&self, start: Size, end: Size) -> InitChunkIter<'_> {
        assert!(end <= self.len);

        let is_init = if start < end {
            self.get(start)
        } else {
            // `start..end` is empty: there are no chunks, so use some arbitrary value
            false
        };

        InitChunkIter { init_mask: self, is_init, start, end }
    }
}
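
// Illustrative sketch (not part of the upstream source): walking an `InitMask` as alternating
// chunks. With only bytes 2..4 initialized in a 6-byte mask, the iterator yields
// `Uninit(0..2)`, `Init(2..4)`, `Uninit(4..6)`.
//
//     let mut mask = InitMask::new(Size::from_bytes(6), false);
//     mask.set_range(Size::from_bytes(2), Size::from_bytes(4), true);
//     let chunks: Vec<_> = mask
//         .range_as_init_chunks(Size::ZERO, Size::from_bytes(6))
//         .map(|chunk| (chunk.is_init(), chunk.range()))
//         .collect();
//     assert_eq!(chunks.len(), 3);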

/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`].
pub struct InitChunkIter<'a> {
    init_mask: &'a InitMask,
    /// Whether the next chunk we will return is initialized.
    /// If there are no more chunks, contains some arbitrary value.
    is_init: bool,
    /// The current byte index into `init_mask`.
    start: Size,
    /// The end byte index into `init_mask`.
    end: Size,
}

impl<'a> Iterator for InitChunkIter<'a> {
    type Item = InitChunk;

    #[inline]
    fn next(&mut self) -> Option<Self::Item> {
        if self.start >= self.end {
            return None;
        }

        let end_of_chunk =
            self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
        let range = self.start..end_of_chunk;

        let ret =
            Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });

        self.is_init = !self.is_init;
        self.start = end_of_chunk;

        ret
    }
}

/// Uninitialized bytes.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
    /// Checks whether the given range is entirely initialized.
    ///
    /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
    /// indexes of the first contiguous uninitialized access.
    fn is_init(&self, range: AllocRange) -> Result<(), Range<Size>> {
        self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
    }

    /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
    /// error which will report the first range of bytes which is uninitialized.
    fn check_init(&self, range: AllocRange) -> AllocResult {
        self.is_init(range).map_err(|idx_range| {
            AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
                access_offset: range.start,
                access_size: range.size,
                uninit_offset: idx_range.start,
                uninit_size: idx_range.end - idx_range.start, // `Size` subtraction
            }))
        })
    }

    pub fn mark_init(&mut self, range: AllocRange, is_init: bool) {
        if range.size.bytes() == 0 {
            return;
        }
        assert!(self.mutability == Mutability::Mut);
        self.init_mask.set_range(range.start, range.end(), is_init);
    }
}
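
// Illustrative sketch (not part of the upstream source): what a caller sees when reading a
// partially uninitialized range. With bytes 0..2 written and 2..4 untouched in a 4-byte
// `uninit` allocation (hypothetical `alloc`/`cx`):
//
//     let err = alloc.get_bytes(cx, alloc_range(Size::ZERO, Size::from_bytes(4))).unwrap_err();
//     // `err` is `AllocError::InvalidUninitBytes(Some(access))` where
//     // `access.uninit_offset == Size::from_bytes(2)` and `access.uninit_size == Size::from_bytes(2)`.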

/// Run-length encoding of the uninit mask.
/// Used to copy parts of a mask multiple times to another allocation.
pub struct InitMaskCompressed {
    /// Whether the first range is initialized.
    initial: bool,
    /// The lengths of ranges that are run-length encoded.
    /// The initialization state of the ranges alternates starting with `initial`.
    ranges: smallvec::SmallVec<[u64; 1]>,
}

impl InitMaskCompressed {
    pub fn no_bytes_init(&self) -> bool {
        // The `ranges` are run-length encoded and of alternating initialization state.
        // So if `ranges.len() > 1` then the second block is an initialized range.
        !self.initial && self.ranges.len() == 1
    }
}

/// Transferring the initialization mask to other allocations.
impl<Tag, Extra> Allocation<Tag, Extra> {
    /// Creates a run-length encoding of the initialization mask; panics if range is empty.
    ///
    /// This is essentially a more space-efficient version of
    /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`.
    pub fn compress_uninit_range(&self, range: AllocRange) -> InitMaskCompressed {
        // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
        // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
        // the source and write it to the destination. Even if we optimized the memory accesses,
        // we'd be doing all of this `repeat` times.
        // Therefore we precompute a compressed version of the initialization mask of the source value and
        // then write it back `repeat` times without computing any more information from the source.

        // A precomputed cache for ranges of initialized / uninitialized bits:
        // 0000010010001110 will become
        // `[5, 1, 2, 1, 3, 3, 1]`,
        // where each element toggles the state.

        let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();

        let mut chunks = self.init_mask.range_as_init_chunks(range.start, range.end()).peekable();

        let initial = chunks.peek().expect("range should be nonempty").is_init();

        // Here we rely on `range_as_init_chunks` to yield alternating init/uninit chunks.
        for chunk in chunks {
            let len = chunk.range().end.bytes() - chunk.range().start.bytes();
            ranges.push(len);
        }

        InitMaskCompressed { ranges, initial }
    }

    /// Applies multiple instances of the run-length encoding to the initialization mask.
    pub fn mark_compressed_init_range(
        &mut self,
        defined: &InitMaskCompressed,
        range: AllocRange,
        repeat: u64,
    ) {
        // An optimization where we can just overwrite an entire range of initialization
        // bits if they are going to be uniformly `1` or `0`.
        if defined.ranges.len() <= 1 {
            self.init_mask.set_range_inbounds(
                range.start,
                range.start + range.size * repeat, // `Size` operations
                defined.initial,
            );
            return;
        }

        for mut j in 0..repeat {
            j *= range.size.bytes();
            j += range.start.bytes();
            let mut cur = defined.initial;
            for range in &defined.ranges {
                let old_j = j;
                j += range;
                self.init_mask.set_range_inbounds(
                    Size::from_bytes(old_j),
                    Size::from_bytes(j),
                    cur,
                );
                cur = !cur;
            }
        }
    }
}
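
// Illustrative sketch (not part of the upstream source): how the run-length-encoded mask is
// meant to pair with the relocation-copy helpers when a source range is duplicated `repeat`
// times. `src_alloc`, `dest_alloc`, `src_range`, `dest_offset`, and `repeat` are hypothetical.
//
//     let compressed = src_alloc.compress_uninit_range(src_range);
//     // ... copy the bytes and the relocations ...
//     dest_alloc.mark_compressed_init_range(
//         &compressed,
//         alloc_range(dest_offset, src_range.size),
//         repeat,
//     );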