use crate::alloc::alloc::{alloc, dealloc, handle_alloc_error};
use crate::scopeguard::guard;
use crate::TryReserveError;
use core::alloc::Layout;
use core::hint;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::mem::ManuallyDrop;
use core::ptr::NonNull;

// Use the SSE2 implementation if possible: it allows us to scan 16 buckets
// at once instead of 8. We don't bother with AVX since it would require
// runtime dispatch and wouldn't gain us much anyways: the probability of
// finding a match drops off drastically after the first few buckets.
//
// I attempted an implementation on ARM using NEON instructions, but it
// turns out that most NEON instructions have multi-cycle latency, which in
// the end outweighs any gains over the generic implementation.
cfg_if! {
    if #[cfg(all(
        target_feature = "sse2",
        any(target_arch = "x86", target_arch = "x86_64"),
        not(miri)
    ))] {
        mod sse2;
        use sse2 as imp;
    } else {
        #[path = "generic.rs"]
        mod generic;
        use generic as imp;
    }
}

mod bitmask;

use self::bitmask::{BitMask, BitMaskIter};
use self::imp::Group;

// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};
#[cfg(not(feature = "nightly"))]
#[inline]
fn likely(b: bool) -> bool {
    b
}
#[cfg(not(feature = "nightly"))]
#[inline]
fn unlikely(b: bool) -> bool {
    b
}

#[cfg(feature = "nightly")]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}
#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    (to as usize - from as usize) / mem::size_of::<T>()
}
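
// A tiny sanity sketch (not part of the upstream source) of the helper above:
// `offset_from` measures the distance between two pointers in elements, not bytes.
#[cfg(test)]
#[test]
fn offset_from_sketch() {
    let arr = [0u32; 4];
    unsafe {
        // The pointer three elements past the start is 3 elements away.
        assert_eq!(offset_from(arr.as_ptr().add(3), arr.as_ptr()), 3);
    }
}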
/// Whether memory allocation errors should return an error or abort.
#[derive(Copy, Clone)]
enum Fallibility {
    Fallible,
    Infallible,
}

impl Fallibility {
    /// Error to return on capacity overflow.
    #[cfg_attr(feature = "inline-more", inline)]
    fn capacity_overflow(self) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::CapacityOverflow,
            Fallibility::Infallible => panic!("Hash table capacity overflow"),
        }
    }

    /// Error to return on allocation error.
    #[cfg_attr(feature = "inline-more", inline)]
    fn alloc_err(self, layout: Layout) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::AllocError { layout },
            Fallibility::Infallible => handle_alloc_error(layout),
        }
    }
}
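
// An illustrative sketch (not an upstream test): the fallible variant reports
// a capacity overflow as an error value instead of panicking or aborting.
#[cfg(test)]
#[test]
fn fallibility_sketch() {
    match Fallibility::Fallible.capacity_overflow() {
        TryReserveError::CapacityOverflow => {}
        TryReserveError::AllocError { .. } => panic!("expected CapacityOverflow"),
    }
}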
/// Control byte value for an empty bucket.
const EMPTY: u8 = 0b1111_1111;

/// Control byte value for a deleted bucket.
const DELETED: u8 = 0b1000_0000;

/// Checks whether a control byte represents a full bucket (top bit is clear).
#[inline]
fn is_full(ctrl: u8) -> bool {
    ctrl & 0x80 == 0
}

/// Checks whether a control byte represents a special value (top bit is set).
#[inline]
fn is_special(ctrl: u8) -> bool {
    ctrl & 0x80 != 0
}

/// Checks whether a special control value is EMPTY (just check 1 bit).
#[inline]
fn special_is_empty(ctrl: u8) -> bool {
    debug_assert!(is_special(ctrl));
    ctrl & 0x01 != 0
}
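
// An illustrative sketch (not an upstream test) of the control byte encoding:
// a full bucket stores the 7-bit h2 value (top bit clear), while EMPTY and
// DELETED are the two special values with the top bit set.
#[cfg(test)]
#[test]
fn ctrl_byte_sketch() {
    assert!(is_full(0x3A)); // any value with the top bit clear is "full"
    assert!(!is_full(EMPTY) && !is_full(DELETED));
    assert!(is_special(EMPTY) && is_special(DELETED));
    assert!(special_is_empty(EMPTY));
    assert!(!special_is_empty(DELETED));
}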
/// Primary hash function, used to select the initial bucket to probe from.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // On 32-bit platforms we simply ignore the higher hash bits.
    hash as usize
}

/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
    // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
    // value, some hash functions (such as FxHash) produce a usize result
    // instead, which means that the top 32 bits are 0 on 32-bit platforms.
    let hash_len = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
    let top7 = hash >> (hash_len * 8 - 7);
    (top7 & 0x7f) as u8 // truncation
}
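
// An illustrative sketch (not an upstream test), assuming a 64-bit platform:
// h1 is simply the hash truncated to usize (the bucket mask is applied later),
// while h2 extracts the top 7 bits that end up stored in the control byte.
#[cfg(test)]
#[cfg(target_pointer_width = "64")]
#[test]
fn h1_h2_split_sketch() {
    let hash: u64 = 0xABCD_EF01_2345_6789;
    assert_eq!(h1(hash), 0xABCD_EF01_2345_6789_usize);
    // Top byte 0xAB = 0b1010_1011, so the top 7 bits are 0b101_0101 = 0x55.
    assert_eq!(h2(hash), 0x55);
}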
/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
struct ProbeSeq {
    bucket_mask: usize,
    pos: usize,
    stride: usize,
}

impl Iterator for ProbeSeq {
    type Item = usize;

    #[inline]
    fn next(&mut self) -> Option<usize> {
        // We should have found an empty bucket by now and ended the probe.
        debug_assert!(
            self.stride <= self.bucket_mask,
            "Went past end of probe sequence"
        );

        let result = self.pos;
        self.stride += Group::WIDTH;
        self.pos += self.stride;
        self.pos &= self.bucket_mask;
        Some(result)
    }
}
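
// An illustrative sketch (not an upstream test) of the triangular probe: for a
// 64-bucket table, the sequence hits every group exactly once before wrapping.
#[cfg(test)]
#[test]
fn probe_seq_visits_every_group_once_sketch() {
    let mut seq = ProbeSeq {
        bucket_mask: 63,
        pos: 0,
        stride: 0,
    };
    let groups = 64 / Group::WIDTH;
    let mut seen = 0u64;
    for _ in 0..groups {
        let group_index = seq.next().unwrap() / Group::WIDTH;
        assert_eq!(seen & (1 << group_index), 0, "group visited twice");
        seen |= 1 << group_index;
    }
    assert_eq!(seen.count_ones() as usize, groups);
}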
/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
#[cfg_attr(not(target_os = "emscripten"), inline)]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    debug_assert_ne!(cap, 0);

    // For small tables we require at least 1 empty bucket so that lookups are
    // guaranteed to terminate if an element doesn't exist in the table.
    //
    // We don't bother with a table size of 2 buckets since that can only
    // hold a single element. Instead we skip directly to a 4 bucket table
    // which can hold 3 elements.
    if cap < 8 {
        return Some(if cap < 4 { 4 } else { 8 });
    }

    // Otherwise require 1/8 buckets to be empty (87.5% load)
    //
    // Be careful when modifying this, calculate_layout relies on the
    // overflow check here.
    let adjusted_cap = cap.checked_mul(8)? / 7;

    // Any overflows will have been caught by the checked_mul. Also, any
    // rounding errors from the division above will be cleaned up by
    // next_power_of_two (which can't overflow because of the previous division).
    Some(adjusted_cap.next_power_of_two())
}

/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[inline]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
        // Keep in mind that the bucket mask is one less than the bucket count.
        bucket_mask
    } else {
        // For larger tables we reserve 12.5% of the slots as empty.
        ((bucket_mask + 1) / 8) * 7
    }
}
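
// An illustrative sketch (not an upstream test) of the load-factor arithmetic:
// small requests round up to 4 or 8 buckets, larger ones keep 1/8 of the
// buckets empty, and `bucket_mask_to_capacity` is the inverse view.
#[cfg(test)]
#[test]
fn load_factor_sketch() {
    assert_eq!(capacity_to_buckets(1), Some(4));
    assert_eq!(capacity_to_buckets(3), Some(4));
    assert_eq!(capacity_to_buckets(7), Some(8));
    assert_eq!(capacity_to_buckets(28), Some(32));
    // A 32-bucket table (mask 31) holds at most 28 items at 87.5% load.
    assert_eq!(bucket_mask_to_capacity(31), 28);
    // Tiny tables always keep one slot free.
    assert_eq!(bucket_mask_to_capacity(3), 3);
}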
/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past last element of buckets)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "nightly")]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());

    // Array of buckets
    let data = Layout::array::<T>(buckets).ok()?;

    // Array of control bytes. This must be aligned to the group size.
    //
    // We add `Group::WIDTH` control bytes at the end of the array which
    // replicate the bytes at the start of the array and thus avoids the need to
    // perform bounds-checking while probing.
    //
    // There is no possible overflow here since buckets is a power of two and
    // Group::WIDTH is a small number.
    let ctrl = unsafe { Layout::from_size_align_unchecked(buckets + Group::WIDTH, Group::WIDTH) };

    data.extend(ctrl).ok()
}

/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past last element of buckets)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(not(feature = "nightly"))]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());

    // Manual layout calculation since Layout methods are not yet stable.
    let ctrl_align = usize::max(mem::align_of::<T>(), Group::WIDTH);
    let ctrl_offset = mem::size_of::<T>()
        .checked_mul(buckets)?
        .checked_add(ctrl_align - 1)?
        & !(ctrl_align - 1);
    let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;

    Some((
        unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
        ctrl_offset,
    ))
}
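
// An illustrative sketch (not an upstream test): for 8 buckets of u64 the
// buckets occupy 64 bytes, followed by 8 + Group::WIDTH control bytes (the
// extra WIDTH bytes mirror the start of the control array).
#[cfg(test)]
#[test]
fn calculate_layout_sketch() {
    let (layout, ctrl_offset) = calculate_layout::<u64>(8).unwrap();
    assert!(ctrl_offset >= 64);
    assert_eq!(layout.size(), ctrl_offset + 8 + Group::WIDTH);
}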
/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // This is actually a pointer to the slot *after* the element, not to the
    // element itself; this keeps the pointer arithmetic invariants simple,
    // since a direct pointer to the element would introduce extra complexity.
    // Using `NonNull` for variance and niche layout.
    ptr: NonNull<T>,
}

// This Send impl is needed for rayon support. This is safe since Bucket is
// never exposed in a public API.
unsafe impl<T> Send for Bucket<T> {}

impl<T> Clone for Bucket<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self { ptr: self.ptr }
    }
}

impl<T> Bucket<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            // won't overflow because index must be less than length
            (index + 1) as *mut T
        } else {
            base.as_ptr().sub(index)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
        if mem::size_of::<T>() == 0 {
            self.ptr.as_ptr() as usize - 1
        } else {
            offset_from(base.as_ptr(), self.ptr.as_ptr())
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ptr(&self) -> *mut T {
        if mem::size_of::<T>() == 0 {
            // Just return an arbitrary ZST pointer which is properly aligned
            mem::align_of::<T>() as *mut T
        } else {
            self.ptr.as_ptr().sub(1)
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn next_n(&self, offset: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            (self.ptr.as_ptr() as usize + offset) as *mut T
        } else {
            self.ptr.as_ptr().sub(offset)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
        self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
    }
}
/// A raw hash table with an unsafe API.
pub struct RawTable<T> {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,

    // [Padding], T1, T2, ..., Tlast, C1, C2, ...
    //                                ^ points here
    ctrl: NonNull<u8>,

    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,

    // Number of elements in the table, only really used by len()
    items: usize,

    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}

impl<T> RawTable<T> {
    /// Creates a new empty hash table without allocating any memory.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub const fn new() -> Self {
        Self {
            // Be careful to cast the entire slice to a raw pointer.
            ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
            bucket_mask: 0,
            items: 0,
            growth_left: 0,
            marker: PhantomData,
        }
    }

    /// Allocates a new hash table with the given number of buckets.
    ///
    /// The control bytes are left uninitialized.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(
        buckets: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        debug_assert!(buckets.is_power_of_two());

        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
        let (layout, ctrl_offset) = match calculate_layout::<T>(buckets) {
            Some(lco) => lco,
            None => return Err(fallibility.capacity_overflow()),
        };
        let ptr = match NonNull::new(alloc(layout)) {
            Some(ptr) => ptr,
            None => return Err(fallibility.alloc_err(layout)),
        };
        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
        Ok(Self {
            ctrl,
            bucket_mask: buckets - 1,
            items: 0,
            growth_left: bucket_mask_to_capacity(buckets - 1),
            marker: PhantomData,
        })
    }

    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    fn fallible_with_capacity(
        capacity: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        if capacity == 0 {
            Ok(Self::new())
        } else {
            unsafe {
                // Avoid `Option::ok_or_else` because it bloats LLVM IR.
                let buckets = match capacity_to_buckets(capacity) {
                    Some(buckets) => buckets,
                    None => return Err(fallibility.capacity_overflow()),
                };
                let result = Self::new_uninitialized(buckets, fallibility)?;
                result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());

                Ok(result)
            }
        }
    }
    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    #[cfg(feature = "raw")]
    pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
        Self::fallible_with_capacity(capacity, Fallibility::Fallible)
    }

    /// Allocates a new hash table with at least enough capacity for inserting
    /// the given number of elements without reallocating.
    pub fn with_capacity(capacity: usize) -> Self {
        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
        match Self::fallible_with_capacity(capacity, Fallibility::Infallible) {
            Ok(capacity) => capacity,
            Err(_) => unsafe { hint::unreachable_unchecked() },
        }
    }

    /// Deallocates the table without dropping any entries.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn free_buckets(&mut self) {
        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
        let (layout, ctrl_offset) = match calculate_layout::<T>(self.buckets()) {
            Some(lco) => lco,
            None => hint::unreachable_unchecked(),
        };
        dealloc(self.ctrl.as_ptr().sub(ctrl_offset), layout);
    }

    /// Returns pointer to one past last element of data table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn data_end(&self) -> NonNull<T> {
        NonNull::new_unchecked(self.ctrl.as_ptr() as *mut T)
    }

    /// Returns pointer to start of data table.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "nightly")]
    pub unsafe fn data_start(&self) -> *mut T {
        self.data_end().as_ptr().wrapping_sub(self.buckets())
    }

    /// Returns the index of a bucket from a `Bucket`.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
        bucket.to_base_index(self.data_end())
    }

    /// Returns a pointer to a control byte.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn ctrl(&self, index: usize) -> *mut u8 {
        debug_assert!(index < self.num_ctrl_bytes());
        self.ctrl.as_ptr().add(index)
    }

    /// Returns a pointer to an element in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
        debug_assert_ne!(self.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data_end(), index)
    }
    /// Erases an element from the table without dropping it.
    #[cfg_attr(feature = "inline-more", inline)]
    #[deprecated(since = "0.8.1", note = "use erase or remove instead")]
    pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
        let index = self.bucket_index(item);
        debug_assert!(is_full(*self.ctrl(index)));
        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
        let empty_after = Group::load(self.ctrl(index)).match_empty();

        // If we are inside a continuous block of Group::WIDTH full or deleted
        // cells then a probe window may have seen a full block when trying to
        // insert. We therefore need to keep that block non-empty so that
        // lookups will continue searching to the next probe window.
        //
        // Note that in this context `leading_zeros` refers to the bytes at the
        // end of a group, while `trailing_zeros` refers to the bytes at the
        // beginning of a group.
        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
            DELETED
        } else {
            self.growth_left += 1;
            EMPTY
        };
        self.set_ctrl(index, ctrl);
        self.items -= 1;
    }

    /// Erases an element from the table, dropping it in place.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    #[allow(deprecated)]
    pub unsafe fn erase(&mut self, item: Bucket<T>) {
        // Erase the element from the table first since drop might panic.
        self.erase_no_drop(&item);
        item.drop();
    }

    /// Removes an element from the table, returning it.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    #[allow(deprecated)]
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
        self.erase_no_drop(&item);
        item.read()
    }

    /// Returns an iterator for a probe sequence on the table.
    ///
    /// This iterator never terminates, but is guaranteed to visit each bucket
    /// group exactly once. The loop using `probe_seq` must terminate upon
    /// reaching a group containing an empty bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
        ProbeSeq {
            bucket_mask: self.bucket_mask,
            pos: h1(hash) & self.bucket_mask,
            stride: 0,
        }
    }

    /// Sets a control byte, and possibly also the replicated control byte at
    /// the end of the array.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
        // Replicate the first Group::WIDTH control bytes at the end of
        // the array without using a branch:
        // - If index >= Group::WIDTH then index == index2.
        // - Otherwise index2 == self.bucket_mask + 1 + index.
        //
        // The very last replicated control byte is never actually read because
        // we mask the initial index for unaligned loads, but we write it
        // anyways because it makes the set_ctrl implementation simpler.
        //
        // If there are fewer buckets than Group::WIDTH then this code will
        // replicate the buckets at the end of the trailing group. For example
        // with 2 buckets and a group size of 4, the control bytes will look
        // like this:
        //
        //     Real    |             Replicated
        // ---------------------------------------------
        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
        // ---------------------------------------------
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;

        *self.ctrl(index) = ctrl;
        *self.ctrl(index2) = ctrl;
    }
    /// Searches for an empty or deleted bucket which is suitable for inserting
    /// a new element.
    ///
    /// There must be at least 1 empty bucket in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    fn find_insert_slot(&self, hash: u64) -> usize {
        for pos in self.probe_seq(hash) {
            unsafe {
                let group = Group::load(self.ctrl(pos));
                if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
                    let result = (pos + bit) & self.bucket_mask;

                    // In tables smaller than the group width, trailing control
                    // bytes outside the range of the table are filled with
                    // EMPTY entries. These will unfortunately trigger a
                    // match, but once masked may point to a full bucket that
                    // is already occupied. We detect this situation here and
                    // perform a second scan starting at the beginning of the
                    // table. This second scan is guaranteed to find an empty
                    // slot (due to the load factor) before hitting the trailing
                    // control bytes (containing EMPTY).
                    if unlikely(is_full(*self.ctrl(result))) {
                        debug_assert!(self.bucket_mask < Group::WIDTH);
                        debug_assert_ne!(pos, 0);
                        return Group::load_aligned(self.ctrl(0))
                            .match_empty_or_deleted()
                            .lowest_set_bit_nonzero();
                    }

                    return result;
                }
            }
        }

        // probe_seq never returns.
        unreachable!();
    }

    /// Marks all table buckets as empty without dropping their contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear_no_drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
            }
        }
        self.items = 0;
        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
    }

    /// Removes all elements from the table without freeing the backing memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear(&mut self) {
        // Ensure that the table is reset even if one of the drops panic
        let self_ = guard(self, |self_| self_.clear_no_drop());

        if mem::needs_drop::<T>() && self_.len() != 0 {
            unsafe {
                for item in self_.iter() {
                    item.drop();
                }
            }
        }
    }

    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
        // Calculate the minimal number of elements that we need to reserve
        // space for.
        let min_size = usize::max(self.items, min_size);
        if min_size == 0 {
            *self = Self::new();
            return;
        }

        // Calculate the number of buckets that we need for this number of
        // elements. If the calculation overflows then the requested bucket
        // count must be larger than what we have right now and nothing needs
        // to be done.
        let min_buckets = match capacity_to_buckets(min_size) {
            Some(buckets) => buckets,
            None => return,
        };

        // If we have more buckets than we need, shrink the table.
        if min_buckets < self.buckets() {
            // Fast path if the table is empty
            if self.items == 0 {
                *self = Self::with_capacity(min_size)
            } else {
                // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                if self
                    .resize(min_size, hasher, Fallibility::Infallible)
                    .is_err()
                {
                    unsafe { hint::unreachable_unchecked() }
                }
            }
        }
    }
    /// Ensures that at least `additional` items can be inserted into the table
    /// without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
        if additional > self.growth_left {
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
            if self
                .reserve_rehash(additional, hasher, Fallibility::Infallible)
                .is_err()
            {
                unsafe { hint::unreachable_unchecked() }
            }
        }
    }

    /// Tries to ensure that at least `additional` items can be inserted into
    /// the table without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn try_reserve(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
    ) -> Result<(), TryReserveError> {
        if additional > self.growth_left {
            self.reserve_rehash(additional, hasher, Fallibility::Fallible)
        } else {
            Ok(())
        }
    }

    /// Out-of-line slow path for `reserve` and `try_reserve`.
    #[cold]
    #[inline(never)]
    fn reserve_rehash(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
        fallibility: Fallibility,
    ) -> Result<(), TryReserveError> {
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
        let new_items = match self.items.checked_add(additional) {
            Some(new_items) => new_items,
            None => return Err(fallibility.capacity_overflow()),
        };
        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
        if new_items <= full_capacity / 2 {
            // Rehash in-place without re-allocating if we have plenty of spare
            // capacity that is locked up due to DELETED entries.
            self.rehash_in_place(hasher);
            Ok(())
        } else {
            // Otherwise, conservatively resize to at least the next size up
            // to avoid churning deletes into frequent rehashes.
            self.resize(
                usize::max(new_items, full_capacity + 1),
                hasher,
                fallibility,
            )
        }
    }
    /// Rehashes the contents of the table in place (i.e. without changing the
    /// allocation).
    ///
    /// If `hasher` panics then some of the table's contents may be lost.
    fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
        unsafe {
            // Bulk convert all full control bytes to DELETED, and all DELETED
            // control bytes to EMPTY. This effectively frees up all buckets
            // containing a DELETED entry.
            for i in (0..self.buckets()).step_by(Group::WIDTH) {
                let group = Group::load_aligned(self.ctrl(i));
                let group = group.convert_special_to_empty_and_full_to_deleted();
                group.store_aligned(self.ctrl(i));
            }

            // Fix up the trailing control bytes. See the comments in set_ctrl
            // for the handling of tables smaller than the group width.
            if self.buckets() < Group::WIDTH {
                self.ctrl(0)
                    .copy_to(self.ctrl(Group::WIDTH), self.buckets());
            } else {
                self.ctrl(0)
                    .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
            }

            // If the hash function panics then properly clean up any elements
            // that we haven't rehashed yet. We unfortunately can't preserve the
            // elements since we've lost their hashes and have no way of
            // recovering them without risking another panic.
            let mut guard = guard(self, |self_| {
                if mem::needs_drop::<T>() {
                    for i in 0..self_.buckets() {
                        if *self_.ctrl(i) == DELETED {
                            self_.set_ctrl(i, EMPTY);
                            self_.bucket(i).drop();
                            self_.items -= 1;
                        }
                    }
                }
                self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
            });

            // At this point, DELETED elements are elements that we haven't
            // rehashed yet. Find them and re-insert them at their ideal
            // position.
            'outer: for i in 0..guard.buckets() {
                if *guard.ctrl(i) != DELETED {
                    continue;
                }
                loop {
                    // Hash the current item
                    let item = guard.bucket(i);
                    let hash = hasher(item.as_ref());

                    // Search for a suitable place to put it
                    let new_i = guard.find_insert_slot(hash);

                    // Probing works by scanning through all of the control
                    // bytes in groups, which may not be aligned to the group
                    // size. If both the new and old position fall within the
                    // same unaligned group, then there is no benefit in moving
                    // it and we can just continue to the next item.
                    let probe_index = |pos: usize| {
                        (pos.wrapping_sub(guard.probe_seq(hash).pos) & guard.bucket_mask)
                            / Group::WIDTH
                    };
                    if likely(probe_index(i) == probe_index(new_i)) {
                        guard.set_ctrl(i, h2(hash));
                        continue 'outer;
                    }

                    // We are moving the current item to a new position. Write
                    // our H2 to the control byte of the new position.
                    let prev_ctrl = *guard.ctrl(new_i);
                    guard.set_ctrl(new_i, h2(hash));

                    if prev_ctrl == EMPTY {
                        // If the target slot is empty, simply move the current
                        // element into the new slot and clear the old control
                        // byte.
                        guard.set_ctrl(i, EMPTY);
                        guard.bucket(new_i).copy_from_nonoverlapping(&item);
                        continue 'outer;
                    } else {
                        // If the target slot is occupied, swap the two elements
                        // and then continue processing the element that we just
                        // swapped into the old slot.
                        debug_assert_eq!(prev_ctrl, DELETED);
                        mem::swap(guard.bucket(new_i).as_mut(), item.as_mut());
                        continue;
                    }
                }
            }

            guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
            mem::forget(guard);
        }
    }
    /// Allocates a new table of a different size and moves the contents of the
    /// current table into it.
    fn resize(
        &mut self,
        capacity: usize,
        hasher: impl Fn(&T) -> u64,
        fallibility: Fallibility,
    ) -> Result<(), TryReserveError> {
        unsafe {
            debug_assert!(self.items <= capacity);

            // Allocate and initialize the new table.
            let mut new_table = Self::fallible_with_capacity(capacity, fallibility)?;
            new_table.growth_left -= self.items;
            new_table.items = self.items;

            // The hash function may panic, in which case we simply free the new
            // table without dropping any elements that may have been copied into
            // it.
            //
            // This guard is also used to free the old table on success, see
            // the comment at the bottom of this function.
            let mut new_table = guard(ManuallyDrop::new(new_table), |new_table| {
                if !new_table.is_empty_singleton() {
                    new_table.free_buckets();
                }
            });

            // Copy all elements to the new table.
            for item in self.iter() {
                // This may panic.
                let hash = hasher(item.as_ref());

                // We can use a simpler version of insert() here since:
                // - there are no DELETED entries.
                // - we know there is enough space in the table.
                // - all elements are unique.
                let index = new_table.find_insert_slot(hash);
                new_table.set_ctrl(index, h2(hash));
                new_table.bucket(index).copy_from_nonoverlapping(&item);
            }

            // We successfully copied all elements without panicking. Now replace
            // self with the new table. The old table will have its memory freed but
            // the items will not be dropped (since they have been moved into the
            // new table).
            mem::swap(self, &mut new_table);

            Ok(())
        }
    }
    /// Inserts a new element into the table.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
        unsafe {
            let mut index = self.find_insert_slot(hash);

            // We can avoid growing the table once we have reached our load
            // factor if we are replacing a tombstone. This works since the
            // number of EMPTY slots does not change in this case.
            let old_ctrl = *self.ctrl(index);
            if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
                self.reserve(1, hasher);
                index = self.find_insert_slot(hash);
            }

            let bucket = self.bucket(index);
            self.growth_left -= special_is_empty(old_ctrl) as usize;
            self.set_ctrl(index, h2(hash));
            bucket.write(value);
            self.items += 1;
            bucket
        }
    }

    /// Inserts a new element into the table, without growing the table.
    ///
    /// There must be enough space in the table to insert the new element.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
    pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
        unsafe {
            let index = self.find_insert_slot(hash);
            let bucket = self.bucket(index);

            // If we are replacing a DELETED entry then we don't need to update
            // the load counter.
            let old_ctrl = *self.ctrl(index);
            self.growth_left -= special_is_empty(old_ctrl) as usize;

            self.set_ctrl(index, h2(hash));
            bucket.write(value);
            self.items += 1;
            bucket
        }
    }

    /// Temporarily removes a bucket, applying the given function to the removed
    /// element and optionally putting back the returned value in the same bucket.
    ///
    /// Returns `true` if the bucket still contains an element.
    ///
    /// This does not check if the given bucket is actually occupied.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
    where
        F: FnOnce(T) -> Option<T>,
    {
        let index = self.bucket_index(&bucket);
        let old_ctrl = *self.ctrl(index);
        debug_assert!(is_full(old_ctrl));
        let old_growth_left = self.growth_left;
        let item = self.remove(bucket);
        if let Some(new_item) = f(item) {
            self.growth_left = old_growth_left;
            self.set_ctrl(index, old_ctrl);
            self.items += 1;
            self.bucket(index).write(new_item);
            true
        } else {
            false
        }
    }

    /// Searches for an element in the table.
    #[inline]
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
        unsafe {
            for bucket in self.iter_hash(hash) {
                let elm = bucket.as_ref();
                if likely(eq(elm)) {
                    return Some(bucket);
                }
            }
            None
        }
    }
    /// Returns the number of elements the map can hold without reallocating.
    ///
    /// This number is a lower bound; the table might be able to hold
    /// more, but is guaranteed to be able to hold at least this many.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn capacity(&self) -> usize {
        self.items + self.growth_left
    }

    /// Returns the number of elements in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn len(&self) -> usize {
        self.items
    }

    /// Returns the number of buckets in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn buckets(&self) -> usize {
        self.bucket_mask + 1
    }

    /// Returns the number of control bytes in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    fn num_ctrl_bytes(&self) -> usize {
        self.bucket_mask + 1 + Group::WIDTH
    }

    /// Returns whether this table points to the empty singleton with a capacity
    /// of 0.
    #[cfg_attr(feature = "inline-more", inline)]
    fn is_empty_singleton(&self) -> bool {
        self.bucket_mask == 0
    }

    /// Returns an iterator over every element in the table. It is up to
    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
    /// Because we cannot make the `next` method unsafe on the `RawIter`
    /// struct, we have to make the `iter` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn iter(&self) -> RawIter<T> {
        let data = Bucket::from_base_index(self.data_end(), 0);
        RawIter {
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
            items: self.items,
        }
    }

    /// Returns an iterator over occupied buckets that could match a given hash.
    ///
    /// In rare cases, the iterator may return a bucket with a different hash.
    ///
    /// It is up to the caller to ensure that the `RawTable` outlives the
    /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
    /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T> {
        RawIterHash::new(self, hash)
    }
    /// Returns an iterator which removes all elements from the table without
    /// freeing the memory.
    ///
    /// It is up to the caller to ensure that the `RawTable` outlives the `RawDrain`.
    /// Because we cannot make the `next` method unsafe on the `RawDrain`,
    /// we have to make the `drain` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drain(&mut self) -> RawDrain<'_, T> {
        let iter = self.iter();
        self.drain_iter_from(iter)
    }

    /// Returns an iterator which removes all elements from the table without
    /// freeing the memory.
    ///
    /// It is up to the caller to ensure that the `RawTable` outlives the `RawDrain`.
    /// Because we cannot make the `next` method unsafe on the `RawDrain`,
    /// we have to make the `drain` method unsafe.
    ///
    /// Iteration starts at the provided iterator's current location.
    /// You must ensure that the iterator covers all items that remain in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T> {
        debug_assert_eq!(iter.len(), self.len());
        RawDrain {
            iter,
            table: ManuallyDrop::new(mem::replace(self, Self::new())),
            orig_table: NonNull::from(self),
            marker: PhantomData,
        }
    }

    /// Returns an iterator which consumes all elements from the table.
    ///
    /// It is up to the caller to ensure that the `RawTable` outlives the `RawIntoIter`.
    /// Because we cannot make the `next` method unsafe on the `RawIntoIter`,
    /// we have to make the `into_iter_from` method unsafe.
    ///
    /// Iteration starts at the provided iterator's current location.
    /// You must ensure that the iterator covers all items that remain in the table.
    pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T> {
        debug_assert_eq!(iter.len(), self.len());

        let alloc = self.into_alloc();
        RawIntoIter {
            iter,
            alloc,
            marker: PhantomData,
        }
    }

    /// Converts the table into a raw allocation. The contents of the table
    /// should be dropped using a `RawIter` before freeing the allocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) fn into_alloc(self) -> Option<(NonNull<u8>, Layout)> {
        let alloc = if self.is_empty_singleton() {
            None
        } else {
            // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
            let (layout, ctrl_offset) = match calculate_layout::<T>(self.buckets()) {
                Some(lco) => lco,
                None => unsafe { hint::unreachable_unchecked() },
            };
            Some((
                unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
                layout,
            ))
        };
        mem::forget(self);
        alloc
    }
}
unsafe impl<T> Send for RawTable<T> where T: Send {}
unsafe impl<T> Sync for RawTable<T> where T: Sync {}
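
// An illustrative sketch (not an upstream test) of the basic workflow: the
// caller supplies the hash for every operation, here via a toy identity hasher.
#[cfg(test)]
#[test]
fn raw_table_insert_find_sketch() {
    let mut table: RawTable<u64> = RawTable::with_capacity(4);
    let hasher = |v: &u64| *v;
    table.insert(hasher(&7), 7, hasher);
    table.insert(hasher(&8), 8, hasher);
    assert_eq!(table.len(), 2);
    assert!(table.find(hasher(&7), |v| *v == 7).is_some());
    assert!(table.find(hasher(&9), |v| *v == 9).is_none());
}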
impl<T: Clone> Clone for RawTable<T> {
    fn clone(&self) -> Self {
        if self.is_empty_singleton() {
            Self::new()
        } else {
            unsafe {
                let mut new_table = ManuallyDrop::new(
                    // Avoid `Result::ok_or_else` because it bloats LLVM IR.
                    match Self::new_uninitialized(self.buckets(), Fallibility::Infallible) {
                        Ok(table) => table,
                        Err(_) => hint::unreachable_unchecked(),
                    },
                );

                new_table.clone_from_spec(self, |new_table| {
                    // We need to free the memory allocated for the new table.
                    new_table.free_buckets();
                });

                // Return the newly created table.
                ManuallyDrop::into_inner(new_table)
            }
        }
    }

    fn clone_from(&mut self, source: &Self) {
        if source.is_empty_singleton() {
            *self = Self::new();
        } else {
            unsafe {
                // First, drop all our elements without clearing the control bytes.
                if mem::needs_drop::<T>() && self.len() != 0 {
                    for item in self.iter() {
                        item.drop();
                    }
                }

                // If necessary, resize our table to match the source.
                if self.buckets() != source.buckets() {
                    // Skip our drop by using ptr::write.
                    if !self.is_empty_singleton() {
                        self.free_buckets();
                    }
                    (self as *mut Self).write(
                        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                        match Self::new_uninitialized(source.buckets(), Fallibility::Infallible) {
                            Ok(table) => table,
                            Err(_) => hint::unreachable_unchecked(),
                        },
                    );
                }

                self.clone_from_spec(source, |self_| {
                    // We need to leave the table in an empty state.
                    self_.clear_no_drop()
                });
            }
        }
    }
}
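
// An illustrative sketch (not an upstream test): cloning copies the control
// bytes and clones every element into an independent allocation.
#[cfg(test)]
#[test]
fn clone_sketch() {
    let mut table: RawTable<u32> = RawTable::with_capacity(8);
    let hasher = |v: &u32| *v as u64;
    table.insert(hasher(&9), 9, hasher);
    let copy = table.clone();
    assert_eq!(copy.len(), 1);
    assert!(copy.find(hasher(&9), |v| *v == 9).is_some());
}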
/// Specialization of `clone_from` for `Copy` types
trait RawTableClone {
    unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self));
}

impl<T: Clone> RawTableClone for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) {
        self.clone_from_impl(source, on_panic);
    }
}

#[cfg(feature = "nightly")]
impl<T: Copy> RawTableClone for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_spec(&mut self, source: &Self, _on_panic: impl FnMut(&mut Self)) {
        source
            .ctrl(0)
            .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());
        source
            .data_start()
            .copy_to_nonoverlapping(self.data_start(), self.buckets());

        self.items = source.items;
        self.growth_left = source.growth_left;
    }
}
impl<T: Clone> RawTable<T> {
    /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) {
        // Copy the control bytes unchanged. We do this in a single pass.
        source
            .ctrl(0)
            .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());

        // The cloning of elements may panic, in which case we need
        // to make sure we drop only the elements that have been
        // cloned so far.
        let mut guard = guard((0, &mut *self), |(index, self_)| {
            if mem::needs_drop::<T>() && self_.len() != 0 {
                for i in 0..=*index {
                    if is_full(*self_.ctrl(i)) {
                        self_.bucket(i).drop();
                    }
                }
            }

            // Depending on whether we were called from clone or clone_from, we
            // either need to free the memory for the destination table or just
            // clear the control bytes.
            on_panic(self_);
        });

        for from in source.iter() {
            let index = source.bucket_index(&from);
            let to = guard.1.bucket(index);
            to.write(from.as_ref().clone());

            // Update the index in case we need to unwind.
            guard.0 = index;
        }

        // Successfully cloned all items, no need to clean up.
        mem::forget(guard);

        self.items = source.items;
        self.growth_left = source.growth_left;
    }

    /// Variant of `clone_from` to use when a hasher is available.
    #[cfg(feature = "raw")]
    pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
        // If we have enough capacity in the table, just clear it and insert
        // elements one by one. We don't do this if we have the same number of
        // buckets as the source since we can just copy the contents directly
        // in that case.
        if self.buckets() != source.buckets()
            && bucket_mask_to_capacity(self.bucket_mask) >= source.len()
        {
            self.clear();

            let guard_self = guard(&mut *self, |self_| {
                // Clear the partially copied table if a panic occurs, otherwise
                // items and growth_left will be out of sync with the contents
                // of the table.
                self_.clear();
            });

            unsafe {
                for item in source.iter() {
                    // This may panic.
                    let item = item.as_ref().clone();
                    let hash = hasher(&item);

                    // We can use a simpler version of insert() here since:
                    // - there are no DELETED entries.
                    // - we know there is enough space in the table.
                    // - all elements are unique.
                    let index = guard_self.find_insert_slot(hash);
                    guard_self.set_ctrl(index, h2(hash));
                    guard_self.bucket(index).write(item);
                }
            }

            // Successfully cloned all items, no need to clean up.
            mem::forget(guard_self);

            self.items = source.items;
            self.growth_left -= source.items;
        } else {
            self.clone_from(source);
        }
    }
}
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T> Drop for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                if mem::needs_drop::<T>() && self.len() != 0 {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}

#[cfg(not(feature = "nightly"))]
impl<T> Drop for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                if mem::needs_drop::<T>() && self.len() != 0 {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}
impl<T> IntoIterator for RawTable<T> {
    type Item = T;
    type IntoIter = RawIntoIter<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn into_iter(self) -> RawIntoIter<T> {
        unsafe {
            let iter = self.iter();
            self.into_iter_from(iter)
        }
    }
}
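
// An illustrative sketch (not an upstream test): consuming iteration yields
// each inserted value exactly once and then frees the allocation.
#[cfg(test)]
#[test]
fn into_iter_sketch() {
    let mut table: RawTable<u32> = RawTable::with_capacity(8);
    let hasher = |v: &u32| *v as u64;
    for v in 0..4u32 {
        table.insert(hasher(&v), v, hasher);
    }
    let mut sum = 0;
    for v in table {
        sum += v;
    }
    assert_eq!(sum, 6);
}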
/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
/// not track an item count.
pub(crate) struct RawIterRange<T> {
    // Mask of full buckets in the current group. Bits are cleared from this
    // mask as each element is processed.
    current_group: BitMask,

    // Pointer to the buckets for the current group.
    data: Bucket<T>,

    // Pointer to the next group of control bytes,
    // Must be aligned to the group size.
    next_ctrl: *const u8,

    // Pointer one past the last control byte of this range.
    end: *const u8,
}

impl<T> RawIterRange<T> {
    /// Returns a `RawIterRange` covering a subset of a table.
    ///
    /// The control byte address must be aligned to the group size.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
        debug_assert_ne!(len, 0);
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
        let end = ctrl.add(len);

        // Load the first group and advance ctrl to point to the next group
        let current_group = Group::load_aligned(ctrl).match_full();
        let next_ctrl = ctrl.add(Group::WIDTH);

        Self {
            current_group,
            data,
            next_ctrl,
            end,
        }
    }
    /// Splits a `RawIterRange` into two halves.
    ///
    /// Returns `None` if the remaining range is smaller than or equal to the
    /// group width.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "rayon")]
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
        unsafe {
            if self.end <= self.next_ctrl {
                // Nothing to split if the group that we are currently processing
                // is the last one.
                (self, None)
            } else {
                // len is the remaining number of elements after the group that
                // we are currently processing. It must be a multiple of the
                // group size (small tables are caught by the check above).
                let len = offset_from(self.end, self.next_ctrl);
                debug_assert_eq!(len % Group::WIDTH, 0);

                // Split the remaining elements into two halves, but round the
                // midpoint down in case there is an odd number of groups
                // remaining. This ensures that:
                // - The tail is at least 1 group long.
                // - The split is roughly even considering we still have the
                //   current group to process.
                let mid = (len / 2) & !(Group::WIDTH - 1);

                let tail = Self::new(
                    self.next_ctrl.add(mid),
                    self.data.next_n(Group::WIDTH).next_n(mid),
                    len - mid,
                );
                debug_assert_eq!(
                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
                    tail.data.ptr
                );
                debug_assert_eq!(self.end, tail.end);
                self.end = self.next_ctrl.add(mid);
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);

                (self, Some(tail))
            }
        }
    }
}
// We make raw iterators unconditionally Send and Sync, and let the PhantomData
// in the actual iterator implementations determine the real Send/Sync bounds.
unsafe impl<T> Send for RawIterRange<T> {}
unsafe impl<T> Sync for RawIterRange<T> {}

impl<T> Clone for RawIterRange<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self {
            data: self.data.clone(),
            next_ctrl: self.next_ctrl,
            current_group: self.current_group,
            end: self.end,
        }
    }
}
impl<T> Iterator for RawIterRange<T> {
    type Item = Bucket<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            loop {
                if let Some(index) = self.current_group.lowest_set_bit() {
                    self.current_group = self.current_group.remove_lowest_bit();
                    return Some(self.data.next_n(index));
                }

                if self.next_ctrl >= self.end {
                    return None;
                }

                // We might read past self.end up to the next group boundary,
                // but this is fine because it only occurs on tables smaller
                // than the group size where the trailing control bytes are all
                // EMPTY. On larger tables self.end is guaranteed to be aligned
                // to the group size (since tables are power-of-two sized).
                self.current_group = Group::load_aligned(self.next_ctrl).match_full();
                self.data = self.data.next_n(Group::WIDTH);
                self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
            }
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // We don't have an item count, so just guess based on the range size.
        (
            0,
            Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
        )
    }
}

impl<T> FusedIterator for RawIterRange<T> {}
/// Iterator which returns a raw pointer to every full bucket in the table.
///
/// For maximum flexibility this iterator is not bound by a lifetime, but you
/// must observe several rules when using it:
/// - You must not free the hash table while iterating (including via growing/shrinking).
/// - It is fine to erase a bucket that has been yielded by the iterator.
/// - Erasing a bucket that has not yet been yielded by the iterator may still
///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
/// - It is unspecified whether an element inserted after the iterator was
///   created will be yielded by that iterator (unless `reflect_insert` is called).
/// - The order in which the iterator yields buckets is unspecified and may
///   change in the future.
pub struct RawIter<T> {
    pub(crate) iter: RawIterRange<T>,
    items: usize,
}
impl<T> RawIter<T> {
    /// Refresh the iterator so that it reflects a removal from the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each removed bucket before `next` is called again.
    ///
    /// This method should be called _before_ the removal is made. It is not necessary to call this
    /// method if you are removing an item that this iterator yielded in the past.
    #[cfg(feature = "raw")]
    pub fn reflect_remove(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, false);
    }

    /// Refresh the iterator so that it reflects an insertion into the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each insert before `next` is called again.
    ///
    /// This method does not guarantee that an insertion of a bucket with a greater
    /// index than the last one yielded will be reflected in the iterator.
    ///
    /// This method should be called _after_ the given insert is made.
    #[cfg(feature = "raw")]
    pub fn reflect_insert(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, true);
    }
    /// Refresh the iterator so that it reflects a change to the state of the given bucket.
    #[cfg(feature = "raw")]
    fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
        unsafe {
            if b.as_ptr() > self.iter.data.as_ptr() {
                // The iterator has already passed the bucket's group.
                // So the toggle isn't relevant to this iterator.
                return;
            }

            if self.iter.next_ctrl < self.iter.end
                && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
            {
                // The iterator has not yet reached the bucket's group.
                // We don't need to reload anything, but we do need to adjust the item count.

                if cfg!(debug_assertions) {
                    // Double-check that the user isn't lying to us by checking the bucket state.
                    // To do that, we need to find its control byte. We know that self.iter.data is
                    // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
                    let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                    let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
                    // This method should be called _before_ a removal, or _after_ an insert,
                    // so in both cases the ctrl byte should indicate that the bucket is full.
                    assert!(is_full(*ctrl));
                }

                if is_insert {
                    self.items += 1;
                } else {
                    self.items -= 1;
                }
            } else {
                // The iterator is at the bucket group that the toggled bucket is in.
                // We need to do two things:
                //
                //  - Determine if the iterator already yielded the toggled bucket.
                //    If it did, we're done.
                //  - Otherwise, update the iterator cached group so that it won't
                //    yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
                //    We'll also need to update the item count accordingly.
                if let Some(index) = self.iter.current_group.lowest_set_bit() {
                    let next_bucket = self.iter.data.next_n(index);
                    if b.as_ptr() > next_bucket.as_ptr() {
                        // The toggled bucket is "before" the bucket the iterator would yield next. We
                        // therefore don't need to do anything --- the iterator has already passed the
                        // bucket in question.
                        //
                        // The item count must already be correct, since a removal or insert "prior" to
                        // the iterator's position wouldn't affect the item count.
                    } else {
                        // The removed bucket is an upcoming bucket. We need to make sure it does _not_
                        // get yielded, and also that it's no longer included in the item count.
                        //
                        // NOTE: We can't just reload the group here, both since that might reflect
                        // inserts we've already passed, and because that might inadvertently unset the
                        // bits for _other_ removals. If we do that, we'd have to also decrement the
                        // item count for those other bits that we unset. But the presumably subsequent
                        // call to reflect for those buckets might _also_ decrement the item count.
                        // Instead, we _just_ flip the bit for the particular bucket the caller asked
                        // for.
                        let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                        let was_full = self.iter.current_group.flip(our_bit);
                        debug_assert_ne!(was_full, is_insert);

                        if is_insert {
                            self.items += 1;
                        } else {
                            self.items -= 1;
                        }

                        if cfg!(debug_assertions) {
                            if b.as_ptr() == next_bucket.as_ptr() {
                                // The removed bucket should no longer be next
                                debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
                            } else {
                                // We should not have changed what bucket comes next.
                                debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
                            }
                        }
                    }
                } else {
                    // We must have already iterated past the removed item.
                }
            }
        }
    }
}
impl<T> Clone for RawIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self {
            iter: self.iter.clone(),
            items: self.items,
        }
    }
}

impl<T> Iterator for RawIter<T> {
    type Item = Bucket<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        if let Some(b) = self.iter.next() {
            self.items -= 1;
            Some(b)
        } else {
            // We don't check against items == 0 here to allow the
            // compiler to optimize away the item count entirely if the
            // iterator length is never queried.
            debug_assert_eq!(self.items, 0);
            None
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.items, Some(self.items))
    }
}

impl<T> ExactSizeIterator for RawIter<T> {}
impl<T> FusedIterator for RawIter<T> {}
/// Iterator which consumes a table and returns elements.
pub struct RawIntoIter<T> {
    iter: RawIter<T>,
    alloc: Option<(NonNull<u8>, Layout)>,
    marker: PhantomData<T>,
}

impl<T> RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}

unsafe impl<T> Send for RawIntoIter<T> where T: Send {}
unsafe impl<T> Sync for RawIntoIter<T> where T: Sync {}
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T> Drop for RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() && self.iter.len() != 0 {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Free the table
            if let Some((ptr, layout)) = self.alloc {
                dealloc(ptr.as_ptr(), layout);
            }
        }
    }
}

#[cfg(not(feature = "nightly"))]
impl<T> Drop for RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() && self.iter.len() != 0 {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Free the table
            if let Some((ptr, layout)) = self.alloc {
                dealloc(ptr.as_ptr(), layout);
            }
        }
    }
}
impl<T> Iterator for RawIntoIter<T> {
    type Item = T;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        unsafe { Some(self.iter.next()?.read()) }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<T> ExactSizeIterator for RawIntoIter<T> {}
impl<T> FusedIterator for RawIntoIter<T> {}
/// Iterator which consumes elements without freeing the table storage.
pub struct RawDrain<'a, T> {
    iter: RawIter<T>,

    // The table is moved into the iterator for the duration of the drain. This
    // ensures that an empty table is left if the drain iterator is leaked
    // without dropping.
    table: ManuallyDrop<RawTable<T>>,
    orig_table: NonNull<RawTable<T>>,

    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
    // covariant over T.
    marker: PhantomData<&'a RawTable<T>>,
}

impl<T> RawDrain<'_, T> {
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}

unsafe impl<T> Send for RawDrain<'_, T> where T: Send {}
unsafe impl<T> Sync for RawDrain<'_, T> where T: Sync {}
impl<T> Drop for RawDrain<'_, T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements. Note that this may panic.
            if mem::needs_drop::<T>() && self.iter.len() != 0 {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Reset the contents of the table now that all elements have been
            // dropped.
            self.table.clear_no_drop();

            // Move the now empty table back to its original location.
            self.orig_table
                .as_ptr()
                .copy_from_nonoverlapping(&*self.table, 1);
        }
    }
}
impl<T> Iterator for RawDrain<'_, T> {
    type Item = T;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        unsafe {
            let item = self.iter.next()?;
            Some(item.read())
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<T> ExactSizeIterator for RawDrain<'_, T> {}
impl<T> FusedIterator for RawDrain<'_, T> {}
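
// An illustrative sketch (not an upstream test): draining removes every element
// while the allocation itself stays alive for reuse.
#[cfg(test)]
#[test]
fn drain_sketch() {
    let mut table: RawTable<u32> = RawTable::with_capacity(8);
    let hasher = |v: &u32| *v as u64;
    table.insert(hasher(&1), 1, hasher);
    table.insert(hasher(&2), 2, hasher);
    let drained = unsafe { table.drain().count() };
    assert_eq!(drained, 2);
    assert_eq!(table.len(), 0);
    assert!(table.capacity() > 0);
}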
/// Iterator over occupied buckets that could match a given hash.
///
/// In rare cases, the iterator may return a bucket with a different hash.
pub struct RawIterHash<'a, T> {
    table: &'a RawTable<T>,

    // The top 7 bits of the hash.
    h2_hash: u8,

    // The sequence of groups to probe in the search.
    probe_seq: ProbeSeq,

    // The current group and its position.
    pos: usize,
    group: Group,

    // The elements within the group with a matching h2-hash.
    bitmask: BitMaskIter,
}

impl<'a, T> RawIterHash<'a, T> {
    fn new(table: &'a RawTable<T>, hash: u64) -> Self {
        unsafe {
            let h2_hash = h2(hash);
            let mut probe_seq = table.probe_seq(hash);
            let pos = probe_seq.next().unwrap();
            let group = Group::load(table.ctrl(pos));
            let bitmask = group.match_byte(h2_hash).into_iter();

            RawIterHash {
                table,
                h2_hash,
                probe_seq,
                pos,
                group,
                bitmask,
            }
        }
    }
}
impl<'a, T> Iterator for RawIterHash<'a, T> {
    type Item = Bucket<T>;

    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            loop {
                if let Some(bit) = self.bitmask.next() {
                    let index = (self.pos + bit) & self.table.bucket_mask;
                    let bucket = self.table.bucket(index);
                    return Some(bucket);
                }
                if likely(self.group.match_empty().any_bit_set()) {
                    return None;
                }
                self.pos = self.probe_seq.next().unwrap();
                self.group = Group::load(self.table.ctrl(self.pos));
                self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
            }
        }
    }
}
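
// An illustrative sketch (not an upstream test): iter_hash only visits buckets
// whose control byte matches h2(hash), so callers still compare the actual key.
#[cfg(test)]
#[test]
fn iter_hash_sketch() {
    let mut table: RawTable<u64> = RawTable::with_capacity(8);
    let hasher = |v: &u64| *v;
    table.insert(hasher(&3), 3, hasher);
    table.insert(hasher(&4), 4, hasher);
    unsafe {
        let mut found = false;
        for bucket in table.iter_hash(hasher(&3)) {
            if *bucket.as_ref() == 3 {
                found = true;
            }
        }
        assert!(found);
    }
}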