use crate::alloc::alloc::{handle_alloc_error, Layout};
use crate::scopeguard::guard;
use crate::TryReserveError;
#[cfg(feature = "nightly")]
use crate::UnavailableMutError;
use core::hint;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::mem::ManuallyDrop;
#[cfg(feature = "nightly")]
use core::mem::MaybeUninit;
use core::ptr::NonNull;
cfg_if! {
    // Use the SSE2 implementation if possible: it allows us to scan 16 buckets
    // at once instead of 8. We don't bother with AVX since it would require
    // runtime dispatch and wouldn't gain us much anyways: the probability of
    // finding a match drops off drastically after the first few buckets.
    //
    // I attempted an implementation on ARM using NEON instructions, but it
    // turns out that most NEON instructions have multi-cycle latency, which in
    // the end outweighs any gains over the generic implementation.
    if #[cfg(all(
        target_feature = "sse2",
        any(target_arch = "x86", target_arch = "x86_64"),
    ))] {
        mod sse2;
        use sse2 as imp;
    } else {
        #[path = "generic.rs"]
        mod generic;
        use generic as imp;
    }
}

mod alloc;
pub(crate) use self::alloc::{do_alloc, Allocator, Global};

mod bitmask;

use self::bitmask::{BitMask, BitMaskIter};
use self::imp::Group;
// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};

// On stable we can use #[cold] to get an equivalent effect: this attribute
// suggests that the function is unlikely to be called
#[cfg(not(feature = "nightly"))]
#[inline]
#[cold]
fn cold() {}

#[cfg(not(feature = "nightly"))]
#[inline]
fn likely(b: bool) -> bool {
    if !b {
        cold()
    }
    b
}
#[cfg(not(feature = "nightly"))]
#[inline]
fn unlikely(b: bool) -> bool {
    if b {
        cold()
    }
    b
}

#[cfg(feature = "nightly")]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}
#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    (to as usize - from as usize) / mem::size_of::<T>()
}
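
// Illustrative sketch (added for exposition, not part of the upstream source):
// both versions of `offset_from` above compute the distance in elements
// between two pointers into the same array.
#[cfg(test)]
mod offset_from_sketch {
    #[test]
    fn element_distance() {
        let data = [0u32; 8];
        let base = data.as_ptr();
        let five = unsafe { base.add(5) };
        // 5 elements apart, regardless of the element size in bytes.
        assert_eq!(unsafe { super::offset_from(five, base) }, 5);
    }
}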
/// Whether memory allocation errors should return an error or abort.
#[derive(Copy, Clone)]
enum Fallibility {
    Fallible,
    Infallible,
}

impl Fallibility {
    /// Error to return on capacity overflow.
    #[cfg_attr(feature = "inline-more", inline)]
    fn capacity_overflow(self) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::CapacityOverflow,
            Fallibility::Infallible => panic!("Hash table capacity overflow"),
        }
    }

    /// Error to return on allocation error.
    #[cfg_attr(feature = "inline-more", inline)]
    fn alloc_err(self, layout: Layout) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::AllocError { layout },
            Fallibility::Infallible => handle_alloc_error(layout),
        }
    }
}
/// Control byte value for an empty bucket.
const EMPTY: u8 = 0b1111_1111;

/// Control byte value for a deleted bucket.
const DELETED: u8 = 0b1000_0000;

/// Checks whether a control byte represents a full bucket (top bit is clear).
#[inline]
fn is_full(ctrl: u8) -> bool {
    ctrl & 0x80 == 0
}

/// Checks whether a control byte represents a special value (top bit is set).
#[inline]
fn is_special(ctrl: u8) -> bool {
    ctrl & 0x80 != 0
}

/// Checks whether a special control value is EMPTY (just check 1 bit).
#[inline]
fn special_is_empty(ctrl: u8) -> bool {
    debug_assert!(is_special(ctrl));
    ctrl & 0x01 != 0
}
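
// Illustrative sketch (added for exposition): the three kinds of control byte
// can be told apart with the two bit tests above. EMPTY (0xff) and DELETED
// (0x80) both have the top bit set, and only EMPTY has the low bit set.
#[cfg(test)]
mod ctrl_byte_sketch {
    use super::{is_full, is_special, special_is_empty, DELETED, EMPTY};

    #[test]
    fn classify_control_bytes() {
        // A full bucket stores h2(hash) in the low 7 bits, so its top bit is clear.
        let full = 0b0101_1010;
        assert!(is_full(full) && !is_special(full));

        // Both special values have the top bit set...
        assert!(is_special(EMPTY) && is_special(DELETED));
        // ...and the low bit distinguishes EMPTY from DELETED.
        assert!(special_is_empty(EMPTY));
        assert!(!special_is_empty(DELETED));
    }
}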
/// Primary hash function, used to select the initial bucket to probe from.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // On 32-bit platforms we simply ignore the higher hash bits.
    hash as usize
}

/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
    // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
    // value, some hash functions (such as FxHash) produce a usize result
    // instead, which means that the top 32 bits are 0 on 32-bit platforms.
    let hash_len = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
    let top7 = hash >> (hash_len * 8 - 7);
    (top7 & 0x7f) as u8 // truncation
}
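
// Illustrative sketch (added for exposition): h1 keeps the low bits of the
// hash for bucket selection while h2 keeps the top 7 bits for the control
// byte, so the two functions use nearly disjoint parts of the hash.
#[cfg(test)]
mod h1_h2_sketch {
    use super::{h1, h2};

    #[test]
    fn top_seven_bits_become_ctrl() {
        let hash: u64 = 0xabcd_ef01_2345_6789;
        // On 64-bit platforms h2 is exactly the top 7 bits of the hash.
        #[cfg(target_pointer_width = "64")]
        assert_eq!(h2(hash), (hash >> 57) as u8);
        // h2 always fits in the low 7 bits of a control byte.
        assert!(h2(hash) < 0x80);
        // h1 is just the hash truncated to usize.
        assert_eq!(h1(hash), hash as usize);
    }
}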
/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
struct ProbeSeq {
    pos: usize,
    stride: usize,
}

impl ProbeSeq {
    #[inline]
    fn move_next(&mut self, bucket_mask: usize) {
        // We should have found an empty bucket by now and ended the probe.
        debug_assert!(
            self.stride <= bucket_mask,
            "Went past end of probe sequence"
        );

        self.stride += Group::WIDTH;
        self.pos += self.stride;
        self.pos &= bucket_mask;
    }
}
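
// Illustrative sketch (added for exposition): with a power-of-two number of
// buckets, the triangular probe sequence visits every group exactly once
// before wrapping, which is what guarantees that the probe loop terminates.
#[cfg(test)]
mod probe_seq_sketch {
    use super::{Group, ProbeSeq};

    #[test]
    fn visits_every_group_once() {
        let buckets = Group::WIDTH * 8; // any power-of-two table size
        let bucket_mask = buckets - 1;
        let groups = buckets / Group::WIDTH;
        let mut seq = ProbeSeq { pos: 0, stride: 0 };
        let mut seen: u32 = 0;
        for _ in 0..groups {
            let group = seq.pos / Group::WIDTH;
            assert_eq!(seen & (1 << group), 0, "group visited twice");
            seen |= 1 << group;
            seq.move_next(bucket_mask);
        }
        // All groups were visited exactly once.
        assert_eq!(seen.count_ones() as usize, groups);
    }
}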
/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
#[cfg_attr(not(target_os = "emscripten"), inline)]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    debug_assert_ne!(cap, 0);

    // For small tables we require at least 1 empty bucket so that lookups are
    // guaranteed to terminate if an element doesn't exist in the table.
    //
    // We don't bother with a table size of 2 buckets since that can only
    // hold a single element. Instead we skip directly to a 4 bucket table
    // which can hold 3 elements.
    if cap < 8 {
        return Some(if cap < 4 { 4 } else { 8 });
    }

    // Otherwise require 1/8 buckets to be empty (87.5% load)
    //
    // Be careful when modifying this, calculate_layout relies on the
    // overflow check here.
    let adjusted_cap = cap.checked_mul(8)? / 7;

    // Any overflows will have been caught by the checked_mul. Also, any
    // rounding errors from the division above will be cleaned up by
    // next_power_of_two (which can't overflow because of the previous division).
    Some(adjusted_cap.next_power_of_two())
}

/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[inline]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
        // Keep in mind that the bucket mask is one less than the bucket count.
        bucket_mask
    } else {
        // For larger tables we reserve 12.5% of the slots as empty.
        ((bucket_mask + 1) / 8) * 7
    }
}
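
// Illustrative sketch (added for exposition): the two functions above are
// approximate inverses under the 87.5% maximum load factor. For example,
// holding 100 items needs 100 * 8 / 7 = 114 slots, rounded up to 128 buckets,
// and a 128-bucket table in turn advertises a capacity of 112.
#[cfg(test)]
mod load_factor_sketch {
    use super::{bucket_mask_to_capacity, capacity_to_buckets};

    #[test]
    fn capacity_round_trip() {
        assert_eq!(capacity_to_buckets(100), Some(128));
        assert_eq!(bucket_mask_to_capacity(128 - 1), 112);

        // Small tables skip straight to 4 or 8 buckets and keep 1 slot free.
        assert_eq!(capacity_to_buckets(3), Some(4));
        assert_eq!(bucket_mask_to_capacity(4 - 1), 3);

        // A requested capacity always fits in the resulting table.
        for cap in 1..200usize {
            let buckets = capacity_to_buckets(cap).unwrap();
            assert!(bucket_mask_to_capacity(buckets - 1) >= cap);
        }
    }
}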
/// Helper which allows the max calculation for ctrl_align to be statically computed for each T
/// while keeping the rest of `calculate_layout_for` independent of `T`
#[derive(Copy, Clone)]
struct TableLayout {
    size: usize,
    ctrl_align: usize,
}

impl TableLayout {
    #[inline]
    fn new<T>() -> Self {
        let layout = Layout::new::<T>();
        Self {
            size: layout.size(),
            ctrl_align: usize::max(layout.align(), Group::WIDTH),
        }
    }

    #[inline]
    fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
        debug_assert!(buckets.is_power_of_two());

        let TableLayout { size, ctrl_align } = self;
        // Manual layout calculation since Layout methods are not yet stable.
        let ctrl_offset =
            size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
        let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;

        Some((
            unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
            ctrl_offset,
        ))
    }
}

/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past last element of buckets)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    TableLayout::new::<T>().calculate_layout_for(buckets)
}
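
// Illustrative sketch (added for exposition): the allocation puts all the
// `T`s first and the control bytes last, with `ctrl_offset` marking the
// boundary. For example, 8 buckets of u64 need 8 * 8 = 64 bytes of data
// followed by `8 + Group::WIDTH` control bytes.
#[cfg(test)]
mod layout_sketch {
    use super::{calculate_layout, Group};
    use core::mem;

    #[test]
    fn data_then_ctrl() {
        let buckets = 8;
        let (layout, ctrl_offset) = calculate_layout::<u64>(buckets).unwrap();
        // The control bytes start right after the (aligned) data array...
        assert_eq!(ctrl_offset, buckets * mem::size_of::<u64>());
        // ...and the allocation has room for `buckets + Group::WIDTH` of them.
        assert_eq!(layout.size(), ctrl_offset + buckets + Group::WIDTH);
    }
}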
/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // Actually it is a pointer to the next element rather than the element itself;
    // this is needed to maintain pointer arithmetic invariants, since keeping a
    // direct pointer to the element would introduce difficulties.
    // Using `NonNull` for variance and niche layout.
    ptr: NonNull<T>,
}

// This Send impl is needed for rayon support. This is safe since Bucket is
// never exposed in a public API.
unsafe impl<T> Send for Bucket<T> {}

impl<T> Clone for Bucket<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self { ptr: self.ptr }
    }
}

impl<T> Bucket<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            // won't overflow because index must be less than length
            (index + 1) as *mut T
        } else {
            base.as_ptr().sub(index)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
        if mem::size_of::<T>() == 0 {
            self.ptr.as_ptr() as usize - 1
        } else {
            offset_from(base.as_ptr(), self.ptr.as_ptr())
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn as_ptr(&self) -> *mut T {
        if mem::size_of::<T>() == 0 {
            // Just return an arbitrary ZST pointer which is properly aligned
            mem::align_of::<T>() as *mut T
        } else {
            unsafe { self.ptr.as_ptr().sub(1) }
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn next_n(&self, offset: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            (self.ptr.as_ptr() as usize + offset) as *mut T
        } else {
            self.ptr.as_ptr().sub(offset)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
        self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
    }
}
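
// Illustrative sketch (added for exposition): for sized types the bucket
// pointer points one past the element, and elements grow *downwards* from
// `data_end`, so `from_base_index`/`to_base_index` invert each other via
// pointer subtraction. For ZSTs the "pointer" is really `index + 1`.
#[cfg(test)]
mod bucket_ptr_sketch {
    use super::Bucket;
    use core::ptr::NonNull;

    #[test]
    fn base_index_round_trip() {
        // A fake 4-element data array standing in for a table's data area;
        // `base` plays the role of `data_end` (one past the "last" element).
        let mut data = [0u64; 4];
        let base = unsafe { NonNull::new_unchecked(data.as_mut_ptr().add(4)) };

        for index in 0..4 {
            unsafe {
                let bucket: Bucket<u64> = Bucket::from_base_index(base, index);
                assert_eq!(bucket.to_base_index(base), index);
            }
        }
    }
}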
/// A raw hash table with an unsafe API.
pub struct RawTable<T, A: Allocator + Clone = Global> {
    table: RawTableInner<A>,
    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}

/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
/// of how many different key-value types are used.
struct RawTableInner<A> {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,

    // [Padding], T1, T2, ..., Tlast, C1, C2, ...
    //                                ^ points here
    ctrl: NonNull<u8>,

    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,

    // Number of elements in the table, only really used by len()
    items: usize,

    alloc: A,
}
impl<T> RawTable<T, Global> {
    /// Creates a new empty hash table without allocating any memory.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub const fn new() -> Self {
        Self {
            table: RawTableInner::new_in(Global),
            marker: PhantomData,
        }
    }

    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    #[cfg(feature = "raw")]
    pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
        Self::try_with_capacity_in(capacity, Global)
    }

    /// Allocates a new hash table with at least enough capacity for inserting
    /// the given number of elements without reallocating.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, Global)
    }
}
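
// Illustrative sketch (added for exposition): `with_capacity` pre-sizes the
// table so that `capacity()` reports at least the requested headroom before
// any inserts happen.
#[cfg(test)]
mod with_capacity_sketch {
    use super::RawTable;

    #[test]
    fn reserves_requested_capacity() {
        let table: RawTable<u64> = RawTable::with_capacity(100);
        assert!(table.capacity() >= 100);
        assert_eq!(table.len(), 0);
        // 100 items need 128 buckets at the 87.5% maximum load factor.
        assert_eq!(table.buckets(), 128);
    }
}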
impl<T, A: Allocator + Clone> RawTable<T, A> {
    /// Creates a new empty hash table without allocating any memory, using the
    /// given allocator.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn new_in(alloc: A) -> Self {
        Self {
            table: RawTableInner::new_in(alloc),
            marker: PhantomData,
        }
    }

    /// Allocates a new hash table with the given number of buckets.
    ///
    /// The control bytes are left uninitialized.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(
        alloc: A,
        buckets: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        debug_assert!(buckets.is_power_of_two());

        Ok(Self {
            table: RawTableInner::new_uninitialized(
                alloc,
                TableLayout::new::<T>(),
                buckets,
                fallibility,
            )?,
            marker: PhantomData,
        })
    }

    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    fn fallible_with_capacity(
        alloc: A,
        capacity: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        Ok(Self {
            table: RawTableInner::fallible_with_capacity(
                alloc,
                TableLayout::new::<T>(),
                capacity,
                fallibility,
            )?,
            marker: PhantomData,
        })
    }
    /// Attempts to allocate a new hash table using the given allocator, with at least enough
    /// capacity for inserting the given number of elements without reallocating.
    #[cfg(feature = "raw")]
    pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
        Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible)
    }

    /// Allocates a new hash table using the given allocator, with at least enough capacity for
    /// inserting the given number of elements without reallocating.
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
        match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) {
            Ok(capacity) => capacity,
            Err(_) => unsafe { hint::unreachable_unchecked() },
        }
    }
    /// Deallocates the table without dropping any entries.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn free_buckets(&mut self) {
        self.table.free_buckets(TableLayout::new::<T>())
    }

    /// Returns pointer to one past last element of data table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn data_end(&self) -> NonNull<T> {
        NonNull::new_unchecked(self.table.ctrl.as_ptr().cast())
    }

    /// Returns pointer to start of data table.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "nightly")]
    pub unsafe fn data_start(&self) -> *mut T {
        self.data_end().as_ptr().wrapping_sub(self.buckets())
    }

    /// Returns the index of a bucket from a `Bucket`.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
        bucket.to_base_index(self.data_end())
    }

    /// Returns a pointer to an element in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
        debug_assert_ne!(self.table.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data_end(), index)
    }
    /// Erases an element from the table without dropping it.
    #[cfg_attr(feature = "inline-more", inline)]
    #[deprecated(since = "0.8.1", note = "use erase or remove instead")]
    pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
        let index = self.bucket_index(item);
        self.table.erase(index)
    }

    /// Erases an element from the table, dropping it in place.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    #[allow(deprecated)]
    pub unsafe fn erase(&mut self, item: Bucket<T>) {
        // Erase the element from the table first since drop might panic.
        self.erase_no_drop(&item);
        item.drop();
    }

    /// Finds and erases an element from the table, dropping it in place.
    /// Returns true if an element was found.
    #[cfg(feature = "raw")]
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
        // Avoid `Option::map` because it bloats LLVM IR.
        if let Some(bucket) = self.find(hash, eq) {
            unsafe { self.erase(bucket) };
            true
        } else {
            false
        }
    }
    /// Removes an element from the table, returning it.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    #[allow(deprecated)]
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
        self.erase_no_drop(&item);
        item.read()
    }

    /// Finds and removes an element from the table, returning it.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { self.remove(bucket) }),
            None => None,
        }
    }
    /// Marks all table buckets as empty without dropping their contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear_no_drop(&mut self) {
        self.table.clear_no_drop()
    }

    /// Removes all elements from the table without freeing the backing memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear(&mut self) {
        // Ensure that the table is reset even if one of the drops panic
        let mut self_ = guard(self, |self_| self_.clear_no_drop());
        unsafe {
            self_.drop_elements();
        }
    }

    unsafe fn drop_elements(&mut self) {
        if mem::needs_drop::<T>() && self.len() != 0 {
            for item in self.iter() {
                item.drop();
            }
        }
    }
    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
        // Calculate the minimal number of elements that we need to reserve
        // space for.
        let min_size = usize::max(self.table.items, min_size);
        if min_size == 0 {
            *self = Self::new_in(self.table.alloc.clone());
            return;
        }

        // Calculate the number of buckets that we need for this number of
        // elements. If the calculation overflows then the requested bucket
        // count must be larger than what we have right now, and nothing needs
        // to be done.
        let min_buckets = match capacity_to_buckets(min_size) {
            Some(buckets) => buckets,
            None => return,
        };

        // If we have more buckets than we need, shrink the table.
        if min_buckets < self.buckets() {
            // Fast path if the table is empty
            if self.table.items == 0 {
                *self = Self::with_capacity_in(min_size, self.table.alloc.clone())
            } else {
                // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                if self
                    .resize(min_size, hasher, Fallibility::Infallible)
                    .is_err()
                {
                    unsafe { hint::unreachable_unchecked() }
                }
            }
        }
    }
    /// Ensures that at least `additional` items can be inserted into the table
    /// without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
        if additional > self.table.growth_left {
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
            if self
                .reserve_rehash(additional, hasher, Fallibility::Infallible)
                .is_err()
            {
                unsafe { hint::unreachable_unchecked() }
            }
        }
    }

    /// Tries to ensure that at least `additional` items can be inserted into
    /// the table without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn try_reserve(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
    ) -> Result<(), TryReserveError> {
        if additional > self.table.growth_left {
            self.reserve_rehash(additional, hasher, Fallibility::Fallible)
        } else {
            Ok(())
        }
    }
    /// Out-of-line slow path for `reserve` and `try_reserve`.
    #[cold]
    #[inline(never)]
    fn reserve_rehash(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
        fallibility: Fallibility,
    ) -> Result<(), TryReserveError> {
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
        let new_items = match self.table.items.checked_add(additional) {
            Some(new_items) => new_items,
            None => return Err(fallibility.capacity_overflow()),
        };
        let full_capacity = bucket_mask_to_capacity(self.table.bucket_mask);
        if new_items <= full_capacity / 2 {
            // Rehash in-place without re-allocating if we have plenty of spare
            // capacity that is locked up due to DELETED entries.
            self.rehash_in_place(hasher);
            Ok(())
        } else {
            // Otherwise, conservatively resize to at least the next size up
            // to avoid churning deletes into frequent rehashes.
            self.resize(
                usize::max(new_items, full_capacity + 1),
                hasher,
                fallibility,
            )
        }
    }
    /// Rehashes the contents of the table in place (i.e. without changing the
    /// allocation).
    ///
    /// If `hasher` panics then some of the table's contents may be lost.
    fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
        unsafe {
            // If the hash function panics then properly clean up any elements
            // that we haven't rehashed yet. We unfortunately can't preserve the
            // element since we lost their hash and have no way of recovering it
            // without risking another panic.
            self.table.prepare_rehash_in_place();

            let mut guard = guard(&mut self.table, move |self_| {
                if mem::needs_drop::<T>() {
                    for i in 0..self_.buckets() {
                        if *self_.ctrl(i) == DELETED {
                            self_.set_ctrl(i, EMPTY);
                            self_.bucket::<T>(i).drop();
                            self_.items -= 1;
                        }
                    }
                }
                self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
            });

            // At this point, DELETED elements are elements that we haven't
            // rehashed yet. Find them and re-insert them at their ideal
            // position.
            'outer: for i in 0..guard.buckets() {
                if *guard.ctrl(i) != DELETED {
                    continue;
                }

                'inner: loop {
                    // Hash the current item
                    let item = guard.bucket(i);
                    let hash = hasher(item.as_ref());

                    // Search for a suitable place to put it
                    let new_i = guard.find_insert_slot(hash);

                    // Probing works by scanning through all of the control
                    // bytes in groups, which may not be aligned to the group
                    // size. If both the new and old position fall within the
                    // same unaligned group, then there is no benefit in moving
                    // it and we can just continue to the next item.
                    if likely(guard.is_in_same_group(i, new_i, hash)) {
                        guard.set_ctrl_h2(i, hash);
                        continue 'outer;
                    }

                    // We are moving the current item to a new position. Write
                    // our H2 to the control byte of the new position.
                    let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
                    if prev_ctrl == EMPTY {
                        guard.set_ctrl(i, EMPTY);
                        // If the target slot is empty, simply move the current
                        // element into the new slot and clear the old control
                        // byte.
                        guard.bucket(new_i).copy_from_nonoverlapping(&item);
                        continue 'outer;
                    } else {
                        // If the target slot is occupied, swap the two elements
                        // and then continue processing the element that we just
                        // swapped into the old slot.
                        debug_assert_eq!(prev_ctrl, DELETED);
                        mem::swap(guard.bucket(new_i).as_mut(), item.as_mut());
                        continue 'inner;
                    }
                }
            }

            guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
            mem::forget(guard);
        }
    }
    /// Allocates a new table of a different size and moves the contents of the
    /// current table into it.
    fn resize(
        &mut self,
        capacity: usize,
        hasher: impl Fn(&T) -> u64,
        fallibility: Fallibility,
    ) -> Result<(), TryReserveError> {
        unsafe {
            let mut new_table =
                self.table
                    .prepare_resize(TableLayout::new::<T>(), capacity, fallibility)?;

            // Copy all elements to the new table.
            for item in self.iter() {
                // This may panic.
                let hash = hasher(item.as_ref());

                // We can use a simpler version of insert() here since:
                // - there are no DELETED entries.
                // - we know there is enough space in the table.
                // - all elements are unique.
                let (index, _) = new_table.prepare_insert_slot(hash);
                new_table.bucket(index).copy_from_nonoverlapping(&item);
            }

            // We successfully copied all elements without panicking. Now replace
            // self with the new table. The old table will have its memory freed but
            // the items will not be dropped (since they have been moved into the
            // new table).
            mem::swap(&mut self.table, &mut new_table);

            Ok(())
        }
    }
    /// Inserts a new element into the table, and returns its raw bucket.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
        unsafe {
            let mut index = self.table.find_insert_slot(hash);

            // We can avoid growing the table once we have reached our load
            // factor if we are replacing a tombstone. This works since the
            // number of EMPTY slots does not change in this case.
            let old_ctrl = *self.table.ctrl(index);
            if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
                self.reserve(1, hasher);
                index = self.table.find_insert_slot(hash);
            }

            self.table.record_item_insert_at(index, old_ctrl, hash);

            let bucket = self.bucket(index);
            bucket.write(value);
            bucket
        }
    }
    /// Attempts to insert a new element without growing the table and return its raw bucket.
    ///
    /// Returns an `Err` containing the given element if inserting it would require growing the
    /// table.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg(feature = "raw")]
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn try_insert_no_grow(&mut self, hash: u64, value: T) -> Result<Bucket<T>, T> {
        unsafe {
            match self.table.prepare_insert_no_grow(hash) {
                Ok(index) => {
                    let bucket = self.bucket(index);
                    bucket.write(value);
                    Ok(bucket)
                }
                Err(()) => Err(value),
            }
        }
    }

    /// Inserts a new element into the table, and returns a mutable reference to it.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
        unsafe { self.insert(hash, value, hasher).as_mut() }
    }

    /// Inserts a new element into the table, without growing the table.
    ///
    /// There must be enough space in the table to insert the new element.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
    pub unsafe fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
        let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
        let bucket = self.table.bucket(index);

        // If we are replacing a DELETED entry then we don't need to update
        // the load counter.
        self.table.growth_left -= special_is_empty(old_ctrl) as usize;

        bucket.write(value);
        self.table.items += 1;
        bucket
    }
    /// Temporarily removes a bucket, applying the given function to the removed
    /// element and optionally putting back the returned value in the same bucket.
    ///
    /// Returns `true` if the bucket still contains an element.
    ///
    /// This does not check if the given bucket is actually occupied.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
    where
        F: FnOnce(T) -> Option<T>,
    {
        let index = self.bucket_index(&bucket);
        let old_ctrl = *self.table.ctrl(index);
        debug_assert!(is_full(old_ctrl));
        let old_growth_left = self.table.growth_left;
        let item = self.remove(bucket);
        if let Some(new_item) = f(item) {
            self.table.growth_left = old_growth_left;
            self.table.set_ctrl(index, old_ctrl);
            self.table.items += 1;
            self.bucket(index).write(new_item);
            true
        } else {
            false
        }
    }
    /// Searches for an element in the table.
    #[inline]
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
        unsafe {
            for bucket in self.iter_hash(hash) {
                let elm = bucket.as_ref();
                if likely(eq(elm)) {
                    return Some(bucket);
                }
            }
            None
        }
    }

    /// Gets a reference to an element in the table.
    #[inline]
    pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { bucket.as_ref() }),
            None => None,
        }
    }

    /// Gets a mutable reference to an element in the table.
    #[inline]
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
            None => None,
        }
    }
    /// Attempts to get mutable references to `N` entries in the table at once.
    ///
    /// Returns an array of length `N` with the results of each query. For soundness,
    /// at most one mutable reference will be returned to any entry. An
    /// `Err(UnavailableMutError::Duplicate(i))` in the returned array indicates that a suitable
    /// entry exists, but a mutable reference to it already occurs at index `i` in the returned
    /// array.
    ///
    /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
    /// the `i`th key to be looked up.
    ///
    /// This method is available only if the `nightly` feature is enabled.
    #[cfg(feature = "nightly")]
    pub fn get_each_mut<const N: usize>(
        &mut self,
        hashes: [u64; N],
        mut eq: impl FnMut(usize, &T) -> bool,
    ) -> [Result<&'_ mut T, UnavailableMutError>; N] {
        // Collect the requested buckets.
        // TODO use `MaybeUninit::uninit_array` here instead once that's stable.
        let mut buckets: [MaybeUninit<Option<Bucket<T>>>; N] =
            unsafe { MaybeUninit::uninit().assume_init() };
        for i in 0..N {
            buckets[i] = MaybeUninit::new(self.find(hashes[i], |k| eq(i, k)));
        }
        let buckets: [Option<Bucket<T>>; N] = unsafe { MaybeUninit::array_assume_init(buckets) };

        // Walk through the buckets, checking for duplicates and building up the output array.
        // TODO use `MaybeUninit::uninit_array` here instead once that's stable.
        let mut out: [MaybeUninit<Result<&'_ mut T, UnavailableMutError>>; N] =
            unsafe { MaybeUninit::uninit().assume_init() };
        for i in 0..N {
            out[i] = MaybeUninit::new(
                #[allow(clippy::never_loop)]
                'outer: loop {
                    for j in 0..i {
                        match (&buckets[j], &buckets[i]) {
                            // These two buckets are the same, and we can't safely return a second
                            // mutable reference to the same entry.
                            (Some(prev), Some(cur)) if prev.as_ptr() == cur.as_ptr() => {
                                break 'outer Err(UnavailableMutError::Duplicate(j));
                            }
                            _ => {}
                        }
                    }
                    // This bucket is distinct from all previous buckets (or it doesn't exist), so
                    // we're clear to return the result of the lookup.
                    break match &buckets[i] {
                        None => Err(UnavailableMutError::Absent),
                        Some(bkt) => unsafe { Ok(bkt.as_mut()) },
                    };
                },
            );
        }

        unsafe { MaybeUninit::array_assume_init(out) }
    }
    /// Returns the number of elements the map can hold without reallocating.
    ///
    /// This number is a lower bound; the table might be able to hold
    /// more, but is guaranteed to be able to hold at least this many.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn capacity(&self) -> usize {
        self.table.items + self.table.growth_left
    }

    /// Returns the number of elements in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn len(&self) -> usize {
        self.table.items
    }

    /// Returns the number of buckets in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn buckets(&self) -> usize {
        self.table.bucket_mask + 1
    }

    /// Returns an iterator over every element in the table. It is up to
    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
    /// Because we cannot make the `next` method unsafe on the `RawIter`
    /// struct, we have to make the `iter` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn iter(&self) -> RawIter<T> {
        let data = Bucket::from_base_index(self.data_end(), 0);
        RawIter {
            iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()),
            items: self.table.items,
        }
    }

    /// Returns an iterator over occupied buckets that could match a given hash.
    ///
    /// In rare cases, the iterator may return a bucket with a different hash.
    ///
    /// It is up to the caller to ensure that the `RawTable` outlives the
    /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
    /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> {
        RawIterHash::new(self, hash)
    }
    /// Returns an iterator which removes all elements from the table without
    /// freeing the memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
        unsafe {
            let iter = self.iter();
            self.drain_iter_from(iter)
        }
    }

    /// Returns an iterator which removes all elements from the table without
    /// freeing the memory.
    ///
    /// Iteration starts at the provided iterator's current location.
    ///
    /// It is up to the caller to ensure that the iterator is valid for this
    /// `RawTable` and covers all items that remain in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
        debug_assert_eq!(iter.len(), self.len());
        RawDrain {
            iter,
            table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))),
            orig_table: NonNull::from(self),
            marker: PhantomData,
        }
    }

    /// Returns an iterator which consumes all elements from the table.
    ///
    /// Iteration starts at the provided iterator's current location.
    ///
    /// It is up to the caller to ensure that the iterator is valid for this
    /// `RawTable` and covers all items that remain in the table.
    pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
        debug_assert_eq!(iter.len(), self.len());

        let alloc = self.table.alloc.clone();
        let allocation = self.into_allocation();
        RawIntoIter {
            iter,
            allocation,
            marker: PhantomData,
            alloc,
        }
    }

    /// Converts the table into a raw allocation. The contents of the table
    /// should be dropped using a `RawIter` before freeing the allocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout)> {
        let alloc = if self.table.is_empty_singleton() {
            None
        } else {
            // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
            let (layout, ctrl_offset) = match calculate_layout::<T>(self.table.buckets()) {
                Some(lco) => lco,
                None => unsafe { hint::unreachable_unchecked() },
            };
            Some((
                unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
                layout,
            ))
        };
        mem::forget(self);
        alloc
    }
}
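
// Illustrative usage sketch (added for exposition): exercises the public
// insert/find/remove cycle. The closure passed as "hasher" is a toy identity
// hash so the example stays self-contained; real callers derive it from a
// `BuildHasher`.
#[cfg(test)]
mod raw_table_usage_sketch {
    use super::RawTable;

    #[test]
    fn insert_find_remove() {
        let mut table: RawTable<u64> = RawTable::new();
        let hasher = |v: &u64| *v; // toy hash function (assumption for the sketch)

        for k in 0..16 {
            table.insert(k, k, hasher);
        }
        assert_eq!(table.len(), 16);

        // `find` pairs the hash with an equality closure.
        assert!(table.find(3, |v| *v == 3).is_some());

        // `remove_entry` returns the element, leaving the slot empty or as a
        // tombstone depending on its neighbourhood.
        assert_eq!(table.remove_entry(3, |v| *v == 3), Some(3));
        assert!(table.find(3, |v| *v == 3).is_none());
        assert_eq!(table.len(), 15);
    }
}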
unsafe impl<T, A: Allocator + Clone> Send for RawTable<T, A> where T: Send {}
unsafe impl<T, A: Allocator + Clone> Sync for RawTable<T, A> where T: Sync {}

impl<A> RawTableInner<A> {
    #[cfg_attr(feature = "inline-more", inline)]
    const fn new_in(alloc: A) -> Self {
        Self {
            // Be careful to cast the entire slice to a raw pointer.
            ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
            bucket_mask: 0,
            items: 0,
            growth_left: 0,
            alloc,
        }
    }
}
impl<A: Allocator + Clone> RawTableInner<A> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(
        alloc: A,
        table_layout: TableLayout,
        buckets: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        debug_assert!(buckets.is_power_of_two());

        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
            Some(lco) => lco,
            None => return Err(fallibility.capacity_overflow()),
        };

        let ptr: NonNull<u8> = match do_alloc(&alloc, layout) {
            Ok(block) => block.cast(),
            Err(_) => return Err(fallibility.alloc_err(layout)),
        };

        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
        Ok(Self {
            ctrl,
            bucket_mask: buckets - 1,
            items: 0,
            growth_left: bucket_mask_to_capacity(buckets - 1),
            alloc,
        })
    }

    #[inline]
    fn fallible_with_capacity(
        alloc: A,
        table_layout: TableLayout,
        capacity: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        if capacity == 0 {
            Ok(Self::new_in(alloc))
        } else {
            unsafe {
                let buckets =
                    capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;

                let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
                result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());

                Ok(result)
            }
        }
    }
    /// Searches for an empty or deleted bucket which is suitable for inserting
    /// a new element and sets the hash for that slot.
    ///
    /// There must be at least 1 empty bucket in the table.
    #[inline]
    unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
        let index = self.find_insert_slot(hash);
        let old_ctrl = *self.ctrl(index);
        self.set_ctrl_h2(index, hash);
        (index, old_ctrl)
    }

    /// Searches for an empty or deleted bucket which is suitable for inserting
    /// a new element.
    ///
    /// There must be at least 1 empty bucket in the table.
    #[inline]
    fn find_insert_slot(&self, hash: u64) -> usize {
        let mut probe_seq = self.probe_seq(hash);
        loop {
            unsafe {
                let group = Group::load(self.ctrl(probe_seq.pos));
                if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
                    let result = (probe_seq.pos + bit) & self.bucket_mask;

                    // In tables smaller than the group width, trailing control
                    // bytes outside the range of the table are filled with
                    // EMPTY entries. These will unfortunately trigger a
                    // match, but once masked may point to a full bucket that
                    // is already occupied. We detect this situation here and
                    // perform a second scan starting at the beginning of the
                    // table. This second scan is guaranteed to find an empty
                    // slot (due to the load factor) before hitting the trailing
                    // control bytes (containing EMPTY).
                    if unlikely(is_full(*self.ctrl(result))) {
                        debug_assert!(self.bucket_mask < Group::WIDTH);
                        debug_assert_ne!(probe_seq.pos, 0);
                        return Group::load_aligned(self.ctrl(0))
                            .match_empty_or_deleted()
                            .lowest_set_bit_nonzero();
                    }

                    return result;
                }
            }
            probe_seq.move_next(self.bucket_mask);
        }
    }
    #[allow(clippy::mut_mut)]
    #[inline]
    unsafe fn prepare_rehash_in_place(&mut self) {
        // Bulk convert all full control bytes to DELETED, and all DELETED
        // control bytes to EMPTY. This effectively frees up all buckets
        // containing a DELETED entry.
        for i in (0..self.buckets()).step_by(Group::WIDTH) {
            let group = Group::load_aligned(self.ctrl(i));
            let group = group.convert_special_to_empty_and_full_to_deleted();
            group.store_aligned(self.ctrl(i));
        }

        // Fix up the trailing control bytes. See the comments in set_ctrl
        // for the handling of tables smaller than the group width.
        if self.buckets() < Group::WIDTH {
            self.ctrl(0)
                .copy_to(self.ctrl(Group::WIDTH), self.buckets());
        } else {
            self.ctrl(0)
                .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
        debug_assert_ne!(self.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data_end(), index)
    }

    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn data_end<T>(&self) -> NonNull<T> {
        NonNull::new_unchecked(self.ctrl.as_ptr().cast())
    }

    /// Returns an iterator-like object for a probe sequence on the table.
    ///
    /// This iterator never terminates, but is guaranteed to visit each bucket
    /// group exactly once. The loop using `probe_seq` must terminate upon
    /// reaching a group containing an empty bucket.
    #[inline]
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
        ProbeSeq {
            pos: h1(hash) & self.bucket_mask,
            stride: 0,
        }
    }
    /// Returns the index of a bucket for which a value must be inserted if there is enough room
    /// in the table, otherwise returns an error.
    #[cfg(feature = "raw")]
    #[inline]
    unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> {
        let index = self.find_insert_slot(hash);
        let old_ctrl = *self.ctrl(index);
        if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
            Err(())
        } else {
            self.record_item_insert_at(index, old_ctrl, hash);
            Ok(index)
        }
    }

    #[inline]
    unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) {
        self.growth_left -= special_is_empty(old_ctrl) as usize;
        self.set_ctrl_h2(index, hash);
        self.items += 1;
    }

    #[inline]
    fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool {
        let probe_seq_pos = self.probe_seq(hash).pos;
        let probe_index =
            |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH;
        probe_index(i) == probe_index(new_i)
    }

    /// Sets a control byte to the hash, and possibly also the replicated control byte at
    /// the end of the array.
    #[inline]
    unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) {
        self.set_ctrl(index, h2(hash))
    }

    #[inline]
    unsafe fn replace_ctrl_h2(&self, index: usize, hash: u64) -> u8 {
        let prev_ctrl = *self.ctrl(index);
        self.set_ctrl_h2(index, hash);
        prev_ctrl
    }
    /// Sets a control byte, and possibly also the replicated control byte at
    /// the end of the array.
    #[inline]
    unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
        // Replicate the first Group::WIDTH control bytes at the end of
        // the array without using a branch:
        // - If index >= Group::WIDTH then index == index2.
        // - Otherwise index2 == self.bucket_mask + 1 + index.
        //
        // The very last replicated control byte is never actually read because
        // we mask the initial index for unaligned loads, but we write it
        // anyways because it makes the set_ctrl implementation simpler.
        //
        // If there are fewer buckets than Group::WIDTH then this code will
        // replicate the buckets at the end of the trailing group. For example
        // with 2 buckets and a group size of 4, the control bytes will look
        // like this:
        //
        //     Real    |             Replicated
        // ---------------------------------------------
        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
        // ---------------------------------------------
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;

        *self.ctrl(index) = ctrl;
        *self.ctrl(index2) = ctrl;
    }
    /// Returns a pointer to a control byte.
    #[inline]
    unsafe fn ctrl(&self, index: usize) -> *mut u8 {
        debug_assert!(index < self.num_ctrl_bytes());
        self.ctrl.as_ptr().add(index)
    }

    #[inline]
    fn buckets(&self) -> usize {
        self.bucket_mask + 1
    }

    #[inline]
    fn num_ctrl_bytes(&self) -> usize {
        self.bucket_mask + 1 + Group::WIDTH
    }

    #[inline]
    fn is_empty_singleton(&self) -> bool {
        self.bucket_mask == 0
    }
    #[allow(clippy::mut_mut)]
    #[inline]
    unsafe fn prepare_resize(
        &self,
        table_layout: TableLayout,
        capacity: usize,
        fallibility: Fallibility,
    ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self)>, TryReserveError> {
        debug_assert!(self.items <= capacity);

        // Allocate and initialize the new table.
        let mut new_table = RawTableInner::fallible_with_capacity(
            self.alloc.clone(),
            table_layout,
            capacity,
            fallibility,
        )?;
        new_table.growth_left -= self.items;
        new_table.items = self.items;

        // The hash function may panic, in which case we simply free the new
        // table without dropping any elements that may have been copied into
        // it.
        //
        // This guard is also used to free the old table on success, see
        // the comment at the bottom of this function.
        Ok(guard(new_table, move |self_| {
            if !self_.is_empty_singleton() {
                self_.free_buckets(table_layout);
            }
        }))
    }

    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn free_buckets(&mut self, table_layout: TableLayout) {
        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
            Some(lco) => lco,
            None => hint::unreachable_unchecked(),
        };
        self.alloc.deallocate(
            NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)),
            layout,
        );
    }

    /// Marks all table buckets as empty without dropping their contents.
    #[inline]
    fn clear_no_drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
            }
        }
        self.items = 0;
        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
    }
    #[inline]
    unsafe fn erase(&mut self, index: usize) {
        debug_assert!(is_full(*self.ctrl(index)));
        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
        let empty_after = Group::load(self.ctrl(index)).match_empty();

        // If we are inside a continuous block of Group::WIDTH full or deleted
        // cells then a probe window may have seen a full block when trying to
        // insert. We therefore need to keep that block non-empty so that
        // lookups will continue searching to the next probe window.
        //
        // Note that in this context `leading_zeros` refers to the bytes at the
        // end of a group, while `trailing_zeros` refers to the bytes at the
        // beginning of a group.
        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
            DELETED
        } else {
            self.growth_left += 1;
            EMPTY
        };
        self.set_ctrl(index, ctrl);
        self.items -= 1;
    }
}
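
// Illustrative sketch (added for exposition): two properties of the control
// byte bookkeeping above. First, the branchless `index2` computation in
// `set_ctrl` mirrors the first `Group::WIDTH` indices onto the replicated
// trailing bytes and leaves all others unchanged. Second, a re-derivation of
// the `erase` decision on a plain boolean view: a slot can be reset to EMPTY
// only if no window of `Group::WIDTH` consecutive slots covering it was
// completely full.
#[cfg(test)]
mod ctrl_bookkeeping_sketch {
    use super::Group;

    #[test]
    fn index2_mirrors_small_indices() {
        let bucket_mask = 64 - 1; // a 64-bucket table (assumed for the sketch)
        let index2 =
            |index: usize| (index.wrapping_sub(Group::WIDTH) & bucket_mask) + Group::WIDTH;
        // The first Group::WIDTH bytes are mirrored past the end of the table...
        assert_eq!(index2(0), 64);
        assert_eq!(index2(Group::WIDTH - 1), 64 + Group::WIDTH - 1);
        // ...while every other index maps to itself.
        for i in Group::WIDTH..64 {
            assert_eq!(index2(i), i);
        }
    }

    // Hypothetical helper mirroring the erase logic on a boolean "is empty"
    // view of the control bytes around the erased slot.
    fn needs_tombstone(empty_before: &[bool], empty_after: &[bool]) -> bool {
        // `empty_before` covers the WIDTH slots before the erased one;
        // `empty_after` starts at the erased slot itself (still full here).
        let run_before = empty_before.iter().rev().take_while(|&&e| !e).count();
        let run_after = empty_after.iter().take_while(|&&e| !e).count();
        run_before + run_after >= Group::WIDTH
    }

    #[test]
    fn erase_writes_empty_only_for_short_runs() {
        // Fully empty neighbourhood: safe to write EMPTY.
        assert!(!needs_tombstone(&[true; Group::WIDTH], &[true; Group::WIDTH]));
        // Every surrounding slot full: some probe window saw a full group,
        // so the slot must become a DELETED tombstone.
        assert!(needs_tombstone(&[false; Group::WIDTH], &[false; Group::WIDTH]));
    }
}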
impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
    fn clone(&self) -> Self {
        if self.table.is_empty_singleton() {
            Self::new_in(self.table.alloc.clone())
        } else {
            unsafe {
                let mut new_table = ManuallyDrop::new(
                    // Avoid `Result::ok_or_else` because it bloats LLVM IR.
                    match Self::new_uninitialized(
                        self.table.alloc.clone(),
                        self.table.buckets(),
                        Fallibility::Infallible,
                    ) {
                        Ok(table) => table,
                        Err(_) => hint::unreachable_unchecked(),
                    },
                );

                new_table.clone_from_spec(self, |new_table| {
                    // We need to free the memory allocated for the new table.
                    new_table.free_buckets();
                });

                // Return the newly created table.
                ManuallyDrop::into_inner(new_table)
            }
        }
    }

    fn clone_from(&mut self, source: &Self) {
        if source.table.is_empty_singleton() {
            *self = Self::new_in(self.table.alloc.clone());
        } else {
            unsafe {
                // First, drop all our elements without clearing the control bytes.
                self.drop_elements();

                // If necessary, resize our table to match the source.
                if self.buckets() != source.buckets() {
                    // Skip our drop by using ptr::write.
                    if !self.table.is_empty_singleton() {
                        self.free_buckets();
                    }
                    (self as *mut Self).write(
                        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                        match Self::new_uninitialized(
                            self.table.alloc.clone(),
                            source.buckets(),
                            Fallibility::Infallible,
                        ) {
                            Ok(table) => table,
                            Err(_) => hint::unreachable_unchecked(),
                        },
                    );
                }

                self.clone_from_spec(source, |self_| {
                    // We need to leave the table in an empty state.
                    self_.clear_no_drop()
                });
            }
        }
    }
}
/// Specialization of `clone_from` for `Copy` types
trait RawTableClone {
    unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self));
}
impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    default_fn! {
        unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) {
            self.clone_from_impl(source, on_panic);
        }
    }
}
#[cfg(feature = "nightly")]
impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_spec(&mut self, source: &Self, _on_panic: impl FnMut(&mut Self)) {
        source
            .table
            .ctrl(0)
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
        source
            .data_start()
            .copy_to_nonoverlapping(self.data_start(), self.table.buckets());

        self.table.items = source.table.items;
        self.table.growth_left = source.table.growth_left;
    }
}
impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
    /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) {
        // Copy the control bytes unchanged. We do this in a single pass
        source
            .table
            .ctrl(0)
            .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());

        // The cloning of elements may panic, in which case we need
        // to make sure we drop only the elements that have been
        // cloned so far.
        let mut guard = guard((0, &mut *self), |(index, self_)| {
            if mem::needs_drop::<T>() && self_.len() != 0 {
                for i in 0..=*index {
                    if is_full(*self_.table.ctrl(i)) {
                        self_.bucket(i).drop();
                    }
                }
            }

            // Depending on whether we were called from clone or clone_from, we
            // either need to free the memory for the destination table or just
            // clear the control bytes.
            on_panic(self_);
        });

        for from in source.iter() {
            let index = source.bucket_index(&from);
            let to = guard.1.bucket(index);
            to.write(from.as_ref().clone());

            // Update the index in case we need to unwind.
            guard.0 = index;
        }

        // Successfully cloned all items, no need to clean up.
        mem::forget(guard);

        self.table.items = source.table.items;
        self.table.growth_left = source.table.growth_left;
    }
    /// Variant of `clone_from` to use when a hasher is available.
    #[cfg(feature = "raw")]
    pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
        // If we have enough capacity in the table, just clear it and insert
        // elements one by one. We don't do this if we have the same number of
        // buckets as the source since we can just copy the contents directly
        // in that case.
        if self.table.buckets() != source.table.buckets()
            && bucket_mask_to_capacity(self.table.bucket_mask) >= source.len()
        {
            self.clear();

            let guard_self = guard(&mut *self, |self_| {
                // Clear the partially copied table if a panic occurs, otherwise
                // items and growth_left will be out of sync with the contents
                // of the table.
                self_.clear();
            });

            unsafe {
                for item in source.iter() {
                    // This may panic.
                    let item = item.as_ref().clone();
                    let hash = hasher(&item);

                    // We can use a simpler version of insert() here since:
                    // - there are no DELETED entries.
                    // - we know there is enough space in the table.
                    // - all elements are unique.
                    let (index, _) = guard_self.table.prepare_insert_slot(hash);
                    guard_self.bucket(index).write(item);
                }
            }

            // Successfully cloned all items, no need to clean up.
            mem::forget(guard_self);

            self.table.items = source.table.items;
            self.table.growth_left -= source.table.items;
        } else {
            self.clone_from(source);
        }
    }
}
impl<T, A: Allocator + Clone + Default> Default for RawTable<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn default() -> Self {
        Self::new_in(Default::default())
    }
}

#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        if !self.table.is_empty_singleton() {
            unsafe {
                self.drop_elements();
                self.free_buckets();
            }
        }
    }
}
#[cfg(not(feature = "nightly"))]
impl<T, A: Allocator + Clone> Drop for RawTable<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        if !self.table.is_empty_singleton() {
            unsafe {
                self.drop_elements();
                self.free_buckets();
            }
        }
    }
}
impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
    type Item = T;
    type IntoIter = RawIntoIter<T, A>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn into_iter(self) -> RawIntoIter<T, A> {
        unsafe {
            let iter = self.iter();
            self.into_iter_from(iter)
        }
    }
}
/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
/// not track an item count.
pub(crate) struct RawIterRange<T> {
    // Mask of full buckets in the current group. Bits are cleared from this
    // mask as each element is processed.
    current_group: BitMask,

    // Pointer to the buckets for the current group.
    data: Bucket<T>,

    // Pointer to the next group of control bytes,
    // Must be aligned to the group size.
    next_ctrl: *const u8,

    // Pointer one past the last control byte of this range.
    end: *const u8,
}

impl<T> RawIterRange<T> {
    /// Returns a `RawIterRange` covering a subset of a table.
    ///
    /// The control byte address must be aligned to the group size.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
        debug_assert_ne!(len, 0);
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
        let end = ctrl.add(len);

        // Load the first group and advance ctrl to point to the next group
        let current_group = Group::load_aligned(ctrl).match_full();
        let next_ctrl = ctrl.add(Group::WIDTH);

        Self {
            current_group,
            data,
            next_ctrl,
            end,
        }
    }
    /// Splits a `RawIterRange` into two halves.
    ///
    /// Returns `None` if the remaining range is smaller than or equal to the
    /// group width.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "rayon")]
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
        unsafe {
            if self.end <= self.next_ctrl {
                // Nothing to split if the group that we are currently processing
                // is the last one.
                (self, None)
            } else {
                // len is the remaining number of elements after the group that
                // we are currently processing. It must be a multiple of the
                // group size (small tables are caught by the check above).
                let len = offset_from(self.end, self.next_ctrl);
                debug_assert_eq!(len % Group::WIDTH, 0);

                // Split the remaining elements into two halves, but round the
                // midpoint down in case there is an odd number of groups
                // remaining. This ensures that:
                // - The tail is at least 1 group long.
                // - The split is roughly even considering we still have the
                //   current group to process.
                let mid = (len / 2) & !(Group::WIDTH - 1);

                let tail = Self::new(
                    self.next_ctrl.add(mid),
                    self.data.next_n(Group::WIDTH).next_n(mid),
                    len - mid,
                );
                debug_assert_eq!(
                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
                    tail.data.ptr
                );
                debug_assert_eq!(self.end, tail.end);
                self.end = self.next_ctrl.add(mid);
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);

                (self, Some(tail))
            }
        }
    }
}
// We make raw iterators unconditionally Send and Sync, and let the PhantomData
// in the actual iterator implementations determine the real Send/Sync bounds.
unsafe impl<T> Send for RawIterRange<T> {}
unsafe impl<T> Sync for RawIterRange<T> {}

impl<T> Clone for RawIterRange<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self {
            data: self.data.clone(),
            next_ctrl: self.next_ctrl,
            current_group: self.current_group,
            end: self.end,
        }
    }
}
impl<T> Iterator for RawIterRange<T> {
    type Item = Bucket<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            loop {
                if let Some(index) = self.current_group.lowest_set_bit() {
                    self.current_group = self.current_group.remove_lowest_bit();
                    return Some(self.data.next_n(index));
                }

                if self.next_ctrl >= self.end {
                    return None;
                }

                // We might read past self.end up to the next group boundary,
                // but this is fine because it only occurs on tables smaller
                // than the group size where the trailing control bytes are all
                // EMPTY. On larger tables self.end is guaranteed to be aligned
                // to the group size (since tables are power-of-two sized).
                self.current_group = Group::load_aligned(self.next_ctrl).match_full();
                self.data = self.data.next_n(Group::WIDTH);
                self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
            }
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // We don't have an item count, so just guess based on the range size.
        (
            0,
            Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
        )
    }
}
impl<T> FusedIterator for RawIterRange<T> {}

/// Iterator which returns a raw pointer to every full bucket in the table.
///
/// For maximum flexibility this iterator is not bound by a lifetime, but you
/// must observe several rules when using it:
/// - You must not free the hash table while iterating (including via growing/shrinking).
/// - It is fine to erase a bucket that has been yielded by the iterator.
/// - Erasing a bucket that has not yet been yielded by the iterator may still
///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
/// - It is unspecified whether an element inserted after the iterator was
///   created will be yielded by that iterator (unless `reflect_insert` is called).
/// - The order in which the iterator yields buckets is unspecified and may
///   change in the future.
pub struct RawIter<T> {
    pub(crate) iter: RawIterRange<T>,
    items: usize,
}
impl<T> RawIter<T> {
    /// Refresh the iterator so that it reflects a removal from the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each removed bucket before `next` is called again.
    ///
    /// This method should be called _before_ the removal is made. It is not necessary to call this
    /// method if you are removing an item that this iterator yielded in the past.
    #[cfg(feature = "raw")]
    pub fn reflect_remove(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, false);
    }
    /// Refresh the iterator so that it reflects an insertion into the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each insert before `next` is called again.
    ///
    /// This method does not guarantee that an insertion of a bucket with a greater
    /// index than the last one yielded will be reflected in the iterator.
    ///
    /// This method should be called _after_ the given insert is made.
    #[cfg(feature = "raw")]
    pub fn reflect_insert(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, true);
    }
    /// Refresh the iterator so that it reflects a change to the state of the given bucket.
    #[cfg(feature = "raw")]
    fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
        unsafe {
            if b.as_ptr() > self.iter.data.as_ptr() {
                // The iterator has already passed the bucket's group.
                // So the toggle isn't relevant to this iterator.
                return;
            }

            if self.iter.next_ctrl < self.iter.end
                && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
            {
                // The iterator has not yet reached the bucket's group.
                // We don't need to reload anything, but we do need to adjust the item count.

                if cfg!(debug_assertions) {
                    // Double-check that the user isn't lying to us by checking the bucket state.
                    // To do that, we need to find its control byte. We know that self.iter.data is
                    // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
                    let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                    let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
                    // This method should be called _before_ a removal, or _after_ an insert,
                    // so in both cases the ctrl byte should indicate that the bucket is full.
                    assert!(is_full(*ctrl));
                }

                if is_insert {
                    self.items += 1;
                } else {
                    self.items -= 1;
                }

                return;
            }

            // The iterator is at the bucket group that the toggled bucket is in.
            // We need to do two things:
            //
            //  - Determine if the iterator already yielded the toggled bucket.
            //    If it did, we're done.
            //  - Otherwise, update the iterator cached group so that it won't
            //    yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
            //    We'll also need to update the item count accordingly.
            if let Some(index) = self.iter.current_group.lowest_set_bit() {
                let next_bucket = self.iter.data.next_n(index);
                if b.as_ptr() > next_bucket.as_ptr() {
                    // The toggled bucket is "before" the bucket the iterator would yield next. We
                    // therefore don't need to do anything --- the iterator has already passed the
                    // bucket in question.
                    //
                    // The item count must already be correct, since a removal or insert "prior" to
                    // the iterator's position wouldn't affect the item count.
                } else {
                    // The removed bucket is an upcoming bucket. We need to make sure it does _not_
                    // get yielded, and also that it's no longer included in the item count.
                    //
                    // NOTE: We can't just reload the group here, both since that might reflect
                    // inserts we've already passed, and because that might inadvertently unset the
                    // bits for _other_ removals. If we do that, we'd have to also decrement the
                    // item count for those other bits that we unset. But the presumably subsequent
                    // call to reflect for those buckets might _also_ decrement the item count.
                    // Instead, we _just_ flip the bit for the particular bucket the caller asked
                    // us to reflect.
                    let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                    let was_full = self.iter.current_group.flip(our_bit);
                    debug_assert_ne!(was_full, is_insert);

                    if is_insert {
                        self.items += 1;
                    } else {
                        self.items -= 1;
                    }

                    if cfg!(debug_assertions) {
                        if b.as_ptr() == next_bucket.as_ptr() {
                            // The removed bucket should no longer be next
                            debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
                        } else {
                            // We should not have changed what bucket comes next.
                            debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
                        }
                    }
                }
            } else {
                // We must have already iterated past the removed item.
            }
        }
    }
    unsafe fn drop_elements(&mut self) {
        if mem::needs_drop::<T>() && self.len() != 0 {
            for item in self {
                item.drop();
            }
        }
    }
}
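
// Editor's sketch (not part of the original source): the `reflect_remove`
// contract above -- notify a live iterator *before* performing the removal.
// `RawTable::remove` is assumed here as the removal API (it is what the map
// layer uses); gated like other `raw`-feature code.
#[cfg(all(test, feature = "raw"))]
#[test]
fn reflect_remove_keeps_iterator_count_correct() {
    let mut table = RawTable::new();
    let hasher = |i: &u64| *i;
    for i in 0..4u64 {
        table.insert(i, i, hasher);
    }
    unsafe {
        let mut iter = table.iter();
        let b = table.find(2, |x| *x == 2).unwrap();
        // Tell the not-yet-advanced iterator about the removal first...
        iter.reflect_remove(&b);
        // ...then actually remove the item.
        let _ = table.remove(b);
        // The iterator neither yields the removed bucket nor counts it.
        assert_eq!(iter.count(), 3);
    }
}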
impl<T> Clone for RawIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self {
            iter: self.iter.clone(),
            items: self.items,
        }
    }
}
impl<T> Iterator for RawIter<T> {
    type Item = Bucket<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        if let Some(b) = self.iter.next() {
            self.items -= 1;
            Some(b)
        } else {
            // We don't check against items == 0 here to allow the
            // compiler to optimize away the item count entirely if the
            // iterator length is never queried.
            debug_assert_eq!(self.items, 0);
            None
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.items, Some(self.items))
    }
}
impl<T> ExactSizeIterator for RawIter<T> {}
impl<T> FusedIterator for RawIter<T> {}
/// Iterator which consumes a table and returns elements.
pub struct RawIntoIter<T, A: Allocator + Clone = Global> {
    iter: RawIter<T>,
    allocation: Option<(NonNull<u8>, Layout)>,
    marker: PhantomData<T>,
    alloc: A,
}
impl<T, A: Allocator + Clone> RawIntoIter<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}
unsafe impl<T, A: Allocator + Clone> Send for RawIntoIter<T, A> where T: Send {}
unsafe impl<T, A: Allocator + Clone> Sync for RawIntoIter<T, A> where T: Sync {}
#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            self.iter.drop_elements();

            // Free the table
            if let Some((ptr, layout)) = self.allocation {
                self.alloc.deallocate(ptr, layout);
            }
        }
    }
}
#[cfg(not(feature = "nightly"))]
impl<T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            self.iter.drop_elements();

            // Free the table
            if let Some((ptr, layout)) = self.allocation {
                self.alloc.deallocate(ptr, layout);
            }
        }
    }
}
impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> {
    type Item = T;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        unsafe { Some(self.iter.next()?.read()) }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
impl<T, A: Allocator + Clone> ExactSizeIterator for RawIntoIter<T, A> {}
impl<T, A: Allocator + Clone> FusedIterator for RawIntoIter<T, A> {}
/// Iterator which consumes elements without freeing the table storage.
pub struct RawDrain<'a, T, A: Allocator + Clone = Global> {
    iter: RawIter<T>,

    // The table is moved into the iterator for the duration of the drain. This
    // ensures that an empty table is left if the drain iterator is leaked
    // without dropping.
    table: ManuallyDrop<RawTable<T, A>>,
    orig_table: NonNull<RawTable<T, A>>,

    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
    // covariant over T.
    marker: PhantomData<&'a RawTable<T, A>>,
}
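
// Editor's miniature (not part of the original source) of the drain pattern
// above: move the state into the iterator through ManuallyDrop, and copy an
// emptied value back to the original location on drop, so a leaked drain
// leaves a valid (empty) container behind. `Counter`/`CounterDrain` are
// hypothetical stand-ins; the real code also carries a PhantomData for
// variance, omitted here for brevity.
#[cfg(test)]
mod drain_pattern_sketch {
    use core::mem::ManuallyDrop;
    use core::ptr::NonNull;

    struct Counter {
        n: usize,
    }

    struct CounterDrain {
        taken: ManuallyDrop<Counter>,
        orig: NonNull<Counter>,
    }

    impl Drop for CounterDrain {
        fn drop(&mut self) {
            // Mirrors `clear_no_drop` + `copy_from_nonoverlapping` above.
            self.taken.n = 0;
            unsafe {
                self.orig.as_ptr().copy_from_nonoverlapping(&*self.taken, 1);
            }
        }
    }

    #[test]
    fn drain_leaves_an_empty_value_behind() {
        let mut c = Counter { n: 3 };
        let orig = NonNull::from(&mut c);
        let drain = CounterDrain {
            taken: ManuallyDrop::new(unsafe { orig.as_ptr().read() }),
            orig,
        };
        drop(drain);
        assert_eq!(c.n, 0);
    }
}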
impl<T, A: Allocator + Clone> RawDrain<'_, T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}
unsafe impl<T, A: Allocator + Clone> Send for RawDrain<'_, T, A> where T: Send {}
unsafe impl<T, A: Allocator + Clone> Sync for RawDrain<'_, T, A> where T: Sync {}
impl<T, A: Allocator + Clone> Drop for RawDrain<'_, T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements. Note that this may panic.
            self.iter.drop_elements();

            // Reset the contents of the table now that all elements have been
            // dropped.
            self.table.clear_no_drop();

            // Move the now empty table back to its original location.
            self.orig_table
                .as_ptr()
                .copy_from_nonoverlapping(&*self.table, 1);
        }
    }
}
impl<T, A: Allocator + Clone> Iterator for RawDrain<'_, T, A> {
    type Item = T;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        unsafe {
            let item = self.iter.next()?;
            Some(item.read())
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
impl<T, A: Allocator + Clone> ExactSizeIterator for RawDrain<'_, T, A> {}
impl<T, A: Allocator + Clone> FusedIterator for RawDrain<'_, T, A> {}
/// Iterator over occupied buckets that could match a given hash.
///
/// In rare cases, the iterator may return a bucket with a different hash.
pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> {
    inner: RawIterHashInner<'a, A>,
    _marker: PhantomData<T>,
}
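
// Editor's illustration (not part of the original source): "a different
// hash" is possible because probing compares only a 7-bit tag derived from
// the hash, so two distinct hashes can collide on the tag. The closure below
// is a hypothetical re-derivation of the tag shape (`h2` above), not the
// actual implementation.
#[cfg(test)]
#[test]
fn distinct_hashes_can_share_a_tag() {
    let tag = |hash: u64| ((hash >> (64 - 7)) & 0x7f) as u8; // assumed h2 shape
    let (a, b) = (0xFE00_0000_0000_0000u64, 0xFE00_0000_0000_0001u64);
    assert_ne!(a, b);
    assert_eq!(tag(a), tag(b)); // same top 7 bits, same control-byte tag
}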
struct RawIterHashInner<'a, A: Allocator + Clone> {
    table: &'a RawTableInner<A>,

    // The top 7 bits of the hash.
    h2_hash: u8,

    // The sequence of groups to probe in the search.
    probe_seq: ProbeSeq,

    group: Group,

    // The elements within the group with a matching h2-hash.
    bitmask: BitMaskIter,
}
impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn new(table: &'a RawTable<T, A>, hash: u64) -> Self {
        RawIterHash {
            inner: RawIterHashInner::new(&table.table, hash),
            _marker: PhantomData,
        }
    }
}
impl<'a, A: Allocator + Clone> RawIterHashInner<'a, A> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn new(table: &'a RawTableInner<A>, hash: u64) -> Self {
        unsafe {
            let h2_hash = h2(hash);
            let probe_seq = table.probe_seq(hash);
            let group = Group::load(table.ctrl(probe_seq.pos));
            let bitmask = group.match_byte(h2_hash).into_iter();

            RawIterHashInner {
                table,
                h2_hash,
                probe_seq,
                group,
                bitmask,
            }
        }
    }
}
impl<'a, T, A: Allocator + Clone> Iterator for RawIterHash<'a, T, A> {
    type Item = Bucket<T>;

    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            match self.inner.next() {
                Some(index) => Some(self.inner.table.bucket(index)),
                None => None,
            }
        }
    }
}
impl<'a, A: Allocator + Clone> Iterator for RawIterHashInner<'a, A> {
    type Item = usize;

    fn next(&mut self) -> Option<Self::Item> {
        unsafe {
            loop {
                if let Some(bit) = self.bitmask.next() {
                    let index = (self.probe_seq.pos + bit) & self.table.bucket_mask;
                    return Some(index);
                }

                if likely(self.group.match_empty().any_bit_set()) {
                    return None;
                }

                self.probe_seq.move_next(self.table.bucket_mask);
                self.group = Group::load(self.table.ctrl(self.probe_seq.pos));
                self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
            }
        }
    }
}
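
// Editor's note (not part of the original source): the `match_empty` check
// above is what terminates an unsuccessful probe -- an EMPTY byte proves the
// key was never displaced past this group, while DELETED tombstones must not
// stop the search. A scalar model of that stop condition, reusing the
// control-byte constants defined at the top of this file:
#[cfg(test)]
#[test]
fn probe_stops_on_empty_not_on_deleted() {
    let group_has_empty = |g: &[u8]| g.iter().any(|&c| c == EMPTY);
    // A group with a tombstone but no EMPTY byte: keep probing.
    assert!(!group_has_empty(&[0x12, DELETED, 0x33, 0x44]));
    // A group containing an EMPTY byte: the search can stop here.
    assert!(group_has_empty(&[0x12, DELETED, EMPTY, 0x44]));
}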
#[cfg(test)]
mod test_map {
    use super::*;

    #[test]
    fn rehash() {
        let mut table = RawTable::new();
        let hasher = |i: &u64| *i;
        for i in 0..100 {
            table.insert(i, i, hasher);
        }

        for i in 0..100 {
            unsafe {
                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
            }
            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
        }

        table.rehash_in_place(hasher);

        for i in 0..100 {
            unsafe {
                assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i));
            }
            assert!(table.find(i + 100, |x| *x == i + 100).is_none());
        }
    }
}