use crate::alloc::alloc::{alloc, dealloc, handle_alloc_error};
use crate::scopeguard::guard;
use crate::TryReserveError;
use core::alloc::Layout;
use core::hint;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::mem::ManuallyDrop;
use core::ptr::NonNull;

// Use the SSE2 implementation if possible: it allows us to scan 16 buckets
// at once instead of 8. We don't bother with AVX since it would require
// runtime dispatch and wouldn't gain us much anyways: the probability of
// finding a match drops off drastically after the first few buckets.
//
// I attempted an implementation on ARM using NEON instructions, but it
// turns out that most NEON instructions have multi-cycle latency, which in
// the end outweighs any gains over the generic implementation.
cfg_if! {
    if #[cfg(all(
        target_feature = "sse2",
        any(target_arch = "x86", target_arch = "x86_64"),
        not(miri)
    ))] {
        mod sse2;
        use sse2 as imp;
    } else {
        #[path = "generic.rs"]
        mod generic;
        use generic as imp;
    }
}

mod bitmask;

use self::bitmask::{BitMask, BitMaskIter};
use self::imp::Group;

// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};

// On stable these fall back to identity functions so the call sites stay the same.
#[cfg(not(feature = "nightly"))]
#[inline]
fn likely(b: bool) -> bool {
    b
}
#[cfg(not(feature = "nightly"))]
#[inline]
fn unlikely(b: bool) -> bool {
    b
}

#[cfg(feature = "nightly")]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}
#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    (to as usize - from as usize) / mem::size_of::<T>()
}

/// Whether memory allocation errors should return an error or abort.
#[derive(Copy, Clone)]
enum Fallibility {
    Fallible,
    Infallible,
}

impl Fallibility {
    /// Error to return on capacity overflow.
    #[cfg_attr(feature = "inline-more", inline)]
    fn capacity_overflow(self) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::CapacityOverflow,
            Fallibility::Infallible => panic!("Hash table capacity overflow"),
        }
    }

    /// Error to return on allocation error.
    #[cfg_attr(feature = "inline-more", inline)]
    fn alloc_err(self, layout: Layout) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::AllocError { layout },
            Fallibility::Infallible => handle_alloc_error(layout),
        }
    }
}

/// Control byte value for an empty bucket.
const EMPTY: u8 = 0b1111_1111;

/// Control byte value for a deleted bucket.
const DELETED: u8 = 0b1000_0000;

/// Checks whether a control byte represents a full bucket (top bit is clear).
#[inline]
fn is_full(ctrl: u8) -> bool {
    ctrl & 0x80 == 0
}

/// Checks whether a control byte represents a special value (top bit is set).
#[inline]
fn is_special(ctrl: u8) -> bool {
    ctrl & 0x80 != 0
}

/// Checks whether a special control value is EMPTY (just check 1 bit).
#[inline]
fn special_is_empty(ctrl: u8) -> bool {
    debug_assert!(is_special(ctrl));
    ctrl & 0x01 != 0
}

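// Illustrative sanity check (not part of the original source): EMPTY and
// DELETED both have the top bit set, so they are "special"; only EMPTY also
// has the low bit set, which is exactly what `special_is_empty` tests.
#[cfg(test)]
#[test]
fn ctrl_byte_examples() {
    assert!(!is_full(EMPTY) && is_special(EMPTY) && special_is_empty(EMPTY));
    assert!(!is_full(DELETED) && is_special(DELETED) && !special_is_empty(DELETED));
    // A full bucket stores h2(hash) in the low 7 bits, so its top bit is clear.
    assert!(is_full(0b0101_0101));
}
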
/// Primary hash function, used to select the initial bucket to probe from.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // On 32-bit platforms we simply ignore the higher hash bits.
    hash as usize
}

/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
    // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
    // value, some hash functions (such as FxHash) produce a usize result
    // instead, which means that the top 32 bits are 0 on 32-bit platforms.
    let hash_len = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
    let top7 = hash >> (hash_len * 8 - 7);
    (top7 & 0x7f) as u8 // truncation
}

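// Worked example (illustrative, not part of the original source): on a
// 64-bit target, `h2` keeps the top 7 bits of the hash, which never sets the
// control byte's top bit, so a full bucket can always be told apart from
// EMPTY/DELETED.
#[cfg(test)]
#[test]
fn h2_takes_top_seven_bits() {
    let hash: u64 = 0xABCD_EF01_2345_6789;
    #[cfg(target_pointer_width = "64")]
    assert_eq!(h2(hash), ((hash >> 57) & 0x7f) as u8);
    assert!(is_full(h2(hash)));
}
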
/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
struct ProbeSeq {
    bucket_mask: usize,
    pos: usize,
    stride: usize,
}

impl Iterator for ProbeSeq {
    type Item = usize;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<usize> {
        // We should have found an empty bucket by now and ended the probe.
        debug_assert!(
            self.stride <= self.bucket_mask,
            "Went past end of probe sequence"
        );

        let result = self.pos;
        self.stride += Group::WIDTH;
        self.pos += self.stride;
        self.pos &= self.bucket_mask;
        Some(result)
    }
}

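// Illustrative sketch (not part of the original source): with a power-of-two
// number of groups, the triangular sequence 0, 1, 3, 6, 10, ... taken modulo
// the group count visits every group exactly once before repeating. `GROUPS`
// is an arbitrary power of two chosen for the demo.
#[cfg(test)]
#[test]
fn triangular_probe_visits_every_group() {
    const GROUPS: usize = 16;
    let mut seen = [false; GROUPS];
    let (mut pos, mut stride) = (0usize, 0usize);
    for _ in 0..GROUPS {
        seen[pos] = true;
        stride += 1; // ProbeSeq strides in units of Group::WIDTH buckets.
        pos = (pos + stride) & (GROUPS - 1);
    }
    assert!(seen.iter().all(|&s| s));
}
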
/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
#[cfg_attr(not(target_os = "emscripten"), inline)]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    debug_assert_ne!(cap, 0);

    // For small tables we require at least 1 empty bucket so that lookups are
    // guaranteed to terminate if an element doesn't exist in the table.
    //
    // We don't bother with a table size of 2 buckets since that can only
    // hold a single element. Instead we skip directly to a 4 bucket table
    // which can hold 3 elements.
    if cap < 8 {
        return Some(if cap < 4 { 4 } else { 8 });
    }

    // Otherwise require 1/8 buckets to be empty (87.5% load)
    //
    // Be careful when modifying this, calculate_layout relies on the
    // overflow check here.
    let adjusted_cap = cap.checked_mul(8)? / 7;

    // Any overflows will have been caught by the checked_mul. Also, any
    // rounding errors from the division above will be cleaned up by
    // next_power_of_two (which can't overflow because of the previous division).
    Some(adjusted_cap.next_power_of_two())
}

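// Illustrative values (not part of the original source): small requests snap
// to 4 or 8 buckets; larger ones get 1/8 headroom and round up to a power of
// two, e.g. 8 items -> 8 * 8 / 7 = 9 -> next power of two = 16 buckets.
#[cfg(test)]
#[test]
fn capacity_to_buckets_examples() {
    assert_eq!(capacity_to_buckets(1), Some(4));
    assert_eq!(capacity_to_buckets(7), Some(8));
    assert_eq!(capacity_to_buckets(8), Some(16));
    assert_eq!(capacity_to_buckets(28), Some(32));
}
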
/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[inline]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
        // Keep in mind that the bucket mask is one less than the bucket count.
        bucket_mask
    } else {
        // For larger tables we reserve 12.5% of the slots as empty.
        ((bucket_mask + 1) / 8) * 7
    }
}

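// Round-trip sketch (illustrative, not part of the original source): growing
// to `capacity_to_buckets(n)` buckets always yields an effective capacity of
// at least n, so a reserve never has to be repeated.
#[cfg(test)]
#[test]
fn load_factor_round_trip() {
    for n in 1..200usize {
        let buckets = capacity_to_buckets(n).unwrap();
        assert!(bucket_mask_to_capacity(buckets - 1) >= n);
    }
}
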
/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past the last element of the buckets array)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "nightly")]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());

    // Array of buckets
    let data = Layout::array::<T>(buckets).ok()?;

    // Array of control bytes. This must be aligned to the group size.
    //
    // We add `Group::WIDTH` control bytes at the end of the array which
    // replicate the bytes at the start of the array and thus avoid the need to
    // perform bounds-checking while probing.
    //
    // There is no possible overflow here since buckets is a power of two and
    // Group::WIDTH is a small number.
    let ctrl = unsafe { Layout::from_size_align_unchecked(buckets + Group::WIDTH, Group::WIDTH) };

    data.extend(ctrl).ok()
}

/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past the last element of the buckets array)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(not(feature = "nightly"))]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());

    // Manual layout calculation since Layout methods are not yet stable.
    let ctrl_align = usize::max(mem::align_of::<T>(), Group::WIDTH);
    let ctrl_offset = mem::size_of::<T>()
        .checked_mul(buckets)?
        .checked_add(ctrl_align - 1)?
        & !(ctrl_align - 1);
    let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;

    Some((
        unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
        ctrl_offset,
    ))
}

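// Layout sketch (illustrative, not part of the original source): the element
// array comes first, then the control bytes at a group-aligned offset, with
// Group::WIDTH trailing bytes replicated for unaligned group loads.
#[cfg(test)]
#[test]
fn layout_example() {
    let (layout, ctrl_offset) = calculate_layout::<u32>(16).unwrap();
    assert_eq!(ctrl_offset % Group::WIDTH, 0); // control bytes are group-aligned
    assert!(layout.size() >= ctrl_offset + 16 + Group::WIDTH);
}
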
/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // Actually it is a pointer to the next element rather than to the element
    // itself, because this makes it easier to maintain the pointer-arithmetic
    // invariants; keeping a direct pointer to the element introduces complications.
    // Using `NonNull` for variance and niche layout
    ptr: NonNull<T>,
}

// This Send impl is needed for rayon support. This is safe since Bucket is
// never exposed in a public API.
unsafe impl<T> Send for Bucket<T> {}

impl<T> Clone for Bucket<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self { ptr: self.ptr }
    }
}

impl<T> Bucket<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            // won't overflow because index must be less than length
            (index + 1) as *mut T
        } else {
            base.as_ptr().sub(index)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
        if mem::size_of::<T>() == 0 {
            self.ptr.as_ptr() as usize - 1
        } else {
            offset_from(base.as_ptr(), self.ptr.as_ptr())
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ptr(&self) -> *mut T {
        if mem::size_of::<T>() == 0 {
            // Just return an arbitrary ZST pointer which is properly aligned
            mem::align_of::<T>() as *mut T
        } else {
            self.ptr.as_ptr().sub(1)
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn next_n(&self, offset: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            (self.ptr.as_ptr() as usize + offset) as *mut T
        } else {
            self.ptr.as_ptr().sub(offset)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }

    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
        self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
    }
}

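// Pointer-arithmetic sketch (illustrative, not part of the original source):
// elements are laid out below the control bytes and grow downwards, so a
// bucket's element sits `index + 1` slots below `data_end()`. The identity
// "hasher" is a demo stand-in, not a real hash function.
#[cfg(test)]
#[test]
fn bucket_points_below_data_end() {
    let mut table = RawTable::with_capacity(4);
    let hasher = |v: &u64| *v;
    let bucket = table.insert(7, 42u64, hasher);
    unsafe {
        let index = table.bucket_index(&bucket);
        assert_eq!(bucket.as_ptr(), table.data_end().as_ptr().sub(index + 1));
        assert_eq!(*bucket.as_ref(), 42);
    }
}
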
/// A raw hash table with an unsafe API.
pub struct RawTable<T> {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,

    // [Padding], T1, T2, ..., Tlast, C1, C2, ...
    //                                ^ points here
    ctrl: NonNull<u8>,

    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,

    // Number of elements in the table, only really used by len()
    items: usize,

    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}

impl<T> RawTable<T> {
    /// Creates a new empty hash table without allocating any memory.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub const fn new() -> Self {
        Self {
            // Be careful to cast the entire slice to a raw pointer.
            ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
            bucket_mask: 0,
            items: 0,
            growth_left: 0,
            marker: PhantomData,
        }
    }

    /// Allocates a new hash table with the given number of buckets.
    ///
    /// The control bytes are left uninitialized.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(
        buckets: usize,
        fallability: Fallibility,
    ) -> Result<Self, TryReserveError> {
        debug_assert!(buckets.is_power_of_two());

        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
        let (layout, ctrl_offset) = match calculate_layout::<T>(buckets) {
            Some(lco) => lco,
            None => return Err(fallability.capacity_overflow()),
        };
        let ptr = match NonNull::new(alloc(layout)) {
            Some(ptr) => ptr,
            None => return Err(fallability.alloc_err(layout)),
        };
        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
        Ok(Self {
            ctrl,
            bucket_mask: buckets - 1,
            items: 0,
            growth_left: bucket_mask_to_capacity(buckets - 1),
            marker: PhantomData,
        })
    }

    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    fn fallible_with_capacity(
        capacity: usize,
        fallability: Fallibility,
    ) -> Result<Self, TryReserveError> {
        if capacity == 0 {
            Ok(Self::new())
        } else {
            unsafe {
                // Avoid `Option::ok_or_else` because it bloats LLVM IR.
                let buckets = match capacity_to_buckets(capacity) {
                    Some(buckets) => buckets,
                    None => return Err(fallability.capacity_overflow()),
                };
                let result = Self::new_uninitialized(buckets, fallability)?;
                result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());

                Ok(result)
            }
        }
    }

    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    #[cfg(feature = "raw")]
    pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
        Self::fallible_with_capacity(capacity, Fallibility::Fallible)
    }

    /// Allocates a new hash table with at least enough capacity for inserting
    /// the given number of elements without reallocating.
    pub fn with_capacity(capacity: usize) -> Self {
        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
        match Self::fallible_with_capacity(capacity, Fallibility::Infallible) {
            Ok(capacity) => capacity,
            Err(_) => unsafe { hint::unreachable_unchecked() },
        }
    }

    /// Deallocates the table without dropping any entries.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn free_buckets(&mut self) {
        // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
        let (layout, ctrl_offset) = match calculate_layout::<T>(self.buckets()) {
            Some(lco) => lco,
            None => hint::unreachable_unchecked(),
        };
        dealloc(self.ctrl.as_ptr().sub(ctrl_offset), layout);
    }

    /// Returns pointer to one past last element of data table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn data_end(&self) -> NonNull<T> {
        NonNull::new_unchecked(self.ctrl.as_ptr() as *mut T)
    }

    /// Returns pointer to start of data table.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "nightly")]
    pub unsafe fn data_start(&self) -> *mut T {
        self.data_end().as_ptr().wrapping_sub(self.buckets())
    }

    /// Returns the index of a bucket from a `Bucket`.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
        bucket.to_base_index(self.data_end())
    }

    /// Returns a pointer to a control byte.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn ctrl(&self, index: usize) -> *mut u8 {
        debug_assert!(index < self.num_ctrl_bytes());
        self.ctrl.as_ptr().add(index)
    }

    /// Returns a pointer to an element in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
        debug_assert_ne!(self.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data_end(), index)
    }

    /// Erases an element from the table without dropping it.
    #[cfg_attr(feature = "inline-more", inline)]
    #[deprecated(since = "0.8.1", note = "use erase or remove instead")]
    pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
        let index = self.bucket_index(item);
        debug_assert!(is_full(*self.ctrl(index)));
        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
        let empty_after = Group::load(self.ctrl(index)).match_empty();

        // If we are inside a continuous block of Group::WIDTH full or deleted
        // cells then a probe window may have seen a full block when trying to
        // insert. We therefore need to keep that block non-empty so that
        // lookups will continue searching to the next probe window.
        //
        // Note that in this context `leading_zeros` refers to the bytes at the
        // end of a group, while `trailing_zeros` refers to the bytes at the
        // beginning of a group.
        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
            DELETED
        } else {
            self.growth_left += 1;
            EMPTY
        };
        self.set_ctrl(index, ctrl);
        self.items -= 1;
    }

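    // Illustrative note (not part of the original source): with a
    // hypothetical Group::WIDTH of 4, erasing X from
    //     ... [FULL] [FULL] [X] [FULL] [FULL] ...
    // must write DELETED rather than EMPTY: every probe window covering X was
    // full when later elements were inserted, so a lookup for one of them
    // must not stop early at this slot.
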
    /// Erases an element from the table, dropping it in place.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    #[allow(deprecated)]
    pub unsafe fn erase(&mut self, item: Bucket<T>) {
        // Erase the element from the table first since drop might panic.
        self.erase_no_drop(&item);
        item.drop();
    }

    /// Finds and erases an element from the table, dropping it in place.
    /// Returns true if an element was found.
    #[cfg(feature = "raw")]
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
        // Avoid `Option::map` because it bloats LLVM IR.
        if let Some(bucket) = self.find(hash, eq) {
            unsafe { self.erase(bucket) };
            true
        } else {
            false
        }
    }

    /// Removes an element from the table, returning it.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    #[allow(deprecated)]
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
        self.erase_no_drop(&item);
        item.read()
    }

    /// Finds and removes an element from the table, returning it.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { self.remove(bucket) }),
            None => None,
        }
    }

    /// Returns an iterator for a probe sequence on the table.
    ///
    /// This iterator never terminates, but is guaranteed to visit each bucket
    /// group exactly once. The loop using `probe_seq` must terminate upon
    /// reaching a group containing an empty bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
        ProbeSeq {
            bucket_mask: self.bucket_mask,
            pos: h1(hash) & self.bucket_mask,
            stride: 0,
        }
    }

    /// Sets a control byte, and possibly also the replicated control byte at
    /// the end of the array.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
        // Replicate the first Group::WIDTH control bytes at the end of
        // the array without using a branch:
        // - If index >= Group::WIDTH then index == index2.
        // - Otherwise index2 == self.bucket_mask + 1 + index.
        //
        // The very last replicated control byte is never actually read because
        // we mask the initial index for unaligned loads, but we write it
        // anyways because it makes the set_ctrl implementation simpler.
        //
        // If there are fewer buckets than Group::WIDTH then this code will
        // replicate the buckets at the end of the trailing group. For example
        // with 2 buckets and a group size of 4, the control bytes will look
        // like this:
        //
        //     real    |  replicated
        // ---------------------------------------------
        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
        // ---------------------------------------------
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;

        *self.ctrl(index) = ctrl;
        *self.ctrl(index2) = ctrl;
    }

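    // Worked example (illustrative, not part of the original source): for 32
    // buckets (bucket_mask == 31) and a group width of 16,
    //   index2(20) == ((20 - 16) & 31) + 16 == 20           (same byte twice)
    //   index2(3)  == ((3 - 16) & 31) + 16  == 35 == 32 + 3 (mirrored tail)
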
    /// Searches for an empty or deleted bucket which is suitable for inserting
    /// a new element.
    ///
    /// There must be at least 1 empty bucket in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    fn find_insert_slot(&self, hash: u64) -> usize {
        for pos in self.probe_seq(hash) {
            unsafe {
                let group = Group::load(self.ctrl(pos));
                if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
                    let result = (pos + bit) & self.bucket_mask;

                    // In tables smaller than the group width, trailing control
                    // bytes outside the range of the table are filled with
                    // EMPTY entries. These will unfortunately trigger a
                    // match, but once masked may point to a full bucket that
                    // is already occupied. We detect this situation here and
                    // perform a second scan starting at the beginning of the
                    // table. This second scan is guaranteed to find an empty
                    // slot (due to the load factor) before hitting the trailing
                    // control bytes (containing EMPTY).
                    if unlikely(is_full(*self.ctrl(result))) {
                        debug_assert!(self.bucket_mask < Group::WIDTH);
                        debug_assert_ne!(pos, 0);
                        return Group::load_aligned(self.ctrl(0))
                            .match_empty_or_deleted()
                            .lowest_set_bit_nonzero();
                    }

                    return result;
                }
            }
        }

        // probe_seq never returns.
        unreachable!();
    }

    /// Marks all table buckets as empty without dropping their contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear_no_drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
            }
        }
        self.items = 0;
        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
    }

    /// Removes all elements from the table without freeing the backing memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear(&mut self) {
        // Ensure that the table is reset even if one of the drops panic
        let self_ = guard(self, |self_| self_.clear_no_drop());

        unsafe {
            if mem::needs_drop::<T>() && self_.len() != 0 {
                for item in self_.iter() {
                    item.drop();
                }
            }
        }
    }

    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
        // Calculate the minimal number of elements that we need to reserve
        // space for.
        let min_size = usize::max(self.items, min_size);
        if min_size == 0 {
            *self = Self::new();
            return;
        }

        // Calculate the number of buckets that we need for this number of
        // elements. If the calculation overflows then the requested bucket
        // count must be larger than what we have right now and nothing needs
        // to be done.
        let min_buckets = match capacity_to_buckets(min_size) {
            Some(buckets) => buckets,
            None => return,
        };

        // If we have more buckets than we need, shrink the table.
        if min_buckets < self.buckets() {
            // Fast path if the table is empty
            if self.items == 0 {
                *self = Self::with_capacity(min_size)
            } else {
                // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                if self
                    .resize(min_size, hasher, Fallibility::Infallible)
                    .is_err()
                {
                    unsafe { hint::unreachable_unchecked() }
                }
            }
        }
    }

    /// Ensures that at least `additional` items can be inserted into the table
    /// without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
        if additional > self.growth_left {
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
            if self
                .reserve_rehash(additional, hasher, Fallibility::Infallible)
                .is_err()
            {
                unsafe { hint::unreachable_unchecked() }
            }
        }
    }

    /// Tries to ensure that at least `additional` items can be inserted into
    /// the table without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn try_reserve(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
    ) -> Result<(), TryReserveError> {
        if additional > self.growth_left {
            self.reserve_rehash(additional, hasher, Fallibility::Fallible)
        } else {
            Ok(())
        }
    }

    /// Out-of-line slow path for `reserve` and `try_reserve`.
    #[cold]
    #[inline(never)]
    fn reserve_rehash(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
        fallability: Fallibility,
    ) -> Result<(), TryReserveError> {
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
        let new_items = match self.items.checked_add(additional) {
            Some(new_items) => new_items,
            None => return Err(fallability.capacity_overflow()),
        };
        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
        if new_items <= full_capacity / 2 {
            // Rehash in-place without re-allocating if we have plenty of spare
            // capacity that is locked up due to DELETED entries.
            self.rehash_in_place(hasher);
            Ok(())
        } else {
            // Otherwise, conservatively resize to at least the next size up
            // to avoid churning deletes into frequent rehashes.
            self.resize(
                usize::max(new_items, full_capacity + 1),
                hasher,
                fallability,
            )
        }
    }

    /// Rehashes the contents of the table in place (i.e. without changing the
    /// allocation).
    ///
    /// If `hasher` panics then some of the table's contents may be lost.
    fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
        unsafe {
            // Bulk convert all full control bytes to DELETED, and all DELETED
            // control bytes to EMPTY. This effectively frees up all buckets
            // containing a DELETED entry.
            for i in (0..self.buckets()).step_by(Group::WIDTH) {
                let group = Group::load_aligned(self.ctrl(i));
                let group = group.convert_special_to_empty_and_full_to_deleted();
                group.store_aligned(self.ctrl(i));
            }

            // Fix up the trailing control bytes. See the comments in set_ctrl
            // for the handling of tables smaller than the group width.
            if self.buckets() < Group::WIDTH {
                self.ctrl(0)
                    .copy_to(self.ctrl(Group::WIDTH), self.buckets());
            } else {
                self.ctrl(0)
                    .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
            }

            // If the hash function panics then properly clean up any elements
            // that we haven't rehashed yet. We unfortunately can't preserve the
            // element since we lost their hash and have no way of recovering it
            // without risking another panic.
            let mut guard = guard(self, |self_| {
                if mem::needs_drop::<T>() {
                    for i in 0..self_.buckets() {
                        if *self_.ctrl(i) == DELETED {
                            self_.set_ctrl(i, EMPTY);
                            self_.bucket(i).drop();
                            self_.items -= 1;
                        }
                    }
                }
                self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
            });

            // At this point, DELETED elements are elements that we haven't
            // rehashed yet. Find them and re-insert them at their ideal
            // position.
            'outer: for i in 0..guard.buckets() {
                if *guard.ctrl(i) != DELETED {
                    continue;
                }
                'inner: loop {
                    // Hash the current item
                    let item = guard.bucket(i);
                    let hash = hasher(item.as_ref());

                    // Search for a suitable place to put it
                    let new_i = guard.find_insert_slot(hash);

                    // Probing works by scanning through all of the control
                    // bytes in groups, which may not be aligned to the group
                    // size. If both the new and old position fall within the
                    // same unaligned group, then there is no benefit in moving
                    // it and we can just continue to the next item.
                    let probe_index = |pos: usize| {
                        (pos.wrapping_sub(guard.probe_seq(hash).pos) & guard.bucket_mask)
                            / Group::WIDTH
                    };
                    if likely(probe_index(i) == probe_index(new_i)) {
                        guard.set_ctrl(i, h2(hash));
                        continue 'outer;
                    }

                    // We are moving the current item to a new position. Write
                    // our H2 to the control byte of the new position.
                    let prev_ctrl = *guard.ctrl(new_i);
                    guard.set_ctrl(new_i, h2(hash));

                    if prev_ctrl == EMPTY {
                        // If the target slot is empty, simply move the current
                        // element into the new slot and clear the old control
                        // byte.
                        guard.set_ctrl(i, EMPTY);
                        guard.bucket(new_i).copy_from_nonoverlapping(&item);
                        continue 'outer;
                    } else {
                        // If the target slot is occupied, swap the two elements
                        // and then continue processing the element that we just
                        // swapped into the old slot.
                        debug_assert_eq!(prev_ctrl, DELETED);
                        mem::swap(guard.bucket(new_i).as_mut(), item.as_mut());
                        continue 'inner;
                    }
                }
            }

            guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
            mem::forget(guard);
        }
    }

    /// Allocates a new table of a different size and moves the contents of the
    /// current table into it.
    fn resize(
        &mut self,
        capacity: usize,
        hasher: impl Fn(&T) -> u64,
        fallability: Fallibility,
    ) -> Result<(), TryReserveError> {
        unsafe {
            debug_assert!(self.items <= capacity);

            // Allocate and initialize the new table.
            let mut new_table = Self::fallible_with_capacity(capacity, fallability)?;
            new_table.growth_left -= self.items;
            new_table.items = self.items;

            // The hash function may panic, in which case we simply free the new
            // table without dropping any elements that may have been copied into
            // it.
            //
            // This guard is also used to free the old table on success, see
            // the comment at the bottom of this function.
            let mut new_table = guard(ManuallyDrop::new(new_table), |new_table| {
                if !new_table.is_empty_singleton() {
                    new_table.free_buckets();
                }
            });

            // Copy all elements to the new table.
            for item in self.iter() {
                // This may panic.
                let hash = hasher(item.as_ref());

                // We can use a simpler version of insert() here since:
                // - there are no DELETED entries.
                // - we know there is enough space in the table.
                // - all elements are unique.
                let index = new_table.find_insert_slot(hash);
                new_table.set_ctrl(index, h2(hash));
                new_table.bucket(index).copy_from_nonoverlapping(&item);
            }

            // We successfully copied all elements without panicking. Now replace
            // self with the new table. The old table will have its memory freed but
            // the items will not be dropped (since they have been moved into the
            // new table).
            mem::swap(self, &mut new_table);

            Ok(())
        }
    }

    /// Inserts a new element into the table, and returns its raw bucket.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
        unsafe {
            let mut index = self.find_insert_slot(hash);

            // We can avoid growing the table once we have reached our load
            // factor if we are replacing a tombstone. This works since the
            // number of EMPTY slots does not change in this case.
            let old_ctrl = *self.ctrl(index);
            if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
                self.reserve(1, hasher);
                index = self.find_insert_slot(hash);
            }

            let bucket = self.bucket(index);
            self.growth_left -= special_is_empty(old_ctrl) as usize;
            self.set_ctrl(index, h2(hash));
            bucket.write(value);
            self.items += 1;
            bucket
        }
    }

    /// Inserts a new element into the table, and returns a mutable reference to it.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
        unsafe { self.insert(hash, value, hasher).as_mut() }
    }

    /// Inserts a new element into the table, without growing the table.
    ///
    /// There must be enough space in the table to insert the new element.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
    pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
        unsafe {
            let index = self.find_insert_slot(hash);
            let bucket = self.bucket(index);

            // If we are replacing a DELETED entry then we don't need to update
            // the load counter.
            let old_ctrl = *self.ctrl(index);
            self.growth_left -= special_is_empty(old_ctrl) as usize;

            self.set_ctrl(index, h2(hash));
            bucket.write(value);
            self.items += 1;
            bucket
        }
    }

    /// Temporarily removes a bucket, applying the given function to the removed
    /// element and optionally putting back the returned value in the same bucket.
    ///
    /// Returns `true` if the bucket still contains an element.
    ///
    /// This does not check if the given bucket is actually occupied.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
    where
        F: FnOnce(T) -> Option<T>,
    {
        let index = self.bucket_index(&bucket);
        let old_ctrl = *self.ctrl(index);
        debug_assert!(is_full(old_ctrl));
        let old_growth_left = self.growth_left;
        let item = self.remove(bucket);
        if let Some(new_item) = f(item) {
            self.growth_left = old_growth_left;
            self.set_ctrl(index, old_ctrl);
            self.items += 1;
            self.bucket(index).write(new_item);
            true
        } else {
            false
        }
    }

    /// Searches for an element in the table.
    #[inline]
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
        unsafe {
            for bucket in self.iter_hash(hash) {
                let elm = bucket.as_ref();
                if likely(eq(elm)) {
                    return Some(bucket);
                }
            }
            None
        }
    }

    /// Gets a reference to an element in the table.
    #[inline]
    pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { bucket.as_ref() }),
            None => None,
        }
    }

    /// Gets a mutable reference to an element in the table.
    #[inline]
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
            None => None,
        }
    }

    /// Returns the number of elements the map can hold without reallocating.
    ///
    /// This number is a lower bound; the table might be able to hold
    /// more, but is guaranteed to be able to hold at least this many.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn capacity(&self) -> usize {
        self.items + self.growth_left
    }

    /// Returns the number of elements in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn len(&self) -> usize {
        self.items
    }

    /// Returns the number of buckets in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn buckets(&self) -> usize {
        self.bucket_mask + 1
    }

    /// Returns the number of control bytes in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    fn num_ctrl_bytes(&self) -> usize {
        self.bucket_mask + 1 + Group::WIDTH
    }

    /// Returns whether this table points to the empty singleton with a capacity
    /// of 0.
    #[cfg_attr(feature = "inline-more", inline)]
    fn is_empty_singleton(&self) -> bool {
        self.bucket_mask == 0
    }

    /// Returns an iterator over every element in the table. It is up to
    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
    /// Because we cannot make the `next` method unsafe on the `RawIter`
    /// struct, we have to make the `iter` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn iter(&self) -> RawIter<T> {
        let data = Bucket::from_base_index(self.data_end(), 0);
        RawIter {
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
            items: self.items,
        }
    }

    /// Returns an iterator over occupied buckets that could match a given hash.
    ///
    /// In rare cases, the iterator may return a bucket with a different hash.
    ///
    /// It is up to the caller to ensure that the `RawTable` outlives the
    /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
    /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T> {
        RawIterHash::new(self, hash)
    }

    /// Returns an iterator which removes all elements from the table without
    /// freeing the memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn drain(&mut self) -> RawDrain<'_, T> {
        unsafe {
            let iter = self.iter();
            self.drain_iter_from(iter)
        }
    }

    /// Returns an iterator which removes all elements from the table without
    /// freeing the memory.
    ///
    /// Iteration starts at the provided iterator's current location.
    ///
    /// It is up to the caller to ensure that the iterator is valid for this
    /// `RawTable` and covers all items that remain in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T> {
        debug_assert_eq!(iter.len(), self.len());
        RawDrain {
            iter,
            table: ManuallyDrop::new(mem::replace(self, Self::new())),
            orig_table: NonNull::from(self),
            marker: PhantomData,
        }
    }

    /// Returns an iterator which consumes all elements from the table.
    ///
    /// Iteration starts at the provided iterator's current location.
    ///
    /// It is up to the caller to ensure that the iterator is valid for this
    /// `RawTable` and covers all items that remain in the table.
    pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T> {
        debug_assert_eq!(iter.len(), self.len());

        let alloc = self.into_alloc();
        RawIntoIter {
            iter,
            alloc,
            marker: PhantomData,
        }
    }

    /// Converts the table into a raw allocation. The contents of the table
    /// should be dropped using a `RawIter` before freeing the allocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) fn into_alloc(self) -> Option<(NonNull<u8>, Layout)> {
        let alloc = if self.is_empty_singleton() {
            None
        } else {
            // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
            let (layout, ctrl_offset) = match calculate_layout::<T>(self.buckets()) {
                Some(lco) => lco,
                None => unsafe { hint::unreachable_unchecked() },
            };
            Some((
                unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
                layout,
            ))
        };
        mem::forget(self);
        alloc
    }
}

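// Usage sketch (illustrative, not part of the original source): callers
// compute hashes themselves and supply an equality closure, which is what
// lets wrappers like HashMap cache hashes instead of rehashing keys. The
// identity "hasher" below is a demo stand-in, not a real hash function.
#[cfg(test)]
#[test]
fn raw_api_round_trip() {
    let mut table: RawTable<(u64, &str)> = RawTable::new();
    let hasher = |kv: &(u64, &str)| kv.0;
    table.insert(1, (1, "one"), hasher);
    table.insert(2, (2, "two"), hasher);
    assert_eq!(table.get(1, |kv| kv.0 == 1).map(|kv| kv.1), Some("one"));
    assert_eq!(table.remove_entry(2, |kv| kv.0 == 2), Some((2, "two")));
    assert_eq!(table.len(), 1);
}
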
unsafe impl<T> Send for RawTable<T> where T: Send {}
unsafe impl<T> Sync for RawTable<T> where T: Sync {}

impl<T: Clone> Clone for RawTable<T> {
    fn clone(&self) -> Self {
        if self.is_empty_singleton() {
            Self::new()
        } else {
            unsafe {
                let mut new_table = ManuallyDrop::new(
                    // Avoid `Result::ok_or_else` because it bloats LLVM IR.
                    match Self::new_uninitialized(self.buckets(), Fallibility::Infallible) {
                        Ok(table) => table,
                        Err(_) => hint::unreachable_unchecked(),
                    },
                );

                new_table.clone_from_spec(self, |new_table| {
                    // We need to free the memory allocated for the new table.
                    new_table.free_buckets();
                });

                // Return the newly created table.
                ManuallyDrop::into_inner(new_table)
            }
        }
    }

    fn clone_from(&mut self, source: &Self) {
        if source.is_empty_singleton() {
            *self = Self::new();
        } else {
            unsafe {
                // First, drop all our elements without clearing the control bytes.
                if mem::needs_drop::<T>() && self.len() != 0 {
                    for item in self.iter() {
                        item.drop();
                    }
                }

                // If necessary, resize our table to match the source.
                if self.buckets() != source.buckets() {
                    // Skip our drop by using ptr::write.
                    if !self.is_empty_singleton() {
                        self.free_buckets();
                    }
                    (self as *mut Self).write(
                        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                        match Self::new_uninitialized(source.buckets(), Fallibility::Infallible) {
                            Ok(table) => table,
                            Err(_) => hint::unreachable_unchecked(),
                        },
                    );
                }

                self.clone_from_spec(source, |self_| {
                    // We need to leave the table in an empty state.
                    self_.clear_no_drop()
                });
            }
        }
    }
}

1247 trait RawTableClone
{
1248 unsafe fn clone_from_spec(&mut self, source
: &Self, on_panic
: impl FnMut(&mut Self));
1250 impl<T
: Clone
> RawTableClone
for RawTable
<T
> {
1251 #[cfg_attr(feature = "inline-more", inline)]
1253 unsafe fn clone_from_spec(&mut self, source
: &Self, on_panic
: impl FnMut(&mut Self)) {
1254 self.clone_from_impl(source
, on_panic
);
#[cfg(feature = "nightly")]
impl<T: Copy> RawTableClone for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_spec(&mut self, source: &Self, _on_panic: impl FnMut(&mut Self)) {
        source
            .ctrl(0)
            .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());
        source
            .data_start()
            .copy_to_nonoverlapping(self.data_start(), self.buckets());

        self.items = source.items;
        self.growth_left = source.growth_left;
    }
}

impl<T: Clone> RawTable<T> {
    /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) {
        // Copy the control bytes unchanged. We do this in a single pass
        source
            .ctrl(0)
            .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());

        // The cloning of elements may panic, in which case we need
        // to make sure we drop only the elements that have been
        // cloned so far.
        let mut guard = guard((0, &mut *self), |(index, self_)| {
            if mem::needs_drop::<T>() && self_.len() != 0 {
                for i in 0..=*index {
                    if is_full(*self_.ctrl(i)) {
                        self_.bucket(i).drop();
                    }
                }
            }

            // Depending on whether we were called from clone or clone_from, we
            // either need to free the memory for the destination table or just
            // clear the control bytes.
            on_panic(self_);
        });

        for from in source.iter() {
            let index = source.bucket_index(&from);
            let to = guard.1.bucket(index);
            to.write(from.as_ref().clone());

            // Update the index in case we need to unwind.
            guard.0 = index;
        }

        // Successfully cloned all items, no need to clean up.
        mem::forget(guard);

        self.items = source.items;
        self.growth_left = source.growth_left;
    }

    /// Variant of `clone_from` to use when a hasher is available.
    #[cfg(feature = "raw")]
    pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
        // If we have enough capacity in the table, just clear it and insert
        // elements one by one. We don't do this if we have the same number of
        // buckets as the source since we can just copy the contents directly
        // in that case.
        if self.buckets() != source.buckets()
            && bucket_mask_to_capacity(self.bucket_mask) >= source.len()
        {
            self.clear();

            let guard_self = guard(&mut *self, |self_| {
                // Clear the partially copied table if a panic occurs, otherwise
                // items and growth_left will be out of sync with the contents
                // of the table.
                self_.clear();
            });

            unsafe {
                for item in source.iter() {
                    // This may panic.
                    let item = item.as_ref().clone();
                    let hash = hasher(&item);

                    // We can use a simpler version of insert() here since:
                    // - there are no DELETED entries.
                    // - we know there is enough space in the table.
                    // - all elements are unique.
                    let index = guard_self.find_insert_slot(hash);
                    guard_self.set_ctrl(index, h2(hash));
                    guard_self.bucket(index).write(item);
                }
            }

            // Successfully cloned all items, no need to clean up.
            mem::forget(guard_self);

            self.items = source.items;
            self.growth_left -= source.items;
        } else {
            self.clone_from(source);
        }
    }
}

#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T> Drop for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                if mem::needs_drop::<T>() && self.len() != 0 {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}

#[cfg(not(feature = "nightly"))]
impl<T> Drop for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                if mem::needs_drop::<T>() && self.len() != 0 {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}

impl<T> IntoIterator for RawTable<T> {
    type Item = T;
    type IntoIter = RawIntoIter<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn into_iter(self) -> RawIntoIter<T> {
        unsafe {
            let iter = self.iter();
            self.into_iter_from(iter)
        }
    }
}

/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
/// not track an item count.
pub(crate) struct RawIterRange<T> {
    // Mask of full buckets in the current group. Bits are cleared from this
    // mask as each element is processed.
    current_group: BitMask,

    // Pointer to the buckets for the current group.
    data: Bucket<T>,

    // Pointer to the next group of control bytes,
    // Must be aligned to the group size.
    next_ctrl: *const u8,

    // Pointer one past the last control byte of this range.
    end: *const u8,
}

impl<T> RawIterRange<T> {
    /// Returns a `RawIterRange` covering a subset of a table.
    ///
    /// The control byte address must be aligned to the group size.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
        debug_assert_ne!(len, 0);
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
        let end = ctrl.add(len);

        // Load the first group and advance ctrl to point to the next group
        let current_group = Group::load_aligned(ctrl).match_full();
        let next_ctrl = ctrl.add(Group::WIDTH);

        Self {
            current_group,
            data,
            next_ctrl,
            end,
        }
    }

    /// Splits a `RawIterRange` into two halves.
    ///
    /// Returns `None` if the remaining range is smaller than or equal to the
    /// group width.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "rayon")]
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
        unsafe {
            if self.end <= self.next_ctrl {
                // Nothing to split if the group that we are currently processing
                // is the last one.
                (self, None)
            } else {
                // len is the remaining number of elements after the group that
                // we are currently processing. It must be a multiple of the
                // group size (small tables are caught by the check above).
                let len = offset_from(self.end, self.next_ctrl);
                debug_assert_eq!(len % Group::WIDTH, 0);

                // Split the remaining elements into two halves, but round the
                // midpoint down in case there is an odd number of groups
                // remaining. This ensures that:
                // - The tail is at least 1 group long.
                // - The split is roughly even considering we still have the
                //   current group to process.
                let mid = (len / 2) & !(Group::WIDTH - 1);

                let tail = Self::new(
                    self.next_ctrl.add(mid),
                    self.data.next_n(Group::WIDTH).next_n(mid),
                    len - mid,
                );
                debug_assert_eq!(
                    self.data.next_n(Group::WIDTH).next_n(mid).ptr,
                    tail.data.ptr
                );
                debug_assert_eq!(self.end, tail.end);
                self.end = self.next_ctrl.add(mid);
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);

                (self, Some(tail))
            }
        }
    }
}

// We make raw iterators unconditionally Send and Sync, and let the PhantomData
// in the actual iterator implementations determine the real Send/Sync bounds.
unsafe impl<T> Send for RawIterRange<T> {}
unsafe impl<T> Sync for RawIterRange<T> {}

impl<T> Clone for RawIterRange<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self {
            data: self.data.clone(),
            next_ctrl: self.next_ctrl,
            current_group: self.current_group,
            end: self.end,
        }
    }
}

impl<T> Iterator for RawIterRange<T> {
    type Item = Bucket<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            loop {
                if let Some(index) = self.current_group.lowest_set_bit() {
                    self.current_group = self.current_group.remove_lowest_bit();
                    return Some(self.data.next_n(index));
                }

                if self.next_ctrl >= self.end {
                    return None;
                }

                // We might read past self.end up to the next group boundary,
                // but this is fine because it only occurs on tables smaller
                // than the group size where the trailing control bytes are all
                // EMPTY. On larger tables self.end is guaranteed to be aligned
                // to the group size (since tables are power-of-two sized).
                self.current_group = Group::load_aligned(self.next_ctrl).match_full();
                self.data = self.data.next_n(Group::WIDTH);
                self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
            }
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // We don't have an item count, so just guess based on the range size.
        (
            0,
            Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
        )
    }
}

impl<T> FusedIterator for RawIterRange<T> {}

/// Iterator which returns a raw pointer to every full bucket in the table.
///
/// For maximum flexibility this iterator is not bound by a lifetime, but you
/// must observe several rules when using it:
/// - You must not free the hash table while iterating (including via growing/shrinking).
/// - It is fine to erase a bucket that has been yielded by the iterator.
/// - Erasing a bucket that has not yet been yielded by the iterator may still
///   result in the iterator yielding that bucket (unless `reflect_remove` is called).
/// - It is unspecified whether an element inserted after the iterator was
///   created will be yielded by that iterator (unless `reflect_insert` is called).
/// - The order in which the iterator yields buckets is unspecified and may
///   change in the future.
pub struct RawIter<T> {
    pub(crate) iter: RawIterRange<T>,
    items: usize,
}

impl<T> RawIter<T> {
    /// Refresh the iterator so that it reflects a removal from the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each removed bucket before `next` is called again.
    ///
    /// This method should be called _before_ the removal is made. It is not necessary to call this
    /// method if you are removing an item that this iterator yielded in the past.
    #[cfg(feature = "raw")]
    pub fn reflect_remove(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, false);
    }

    /// Refresh the iterator so that it reflects an insertion into the given bucket.
    ///
    /// For the iterator to remain valid, this method must be called once
    /// for each insert before `next` is called again.
    ///
    /// This method does not guarantee that an insertion of a bucket with a greater
    /// index than the last one yielded will be reflected in the iterator.
    ///
    /// This method should be called _after_ the given insert is made.
    #[cfg(feature = "raw")]
    pub fn reflect_insert(&mut self, b: &Bucket<T>) {
        self.reflect_toggle_full(b, true);
    }

    /// Refresh the iterator so that it reflects a change to the state of the given bucket.
    #[cfg(feature = "raw")]
    fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
        unsafe {
            if b.as_ptr() > self.iter.data.as_ptr() {
                // The iterator has already passed the bucket's group.
                // So the toggle isn't relevant to this iterator.
                return;
            }

            if self.iter.next_ctrl < self.iter.end
                && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
            {
                // The iterator has not yet reached the bucket's group.
                // We don't need to reload anything, but we do need to adjust the item count.

                if cfg!(debug_assertions) {
                    // Double-check that the user isn't lying to us by checking the bucket state.
                    // To do that, we need to find its control byte. We know that self.iter.data is
                    // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
                    let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                    let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
                    // This method should be called _before_ a removal, or _after_ an insert,
                    // so in both cases the ctrl byte should indicate that the bucket is full.
                    assert!(is_full(*ctrl));
                }

                if is_insert {
                    self.items += 1;
                } else {
                    self.items -= 1;
                }

                return;
            }

            // The iterator is at the bucket group that the toggled bucket is in.
            // We need to do two things:
            //
            //  - Determine if the iterator already yielded the toggled bucket.
            //    If it did, we're done.
            //  - Otherwise, update the iterator cached group so that it won't
            //    yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
            //    We'll also need to update the item count accordingly.
            if let Some(index) = self.iter.current_group.lowest_set_bit() {
                let next_bucket = self.iter.data.next_n(index);
                if b.as_ptr() > next_bucket.as_ptr() {
                    // The toggled bucket is "before" the bucket the iterator would yield next. We
                    // therefore don't need to do anything --- the iterator has already passed the
                    // bucket in question.
                    //
                    // The item count must already be correct, since a removal or insert "prior" to
                    // the iterator's position wouldn't affect the item count.
                } else {
                    // The removed bucket is an upcoming bucket. We need to make sure it does _not_
                    // get yielded, and also that it's no longer included in the item count.
                    //
                    // NOTE: We can't just reload the group here, both since that might reflect
                    // inserts we've already passed, and because that might inadvertently unset the
                    // bits for _other_ removals. If we do that, we'd have to also decrement the
                    // item count for those other bits that we unset. But the presumably subsequent
                    // call to reflect for those buckets might _also_ decrement the item count.
                    // Instead, we _just_ flip the bit for the particular bucket the caller asked
                    // us to reflect.
                    let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
                    let was_full = self.iter.current_group.flip(our_bit);
                    debug_assert_ne!(was_full, is_insert);

                    if is_insert {
                        self.items += 1;
                    } else {
                        self.items -= 1;
                    }

                    if cfg!(debug_assertions) {
                        if b.as_ptr() == next_bucket.as_ptr() {
                            // The removed bucket should no longer be next
                            debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
                        } else {
                            // We should not have changed what bucket comes next.
                            debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
                        }
                    }
                }
            } else {
                // We must have already iterated past the removed item.
            }
        }
    }
}

impl<T> Clone for RawIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self {
            iter: self.iter.clone(),
            items: self.items,
        }
    }
}

impl<T> Iterator for RawIter<T> {
    type Item = Bucket<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        if let Some(b) = self.iter.next() {
            self.items -= 1;
            Some(b)
        } else {
            // We don't check against items == 0 here to allow the
            // compiler to optimize away the item count entirely if the
            // iterator length is never queried.
            debug_assert_eq!(self.items, 0);
            None
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.items, Some(self.items))
    }
}

impl<T> ExactSizeIterator for RawIter<T> {}
impl<T> FusedIterator for RawIter<T> {}

/// Iterator which consumes a table and returns elements.
pub struct RawIntoIter<T> {
    iter: RawIter<T>,
    alloc: Option<(NonNull<u8>, Layout)>,
    marker: PhantomData<T>,
}

impl<T> RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}

unsafe impl<T> Send for RawIntoIter<T> where T: Send {}
unsafe impl<T> Sync for RawIntoIter<T> where T: Sync {}

#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T> Drop for RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() && self.iter.len() != 0 {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Free the table
            if let Some((ptr, layout)) = self.alloc {
                dealloc(ptr.as_ptr(), layout);
            }
        }
    }
}

#[cfg(not(feature = "nightly"))]
impl<T> Drop for RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() && self.iter.len() != 0 {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Free the table
            if let Some((ptr, layout)) = self.alloc {
                dealloc(ptr.as_ptr(), layout);
            }
        }
    }
}

impl<T> Iterator for RawIntoIter<T> {
    type Item = T;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        unsafe { Some(self.iter.next()?.read()) }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<T> ExactSizeIterator for RawIntoIter<T> {}
impl<T> FusedIterator for RawIntoIter<T> {}

/// Iterator which consumes elements without freeing the table storage.
pub struct RawDrain<'a, T> {
    iter: RawIter<T>,

    // The table is moved into the iterator for the duration of the drain. This
    // ensures that an empty table is left if the drain iterator is leaked
    // without dropping.
    table: ManuallyDrop<RawTable<T>>,
    orig_table: NonNull<RawTable<T>>,

    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
    // covariant over T.
    marker: PhantomData<&'a RawTable<T>>,
}

impl<T> RawDrain<'_, T> {
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}

unsafe impl<T> Send for RawDrain<'_, T> where T: Send {}
unsafe impl<T> Sync for RawDrain<'_, T> where T: Sync {}

impl<T> Drop for RawDrain<'_, T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements. Note that this may panic.
            if mem::needs_drop::<T>() && self.iter.len() != 0 {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Reset the contents of the table now that all elements have been
            // dropped.
            self.table.clear_no_drop();

            // Move the now empty table back to its original location.
            self.orig_table
                .as_ptr()
                .copy_from_nonoverlapping(&*self.table, 1);
        }
    }
}

impl<T> Iterator for RawDrain<'_, T> {
    type Item = T;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        unsafe {
            let item = self.iter.next()?;
            Some(item.read())
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<T> ExactSizeIterator for RawDrain<'_, T> {}
impl<T> FusedIterator for RawDrain<'_, T> {}

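// Behavior sketch (illustrative, not part of the original source): draining
// removes every element but keeps the allocation, so the bucket count is
// unchanged afterwards. The identity "hasher" is a demo stand-in.
#[cfg(test)]
#[test]
fn drain_empties_but_keeps_storage() {
    let mut table = RawTable::with_capacity(8);
    let hasher = |v: &u64| *v;
    for v in 0..5u64 {
        table.insert(v, v, hasher);
    }
    let buckets_before = table.buckets();
    assert_eq!(table.drain().count(), 5);
    assert_eq!(table.len(), 0);
    assert_eq!(table.buckets(), buckets_before);
}
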
/// Iterator over occupied buckets that could match a given hash.
///
/// In rare cases, the iterator may return a bucket with a different hash.
pub struct RawIterHash<'a, T> {
    table: &'a RawTable<T>,

    // The top 7 bits of the hash.
    h2_hash: u8,

    // The sequence of groups to probe in the search.
    probe_seq: ProbeSeq,

    // The current group and its position.
    pos: usize,
    group: Group,

    // The elements within the group with a matching h2-hash.
    bitmask: BitMaskIter,
}

impl<'a, T> RawIterHash<'a, T> {
    fn new(table: &'a RawTable<T>, hash: u64) -> Self {
        unsafe {
            let h2_hash = h2(hash);
            let mut probe_seq = table.probe_seq(hash);
            let pos = probe_seq.next().unwrap();
            let group = Group::load(table.ctrl(pos));
            let bitmask = group.match_byte(h2_hash).into_iter();

            RawIterHash {
                table,
                h2_hash,
                probe_seq,
                pos,
                group,
                bitmask,
            }
        }
    }
}

impl<'a, T> Iterator for RawIterHash<'a, T> {
    type Item = Bucket<T>;

    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            loop {
                if let Some(bit) = self.bitmask.next() {
                    let index = (self.pos + bit) & self.table.bucket_mask;
                    let bucket = self.table.bucket(index);
                    return Some(bucket);
                }
                if likely(self.group.match_empty().any_bit_set()) {
                    return None;
                }
                self.pos = self.probe_seq.next().unwrap();
                self.group = Group::load(self.table.ctrl(self.pos));
                self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
            }
        }
    }
}