vendor/hashbrown/src/raw/mod.rs (rustc.git, new upstream version 1.49.0~beta.4+dfsg1)
1 use crate::alloc::alloc::{alloc, dealloc, handle_alloc_error};
2 use crate::scopeguard::guard;
3 use crate::TryReserveError;
4 use core::alloc::Layout;
5 use core::hint;
6 use core::iter::FusedIterator;
7 use core::marker::PhantomData;
8 use core::mem;
9 use core::mem::ManuallyDrop;
10 use core::ptr::NonNull;
11
12 cfg_if! {
13 // Use the SSE2 implementation if possible: it allows us to scan 16 buckets
14 // at once instead of 8. We don't bother with AVX since it would require
15 // runtime dispatch and wouldn't gain us much anyway: the probability of
16 // finding a match drops off drastically after the first few buckets.
17 //
18 // I attempted an implementation on ARM using NEON instructions, but it
19 // turns out that most NEON instructions have multi-cycle latency, which in
20 // the end outweighs any gains over the generic implementation.
21 if #[cfg(all(
22 target_feature = "sse2",
23 any(target_arch = "x86", target_arch = "x86_64"),
24 not(miri)
25 ))] {
26 mod sse2;
27 use sse2 as imp;
28 } else {
29 #[path = "generic.rs"]
30 mod generic;
31 use generic as imp;
32 }
33 }
34
35 mod bitmask;
36
37 use self::bitmask::{BitMask, BitMaskIter};
38 use self::imp::Group;
39
40 // Branch prediction hint. This is currently only available on nightly but it
41 // consistently improves performance by 10-15%.
42 #[cfg(feature = "nightly")]
43 use core::intrinsics::{likely, unlikely};
44 #[cfg(not(feature = "nightly"))]
45 #[inline]
46 fn likely(b: bool) -> bool {
47 b
48 }
49 #[cfg(not(feature = "nightly"))]
50 #[inline]
51 fn unlikely(b: bool) -> bool {
52 b
53 }
54
55 #[cfg(feature = "nightly")]
56 #[cfg_attr(feature = "inline-more", inline)]
57 unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
58 to.offset_from(from) as usize
59 }
60 #[cfg(not(feature = "nightly"))]
61 #[cfg_attr(feature = "inline-more", inline)]
62 unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
63 (to as usize - from as usize) / mem::size_of::<T>()
64 }
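// For illustration (assuming `T = u32`, so 4 bytes per element): if
// `to == from.add(3)`, both versions of `offset_from` return
// (12 bytes) / size_of::<u32>() == 3, i.e. the distance in elements,
// not in bytes.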
65
66 /// Whether memory allocation errors should return an error or abort.
67 #[derive(Copy, Clone)]
68 enum Fallibility {
69 Fallible,
70 Infallible,
71 }
72
73 impl Fallibility {
74 /// Error to return on capacity overflow.
75 #[cfg_attr(feature = "inline-more", inline)]
76 fn capacity_overflow(self) -> TryReserveError {
77 match self {
78 Fallibility::Fallible => TryReserveError::CapacityOverflow,
79 Fallibility::Infallible => panic!("Hash table capacity overflow"),
80 }
81 }
82
83 /// Error to return on allocation error.
84 #[cfg_attr(feature = "inline-more", inline)]
85 fn alloc_err(self, layout: Layout) -> TryReserveError {
86 match self {
87 Fallibility::Fallible => TryReserveError::AllocError { layout },
88 Fallibility::Infallible => handle_alloc_error(layout),
89 }
90 }
91 }
92
93 /// Control byte value for an empty bucket.
94 const EMPTY: u8 = 0b1111_1111;
95
96 /// Control byte value for a deleted bucket.
97 const DELETED: u8 = 0b1000_0000;
98
99 /// Checks whether a control byte represents a full bucket (top bit is clear).
100 #[inline]
101 fn is_full(ctrl: u8) -> bool {
102 ctrl & 0x80 == 0
103 }
104
105 /// Checks whether a control byte represents a special value (top bit is set).
106 #[inline]
107 fn is_special(ctrl: u8) -> bool {
108 ctrl & 0x80 != 0
109 }
110
111 /// Checks whether a special control value is EMPTY (just check 1 bit).
112 #[inline]
113 fn special_is_empty(ctrl: u8) -> bool {
114 debug_assert!(is_special(ctrl));
115 ctrl & 0x01 != 0
116 }
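// Summarizing the control byte encoding implied by the constants and checks
// above:
//
//   FULL:    0b0hhh_hhhh  (top bit clear; low 7 bits hold h2 of the hash)
//   DELETED: 0b1000_0000  (top bit set, bit 0 clear)
//   EMPTY:   0b1111_1111  (top bit set, bit 0 set)
//
// `is_full`/`is_special` test only the top bit, and `special_is_empty`
// distinguishes the two special values by bit 0 alone, which is why it must
// only be called on non-full control bytes.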
117
118 /// Primary hash function, used to select the initial bucket to probe from.
119 #[inline]
120 #[allow(clippy::cast_possible_truncation)]
121 fn h1(hash: u64) -> usize {
122 // On 32-bit platforms we simply ignore the higher hash bits.
123 hash as usize
124 }
125
126 /// Secondary hash function, saved in the low 7 bits of the control byte.
127 #[inline]
128 #[allow(clippy::cast_possible_truncation)]
129 fn h2(hash: u64) -> u8 {
130 // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
131 // value, some hash functions (such as FxHash) produce a usize result
132 // instead, which means that the top 32 bits are 0 on 32-bit platforms.
133 let hash_len = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
134 let top7 = hash >> (hash_len * 8 - 7);
135 (top7 & 0x7f) as u8 // truncation
136 }
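// As a worked example (hash value chosen purely for illustration): on a
// 64-bit platform with hash = 0xDEAD_BEEF_1234_5678,
//
//   h2(hash) = (hash >> 57) & 0x7f = 0b110_1111 = 0x6f
//
// i.e. the top 7 bits of the hash. The result always has its top bit clear,
// so it is a valid FULL control byte. h1(hash) is the whole hash cast to
// usize; callers mask it with `bucket_mask` to pick the starting group.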
137
138 /// Probe sequence based on triangular numbers, which is guaranteed (since our
139 /// table size is a power of two) to visit every group of elements exactly once.
140 ///
141 /// A triangular probe has us jump by 1 more group every time. So first we
142 /// jump by 1 group (meaning we just continue our linear scan), then 2 groups
143 /// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
144 ///
145 /// Proof that the probe will visit every group in the table:
146 /// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
147 struct ProbeSeq {
148 bucket_mask: usize,
149 pos: usize,
150 stride: usize,
151 }
152
153 impl Iterator for ProbeSeq {
154 type Item = usize;
155
156 #[inline]
157 fn next(&mut self) -> Option<usize> {
158 // We should have found an empty bucket by now and ended the probe.
159 debug_assert!(
160 self.stride <= self.bucket_mask,
161 "Went past end of probe sequence"
162 );
163
164 let result = self.pos;
165 self.stride += Group::WIDTH;
166 self.pos += self.stride;
167 self.pos &= self.bucket_mask;
168 Some(result)
169 }
170 }
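// For example (assuming the SSE2 group width of 16 and a 64-bucket table,
// so bucket_mask == 63), a probe starting at position `p` visits the group
// start positions
//
//   p, p + 16, p + 48, p + 96, p + 160, ...   (each taken mod 64)
//
// i.e. offsets of Group::WIDTH times the triangular numbers 0, 1, 3, 6, 10,
// ..., which is what guarantees that every group is visited exactly once
// before the sequence repeats.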
171
172 /// Returns the number of buckets needed to hold the given number of items,
173 /// taking the maximum load factor into account.
174 ///
175 /// Returns `None` if an overflow occurs.
176 // Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
177 #[cfg_attr(target_os = "emscripten", inline(never))]
178 #[cfg_attr(not(target_os = "emscripten"), inline)]
179 fn capacity_to_buckets(cap: usize) -> Option<usize> {
180 debug_assert_ne!(cap, 0);
181
182 // For small tables we require at least 1 empty bucket so that lookups are
183 // guaranteed to terminate if an element doesn't exist in the table.
184 if cap < 8 {
185 // We don't bother with a table size of 2 buckets since that can only
186 // hold a single element. Instead we skip directly to a 4 bucket table
187 // which can hold 3 elements.
188 return Some(if cap < 4 { 4 } else { 8 });
189 }
190
191 // Otherwise require 1/8 buckets to be empty (87.5% load)
192 //
193 // Be careful when modifying this, calculate_layout relies on the
194 // overflow check here.
195 let adjusted_cap = cap.checked_mul(8)? / 7;
196
197 // Any overflows will have been caught by the checked_mul. Also, any
198 // rounding errors from the division above will be cleaned up by
199 // next_power_of_two (which can't overflow because of the previous division).
200 Some(adjusted_cap.next_power_of_two())
201 }
202
203 /// Returns the maximum effective capacity for the given bucket mask, taking
204 /// the maximum load factor into account.
205 #[inline]
206 fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
207 if bucket_mask < 8 {
208 // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
209 // Keep in mind that the bucket mask is one less than the bucket count.
210 bucket_mask
211 } else {
212 // For larger tables we reserve 12.5% of the slots as empty.
213 ((bucket_mask + 1) / 8) * 7
214 }
215 }
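// Putting the two helpers together, as a sketch: asking for capacity 100
// gives
//
//   capacity_to_buckets(100)     = next_power_of_two(100 * 8 / 7 = 114) = 128
//   bucket_mask_to_capacity(127) = (128 / 8) * 7                        = 112
//
// so the table allocates 128 buckets and can hold up to 112 elements
// (87.5% load) before it needs to grow again.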
216
217 /// Returns a Layout which describes the allocation required for a hash table,
218 /// and the offset of the control bytes in the allocation.
219 /// (the offset is also one past the last element of the buckets)
220 ///
221 /// Returns `None` if an overflow occurs.
222 #[cfg_attr(feature = "inline-more", inline)]
223 #[cfg(feature = "nightly")]
224 fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
225 debug_assert!(buckets.is_power_of_two());
226
227 // Array of buckets
228 let data = Layout::array::<T>(buckets).ok()?;
229
230 // Array of control bytes. This must be aligned to the group size.
231 //
232 // We add `Group::WIDTH` control bytes at the end of the array which
233 // replicate the bytes at the start of the array and thus avoids the need to
234 // perform bounds-checking while probing.
235 //
236 // There is no possible overflow here since buckets is a power of two and
237 // Group::WIDTH is a small number.
238 let ctrl = unsafe { Layout::from_size_align_unchecked(buckets + Group::WIDTH, Group::WIDTH) };
239
240 data.extend(ctrl).ok()
241 }
242
243 /// Returns a Layout which describes the allocation required for a hash table,
244 /// and the offset of the control bytes in the allocation.
245 /// (the offset is also one past the last element of the buckets)
246 ///
247 /// Returns `None` if an overflow occurs.
248 #[cfg_attr(feature = "inline-more", inline)]
249 #[cfg(not(feature = "nightly"))]
250 fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
251 debug_assert!(buckets.is_power_of_two());
252
253 // Manual layout calculation since Layout methods are not yet stable.
254 let ctrl_align = usize::max(mem::align_of::<T>(), Group::WIDTH);
255 let ctrl_offset = mem::size_of::<T>()
256 .checked_mul(buckets)?
257 .checked_add(ctrl_align - 1)?
258 & !(ctrl_align - 1);
259 let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
260
261 Some((
262 unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
263 ctrl_offset,
264 ))
265 }
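// As a concrete layout example (assuming the SSE2 group width of 16):
// for T = u64 (size 8, align 8) and 16 buckets,
//
//   ctrl_align  = max(8, 16)          = 16
//   ctrl_offset = (8 * 16 + 15) & !15 = 128
//   len         = 128 + (16 + 16)     = 160
//
// so the allocation is 160 bytes: the 16 data buckets at the front, followed
// by the 16 real control bytes plus the replicated trailing group starting
// at offset 128.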
266
267 /// A reference to a hash table bucket containing a `T`.
268 ///
269 /// This is usually just a pointer to the element itself. However if the element
270 /// is a ZST, then we instead track the index of the element in the table so
271 /// that `erase` works properly.
272 pub struct Bucket<T> {
273 // Actually this stores a pointer one past the element (the element itself
274 // lives just below it in memory), rather than a pointer to the element.
275 // This keeps the pointer arithmetic invariants simple; a direct pointer to
276 // the element would make them harder. `NonNull` gives us covariance and the
277 // niche layout optimization.
277 ptr: NonNull<T>,
278 }
279
280 // This Send impl is needed for rayon support. This is safe since Bucket is
281 // never exposed in a public API.
282 unsafe impl<T> Send for Bucket<T> {}
283
284 impl<T> Clone for Bucket<T> {
285 #[cfg_attr(feature = "inline-more", inline)]
286 fn clone(&self) -> Self {
287 Self { ptr: self.ptr }
288 }
289 }
290
291 impl<T> Bucket<T> {
292 #[cfg_attr(feature = "inline-more", inline)]
293 unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
294 let ptr = if mem::size_of::<T>() == 0 {
295 // won't overflow because index must be less than length
296 (index + 1) as *mut T
297 } else {
298 base.as_ptr().sub(index)
299 };
300 Self {
301 ptr: NonNull::new_unchecked(ptr),
302 }
303 }
304 #[cfg_attr(feature = "inline-more", inline)]
305 unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
306 if mem::size_of::<T>() == 0 {
307 self.ptr.as_ptr() as usize - 1
308 } else {
309 offset_from(base.as_ptr(), self.ptr.as_ptr())
310 }
311 }
312 #[cfg_attr(feature = "inline-more", inline)]
313 pub unsafe fn as_ptr(&self) -> *mut T {
314 if mem::size_of::<T>() == 0 {
315 // Just return an arbitrary ZST pointer which is properly aligned
316 mem::align_of::<T>() as *mut T
317 } else {
318 self.ptr.as_ptr().sub(1)
319 }
320 }
321 #[cfg_attr(feature = "inline-more", inline)]
322 unsafe fn next_n(&self, offset: usize) -> Self {
323 let ptr = if mem::size_of::<T>() == 0 {
324 (self.ptr.as_ptr() as usize + offset) as *mut T
325 } else {
326 self.ptr.as_ptr().sub(offset)
327 };
328 Self {
329 ptr: NonNull::new_unchecked(ptr),
330 }
331 }
332 #[cfg_attr(feature = "inline-more", inline)]
333 pub unsafe fn drop(&self) {
334 self.as_ptr().drop_in_place();
335 }
336 #[cfg_attr(feature = "inline-more", inline)]
337 pub unsafe fn read(&self) -> T {
338 self.as_ptr().read()
339 }
340 #[cfg_attr(feature = "inline-more", inline)]
341 pub unsafe fn write(&self, val: T) {
342 self.as_ptr().write(val);
343 }
344 #[cfg_attr(feature = "inline-more", inline)]
345 pub unsafe fn as_ref<'a>(&self) -> &'a T {
346 &*self.as_ptr()
347 }
348 #[cfg_attr(feature = "inline-more", inline)]
349 pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
350 &mut *self.as_ptr()
351 }
352 #[cfg_attr(feature = "inline-more", inline)]
353 pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
354 self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
355 }
356 }
357
358 /// A raw hash table with an unsafe API.
359 pub struct RawTable<T> {
360 // Mask to get an index from a hash value. The value is one less than the
361 // number of buckets in the table.
362 bucket_mask: usize,
363
364 // [Padding], T1, T2, ..., Tlast, C1, C2, ...
365 // ^ points here
366 ctrl: NonNull<u8>,
367
368 // Number of elements that can be inserted before we need to grow the table
369 growth_left: usize,
370
371 // Number of elements in the table, only really used by len()
372 items: usize,
373
374 // Tell dropck that we own instances of T.
375 marker: PhantomData<T>,
376 }
377
378 impl<T> RawTable<T> {
379 /// Creates a new empty hash table without allocating any memory.
380 ///
381 /// In effect this returns a table with exactly 1 bucket. However we can
382 /// leave the data pointer dangling since that bucket is never written to
383 /// due to our load factor forcing us to always have at least 1 free bucket.
384 #[cfg_attr(feature = "inline-more", inline)]
385 pub const fn new() -> Self {
386 Self {
387 // Be careful to cast the entire slice to a raw pointer.
388 ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
389 bucket_mask: 0,
390 items: 0,
391 growth_left: 0,
392 marker: PhantomData,
393 }
394 }
395
396 /// Allocates a new hash table with the given number of buckets.
397 ///
398 /// The control bytes are left uninitialized.
399 #[cfg_attr(feature = "inline-more", inline)]
400 unsafe fn new_uninitialized(
401 buckets: usize,
402 fallability: Fallibility,
403 ) -> Result<Self, TryReserveError> {
404 debug_assert!(buckets.is_power_of_two());
405
406 // Avoid `Option::ok_or_else` because it bloats LLVM IR.
407 let (layout, ctrl_offset) = match calculate_layout::<T>(buckets) {
408 Some(lco) => lco,
409 None => return Err(fallability.capacity_overflow()),
410 };
411 let ptr = match NonNull::new(alloc(layout)) {
412 Some(ptr) => ptr,
413 None => return Err(fallability.alloc_err(layout)),
414 };
415 let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
416 Ok(Self {
417 ctrl,
418 bucket_mask: buckets - 1,
419 items: 0,
420 growth_left: bucket_mask_to_capacity(buckets - 1),
421 marker: PhantomData,
422 })
423 }
424
425 /// Attempts to allocate a new hash table with at least enough capacity
426 /// for inserting the given number of elements without reallocating.
427 fn fallible_with_capacity(
428 capacity: usize,
429 fallability: Fallibility,
430 ) -> Result<Self, TryReserveError> {
431 if capacity == 0 {
432 Ok(Self::new())
433 } else {
434 unsafe {
435 // Avoid `Option::ok_or_else` because it bloats LLVM IR.
436 let buckets = match capacity_to_buckets(capacity) {
437 Some(buckets) => buckets,
438 None => return Err(fallability.capacity_overflow()),
439 };
440 let result = Self::new_uninitialized(buckets, fallability)?;
441 result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
442
443 Ok(result)
444 }
445 }
446 }
447
448 /// Attempts to allocate a new hash table with at least enough capacity
449 /// for inserting the given number of elements without reallocating.
450 #[cfg(feature = "raw")]
451 pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
452 Self::fallible_with_capacity(capacity, Fallibility::Fallible)
453 }
454
455 /// Allocates a new hash table with at least enough capacity for inserting
456 /// the given number of elements without reallocating.
457 pub fn with_capacity(capacity: usize) -> Self {
458 // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
459 match Self::fallible_with_capacity(capacity, Fallibility::Infallible) {
460 Ok(capacity) => capacity,
461 Err(_) => unsafe { hint::unreachable_unchecked() },
462 }
463 }
464
465 /// Deallocates the table without dropping any entries.
466 #[cfg_attr(feature = "inline-more", inline)]
467 unsafe fn free_buckets(&mut self) {
468 // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
469 let (layout, ctrl_offset) = match calculate_layout::<T>(self.buckets()) {
470 Some(lco) => lco,
471 None => hint::unreachable_unchecked(),
472 };
473 dealloc(self.ctrl.as_ptr().sub(ctrl_offset), layout);
474 }
475
476 /// Returns a pointer to one past the last element of the data table.
477 #[cfg_attr(feature = "inline-more", inline)]
478 pub unsafe fn data_end(&self) -> NonNull<T> {
479 NonNull::new_unchecked(self.ctrl.as_ptr() as *mut T)
480 }
481
482 /// Returns a pointer to the start of the data table.
483 #[cfg_attr(feature = "inline-more", inline)]
484 #[cfg(feature = "nightly")]
485 pub unsafe fn data_start(&self) -> *mut T {
486 self.data_end().as_ptr().wrapping_sub(self.buckets())
487 }
488
489 /// Returns the index of a bucket from a `Bucket`.
490 #[cfg_attr(feature = "inline-more", inline)]
491 pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
492 bucket.to_base_index(self.data_end())
493 }
494
495 /// Returns a pointer to a control byte.
496 #[cfg_attr(feature = "inline-more", inline)]
497 unsafe fn ctrl(&self, index: usize) -> *mut u8 {
498 debug_assert!(index < self.num_ctrl_bytes());
499 self.ctrl.as_ptr().add(index)
500 }
501
502 /// Returns a pointer to an element in the table.
503 #[cfg_attr(feature = "inline-more", inline)]
504 pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
505 debug_assert_ne!(self.bucket_mask, 0);
506 debug_assert!(index < self.buckets());
507 Bucket::from_base_index(self.data_end(), index)
508 }
509
510 /// Erases an element from the table without dropping it.
511 #[cfg_attr(feature = "inline-more", inline)]
512 #[deprecated(since = "0.8.1", note = "use erase or remove instead")]
513 pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
514 let index = self.bucket_index(item);
515 debug_assert!(is_full(*self.ctrl(index)));
516 let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
517 let empty_before = Group::load(self.ctrl(index_before)).match_empty();
518 let empty_after = Group::load(self.ctrl(index)).match_empty();
519
520 // If we are inside a continuous block of Group::WIDTH full or deleted
521 // cells then a probe window may have seen a full block when trying to
522 // insert. We therefore need to keep that block non-empty so that
523 // lookups will continue searching to the next probe window.
524 //
525 // Note that in this context `leading_zeros` refers to the bytes at the
526 // end of a group, while `trailing_zeros` refers to the bytes at the
527 // beginning of a group.
528 let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
529 DELETED
530 } else {
531 self.growth_left += 1;
532 EMPTY
533 };
534 self.set_ctrl(index, ctrl);
535 self.items -= 1;
536 }
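// To illustrate the EMPTY/DELETED choice above (using a group width of 4
// for brevity, F = full, E = empty, X = the erased slot):
//
//   ... F F X F F ...  ->  X becomes DELETED: the erased slot sits inside a
//                          run of at least Group::WIDTH non-empty slots, so a
//                          probe window may have skipped over it.
//   ... E F X F E ...  ->  X becomes EMPTY: no full probe window can have
//                          covered it, so lookups still terminate correctly.
//
// Only the EMPTY case hands the slot back to `growth_left`; DELETED slots
// keep counting against the load factor until the next rehash.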
537
538 /// Erases an element from the table, dropping it in place.
539 #[cfg_attr(feature = "inline-more", inline)]
540 #[allow(clippy::needless_pass_by_value)]
541 #[allow(deprecated)]
542 pub unsafe fn erase(&mut self, item: Bucket<T>) {
543 // Erase the element from the table first since drop might panic.
544 self.erase_no_drop(&item);
545 item.drop();
546 }
547
548 /// Finds and erases an element from the table, dropping it in place.
549 /// Returns true if an element was found.
550 #[cfg(feature = "raw")]
551 #[cfg_attr(feature = "inline-more", inline)]
552 pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
553 // Avoid `Option::map` because it bloats LLVM IR.
554 if let Some(bucket) = self.find(hash, eq) {
555 unsafe { self.erase(bucket) };
556 true
557 } else {
558 false
559 }
560 }
561
562 /// Removes an element from the table, returning it.
563 #[cfg_attr(feature = "inline-more", inline)]
564 #[allow(clippy::needless_pass_by_value)]
565 #[allow(deprecated)]
566 pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
567 self.erase_no_drop(&item);
568 item.read()
569 }
570
571 /// Finds and removes an element from the table, returning it.
572 #[cfg_attr(feature = "inline-more", inline)]
573 pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
574 // Avoid `Option::map` because it bloats LLVM IR.
575 match self.find(hash, eq) {
576 Some(bucket) => Some(unsafe { self.remove(bucket) }),
577 None => None,
578 }
579 }
580
581 /// Returns an iterator for a probe sequence on the table.
582 ///
583 /// This iterator never terminates, but is guaranteed to visit each bucket
584 /// group exactly once. The loop using `probe_seq` must terminate upon
585 /// reaching a group containing an empty bucket.
586 #[cfg_attr(feature = "inline-more", inline)]
587 fn probe_seq(&self, hash: u64) -> ProbeSeq {
588 ProbeSeq {
589 bucket_mask: self.bucket_mask,
590 pos: h1(hash) & self.bucket_mask,
591 stride: 0,
592 }
593 }
594
595 /// Sets a control byte, and possibly also the replicated control byte at
596 /// the end of the array.
597 #[cfg_attr(feature = "inline-more", inline)]
598 unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
599 // Replicate the first Group::WIDTH control bytes at the end of
600 // the array without using a branch:
601 // - If index >= Group::WIDTH then index == index2.
602 // - Otherwise index2 == self.bucket_mask + 1 + index.
603 //
604 // The very last replicated control byte is never actually read because
605 // we mask the initial index for unaligned loads, but we write it
606 // anyway because it makes the set_ctrl implementation simpler.
607 //
608 // If there are fewer buckets than Group::WIDTH then this code will
609 // replicate the buckets at the end of the trailing group. For example
610 // with 2 buckets and a group size of 4, the control bytes will look
611 // like this:
612 //
613 // Real | Replicated
614 // ---------------------------------------------
615 // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
616 // ---------------------------------------------
617 let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
618
619 *self.ctrl(index) = ctrl;
620 *self.ctrl(index2) = ctrl;
621 }
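// A quick check of the mirroring arithmetic (assuming a 64-bucket table,
// bucket_mask == 63, Group::WIDTH == 16):
//
//   index = 20: index2 = ((20 - 16) & 63) + 16 = 20   (maps to itself)
//   index =  3: index2 = (( 3 - 16) & 63) + 16 = 67   (= bucket_mask + 1 + 3,
//                                                      the replicated copy)
//
// so only the first Group::WIDTH control bytes are ever written twice.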
622
623 /// Searches for an empty or deleted bucket which is suitable for inserting
624 /// a new element.
625 ///
626 /// There must be at least 1 empty bucket in the table.
627 #[cfg_attr(feature = "inline-more", inline)]
628 fn find_insert_slot(&self, hash: u64) -> usize {
629 for pos in self.probe_seq(hash) {
630 unsafe {
631 let group = Group::load(self.ctrl(pos));
632 if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
633 let result = (pos + bit) & self.bucket_mask;
634
635 // In tables smaller than the group width, trailing control
636 // bytes outside the range of the table are filled with
637 // EMPTY entries. These will unfortunately trigger a
638 // match, but once masked may point to a full bucket that
639 // is already occupied. We detect this situation here and
640 // perform a second scan starting at the beginning of the
641 // table. This second scan is guaranteed to find an empty
642 // slot (due to the load factor) before hitting the trailing
643 // control bytes (containing EMPTY).
644 if unlikely(is_full(*self.ctrl(result))) {
645 debug_assert!(self.bucket_mask < Group::WIDTH);
646 debug_assert_ne!(pos, 0);
647 return Group::load_aligned(self.ctrl(0))
648 .match_empty_or_deleted()
649 .lowest_set_bit_nonzero();
650 } else {
651 return result;
652 }
653 }
654 }
655 }
656
657 // probe_seq never returns None, so this point is unreachable.
658 unreachable!();
659 }
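// To illustrate the reload above (assuming Group::WIDTH == 16 and a 4-bucket
// table): a probe starting at pos == 2 loads control bytes 2..18, most of
// which lie past the real buckets and read as EMPTY. The lowest EMPTY match
// might then be bit 2 (control byte 4), and (2 + 2) & bucket_mask == 0 can
// point at a bucket that is already occupied. The aligned reload from
// ctrl(0) is guaranteed to find a genuinely free slot, because the load
// factor keeps at least one of the real buckets empty.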
660
661 /// Marks all table buckets as empty without dropping their contents.
662 #[cfg_attr(feature = "inline-more", inline)]
663 pub fn clear_no_drop(&mut self) {
664 if !self.is_empty_singleton() {
665 unsafe {
666 self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
667 }
668 }
669 self.items = 0;
670 self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
671 }
672
673 /// Removes all elements from the table without freeing the backing memory.
674 #[cfg_attr(feature = "inline-more", inline)]
675 pub fn clear(&mut self) {
676 // Ensure that the table is reset even if one of the drops panic
677 let self_ = guard(self, |self_| self_.clear_no_drop());
678
679 if mem::needs_drop::<T>() && self_.len() != 0 {
680 unsafe {
681 for item in self_.iter() {
682 item.drop();
683 }
684 }
685 }
686 }
687
688 /// Shrinks the table to fit `max(self.len(), min_size)` elements.
689 #[cfg_attr(feature = "inline-more", inline)]
690 pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
691 // Calculate the minimal number of elements that we need to reserve
692 // space for.
693 let min_size = usize::max(self.items, min_size);
694 if min_size == 0 {
695 *self = Self::new();
696 return;
697 }
698
699 // Calculate the number of buckets that we need for this number of
700 // elements. If the calculation overflows then the requested bucket
701 // count must be larger than what we have right now, and nothing needs to be
702 // done.
703 let min_buckets = match capacity_to_buckets(min_size) {
704 Some(buckets) => buckets,
705 None => return,
706 };
707
708 // If we have more buckets than we need, shrink the table.
709 if min_buckets < self.buckets() {
710 // Fast path if the table is empty
711 if self.items == 0 {
712 *self = Self::with_capacity(min_size)
713 } else {
714 // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
715 if self
716 .resize(min_size, hasher, Fallibility::Infallible)
717 .is_err()
718 {
719 unsafe { hint::unreachable_unchecked() }
720 }
721 }
722 }
723 }
724
725 /// Ensures that at least `additional` items can be inserted into the table
726 /// without reallocation.
727 #[cfg_attr(feature = "inline-more", inline)]
728 pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
729 if additional > self.growth_left {
730 // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
731 if self
732 .reserve_rehash(additional, hasher, Fallibility::Infallible)
733 .is_err()
734 {
735 unsafe { hint::unreachable_unchecked() }
736 }
737 }
738 }
739
740 /// Tries to ensure that at least `additional` items can be inserted into
741 /// the table without reallocation.
742 #[cfg_attr(feature = "inline-more", inline)]
743 pub fn try_reserve(
744 &mut self,
745 additional: usize,
746 hasher: impl Fn(&T) -> u64,
747 ) -> Result<(), TryReserveError> {
748 if additional > self.growth_left {
749 self.reserve_rehash(additional, hasher, Fallibility::Fallible)
750 } else {
751 Ok(())
752 }
753 }
754
755 /// Out-of-line slow path for `reserve` and `try_reserve`.
756 #[cold]
757 #[inline(never)]
758 fn reserve_rehash(
759 &mut self,
760 additional: usize,
761 hasher: impl Fn(&T) -> u64,
762 fallability: Fallibility,
763 ) -> Result<(), TryReserveError> {
764 // Avoid `Option::ok_or_else` because it bloats LLVM IR.
765 let new_items = match self.items.checked_add(additional) {
766 Some(new_items) => new_items,
767 None => return Err(fallability.capacity_overflow()),
768 };
769 let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
770 if new_items <= full_capacity / 2 {
771 // Rehash in-place without re-allocating if we have plenty of spare
772 // capacity that is locked up due to DELETED entries.
773 self.rehash_in_place(hasher);
774 Ok(())
775 } else {
776 // Otherwise, conservatively resize to at least the next size up
777 // to avoid churning deletes into frequent rehashes.
778 self.resize(
779 usize::max(new_items, full_capacity + 1),
780 hasher,
781 fallability,
782 )
783 }
784 }
785
786 /// Rehashes the contents of the table in place (i.e. without changing the
787 /// allocation).
788 ///
789 /// If `hasher` panics then some of the table's contents may be lost.
790 fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
791 unsafe {
792 // Bulk convert all full control bytes to DELETED, and all DELETED
793 // control bytes to EMPTY. This effectively frees up all buckets
794 // containing a DELETED entry.
795 for i in (0..self.buckets()).step_by(Group::WIDTH) {
796 let group = Group::load_aligned(self.ctrl(i));
797 let group = group.convert_special_to_empty_and_full_to_deleted();
798 group.store_aligned(self.ctrl(i));
799 }
800
801 // Fix up the trailing control bytes. See the comments in set_ctrl
802 // for the handling of tables smaller than the group width.
803 if self.buckets() < Group::WIDTH {
804 self.ctrl(0)
805 .copy_to(self.ctrl(Group::WIDTH), self.buckets());
806 } else {
807 self.ctrl(0)
808 .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
809 }
810
811 // If the hash function panics then properly clean up any elements
812 // that we haven't rehashed yet. We unfortunately can't preserve these
813 // elements since we have lost their hashes and have no way of recovering
814 // them without risking another panic.
815 let mut guard = guard(self, |self_| {
816 if mem::needs_drop::<T>() {
817 for i in 0..self_.buckets() {
818 if *self_.ctrl(i) == DELETED {
819 self_.set_ctrl(i, EMPTY);
820 self_.bucket(i).drop();
821 self_.items -= 1;
822 }
823 }
824 }
825 self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
826 });
827
828 // At this point, DELETED elements are elements that we haven't
829 // rehashed yet. Find them and re-insert them at their ideal
830 // position.
831 'outer: for i in 0..guard.buckets() {
832 if *guard.ctrl(i) != DELETED {
833 continue;
834 }
835 'inner: loop {
836 // Hash the current item
837 let item = guard.bucket(i);
838 let hash = hasher(item.as_ref());
839
840 // Search for a suitable place to put it
841 let new_i = guard.find_insert_slot(hash);
842
843 // Probing works by scanning through all of the control
844 // bytes in groups, which may not be aligned to the group
845 // size. If both the new and old position fall within the
846 // same unaligned group, then there is no benefit in moving
847 // it and we can just continue to the next item.
848 let probe_index = |pos: usize| {
849 (pos.wrapping_sub(guard.probe_seq(hash).pos) & guard.bucket_mask)
850 / Group::WIDTH
851 };
852 if likely(probe_index(i) == probe_index(new_i)) {
853 guard.set_ctrl(i, h2(hash));
854 continue 'outer;
855 }
856
857 // We are moving the current item to a new position. Write
858 // our H2 to the control byte of the new position.
859 let prev_ctrl = *guard.ctrl(new_i);
860 guard.set_ctrl(new_i, h2(hash));
861
862 if prev_ctrl == EMPTY {
863 // If the target slot is empty, simply move the current
864 // element into the new slot and clear the old control
865 // byte.
866 guard.set_ctrl(i, EMPTY);
867 guard.bucket(new_i).copy_from_nonoverlapping(&item);
868 continue 'outer;
869 } else {
870 // If the target slot is occupied, swap the two elements
871 // and then continue processing the element that we just
872 // swapped into the old slot.
873 debug_assert_eq!(prev_ctrl, DELETED);
874 mem::swap(guard.bucket(new_i).as_mut(), item.as_mut());
875 continue 'inner;
876 }
877 }
878 }
879
880 guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
881 mem::forget(guard);
882 }
883 }
884
885 /// Allocates a new table of a different size and moves the contents of the
886 /// current table into it.
887 fn resize(
888 &mut self,
889 capacity: usize,
890 hasher: impl Fn(&T) -> u64,
891 fallability: Fallibility,
892 ) -> Result<(), TryReserveError> {
893 unsafe {
894 debug_assert!(self.items <= capacity);
895
896 // Allocate and initialize the new table.
897 let mut new_table = Self::fallible_with_capacity(capacity, fallability)?;
898 new_table.growth_left -= self.items;
899 new_table.items = self.items;
900
901 // The hash function may panic, in which case we simply free the new
902 // table without dropping any elements that may have been copied into
903 // it.
904 //
905 // This guard is also used to free the old table on success, see
906 // the comment at the bottom of this function.
907 let mut new_table = guard(ManuallyDrop::new(new_table), |new_table| {
908 if !new_table.is_empty_singleton() {
909 new_table.free_buckets();
910 }
911 });
912
913 // Copy all elements to the new table.
914 for item in self.iter() {
915 // This may panic.
916 let hash = hasher(item.as_ref());
917
918 // We can use a simpler version of insert() here since:
919 // - there are no DELETED entries.
920 // - we know there is enough space in the table.
921 // - all elements are unique.
922 let index = new_table.find_insert_slot(hash);
923 new_table.set_ctrl(index, h2(hash));
924 new_table.bucket(index).copy_from_nonoverlapping(&item);
925 }
926
927 // We successfully copied all elements without panicking. Now replace
928 // self with the new table. The old table will have its memory freed but
929 // the items will not be dropped (since they have been moved into the
930 // new table).
931 mem::swap(self, &mut new_table);
932
933 Ok(())
934 }
935 }
936
937 /// Inserts a new element into the table, and returns its raw bucket.
938 ///
939 /// This does not check if the given element already exists in the table.
940 #[cfg_attr(feature = "inline-more", inline)]
941 pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
942 unsafe {
943 let mut index = self.find_insert_slot(hash);
944
945 // We can avoid growing the table once we have reached our load
946 // factor if we are replacing a tombstone. This works since the
947 // number of EMPTY slots does not change in this case.
948 let old_ctrl = *self.ctrl(index);
949 if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
950 self.reserve(1, hasher);
951 index = self.find_insert_slot(hash);
952 }
953
954 let bucket = self.bucket(index);
955 self.growth_left -= special_is_empty(old_ctrl) as usize;
956 self.set_ctrl(index, h2(hash));
957 bucket.write(value);
958 self.items += 1;
959 bucket
960 }
961 }
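// As a usage sketch (the `make_hash` and `state` names below are
// hypothetical stand-ins for whatever hashing the caller uses, e.g. a
// HashMap-style wrapper; they are not defined in this module):
//
//     let hash = make_hash(&state, &key);
//     let bucket = table.insert(hash, (key, value), |(k, _)| make_hash(&state, k));
//
// The `hasher` closure is only invoked when the insert has to reserve
// additional capacity and therefore rehash the existing entries.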
962
963 /// Inserts a new element into the table, and returns a mutable reference to it.
964 ///
965 /// This does not check if the given element already exists in the table.
966 #[cfg_attr(feature = "inline-more", inline)]
967 pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
968 unsafe { self.insert(hash, value, hasher).as_mut() }
969 }
970
971 /// Inserts a new element into the table, without growing the table.
972 ///
973 /// There must be enough space in the table to insert the new element.
974 ///
975 /// This does not check if the given element already exists in the table.
976 #[cfg_attr(feature = "inline-more", inline)]
977 #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
978 pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
979 unsafe {
980 let index = self.find_insert_slot(hash);
981 let bucket = self.bucket(index);
982
983 // If we are replacing a DELETED entry then we don't need to update
984 // the load counter.
985 let old_ctrl = *self.ctrl(index);
986 self.growth_left -= special_is_empty(old_ctrl) as usize;
987
988 self.set_ctrl(index, h2(hash));
989 bucket.write(value);
990 self.items += 1;
991 bucket
992 }
993 }
994
995 /// Temporarily removes a bucket, applying the given function to the removed
996 /// element and optionally putting the returned value back into the same bucket.
997 ///
998 /// Returns `true` if the bucket still contains an element.
999 ///
1000 /// This does not check if the given bucket is actually occupied.
1001 #[cfg_attr(feature = "inline-more", inline)]
1002 pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
1003 where
1004 F: FnOnce(T) -> Option<T>,
1005 {
1006 let index = self.bucket_index(&bucket);
1007 let old_ctrl = *self.ctrl(index);
1008 debug_assert!(is_full(old_ctrl));
1009 let old_growth_left = self.growth_left;
1010 let item = self.remove(bucket);
1011 if let Some(new_item) = f(item) {
1012 self.growth_left = old_growth_left;
1013 self.set_ctrl(index, old_ctrl);
1014 self.items += 1;
1015 self.bucket(index).write(new_item);
1016 true
1017 } else {
1018 false
1019 }
1020 }
1021
1022 /// Searches for an element in the table.
1023 #[inline]
1024 pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
1025 unsafe {
1026 for bucket in self.iter_hash(hash) {
1027 let elm = bucket.as_ref();
1028 if likely(eq(elm)) {
1029 return Some(bucket);
1030 }
1031 }
1032 None
1033 }
1034 }
1035
1036 /// Gets a reference to an element in the table.
1037 #[inline]
1038 pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
1039 // Avoid `Option::map` because it bloats LLVM IR.
1040 match self.find(hash, eq) {
1041 Some(bucket) => Some(unsafe { bucket.as_ref() }),
1042 None => None,
1043 }
1044 }
1045
1046 /// Gets a mutable reference to an element in the table.
1047 #[inline]
1048 pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
1049 // Avoid `Option::map` because it bloats LLVM IR.
1050 match self.find(hash, eq) {
1051 Some(bucket) => Some(unsafe { bucket.as_mut() }),
1052 None => None,
1053 }
1054 }
1055
1056 /// Returns the number of elements the map can hold without reallocating.
1057 ///
1058 /// This number is a lower bound; the table might be able to hold
1059 /// more, but is guaranteed to be able to hold at least this many.
1060 #[cfg_attr(feature = "inline-more", inline)]
1061 pub fn capacity(&self) -> usize {
1062 self.items + self.growth_left
1063 }
1064
1065 /// Returns the number of elements in the table.
1066 #[cfg_attr(feature = "inline-more", inline)]
1067 pub fn len(&self) -> usize {
1068 self.items
1069 }
1070
1071 /// Returns the number of buckets in the table.
1072 #[cfg_attr(feature = "inline-more", inline)]
1073 pub fn buckets(&self) -> usize {
1074 self.bucket_mask + 1
1075 }
1076
1077 /// Returns the number of control bytes in the table.
1078 #[cfg_attr(feature = "inline-more", inline)]
1079 fn num_ctrl_bytes(&self) -> usize {
1080 self.bucket_mask + 1 + Group::WIDTH
1081 }
1082
1083 /// Returns whether this table points to the empty singleton with a capacity
1084 /// of 0.
1085 #[cfg_attr(feature = "inline-more", inline)]
1086 fn is_empty_singleton(&self) -> bool {
1087 self.bucket_mask == 0
1088 }
1089
1090 /// Returns an iterator over every element in the table. It is up to
1091 /// the caller to ensure that the `RawTable` outlives the `RawIter`.
1092 /// Because we cannot make the `next` method unsafe on the `RawIter`
1093 /// struct, we have to make the `iter` method unsafe.
1094 #[cfg_attr(feature = "inline-more", inline)]
1095 pub unsafe fn iter(&self) -> RawIter<T> {
1096 let data = Bucket::from_base_index(self.data_end(), 0);
1097 RawIter {
1098 iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
1099 items: self.items,
1100 }
1101 }
1102
1103 /// Returns an iterator over occupied buckets that could match a given hash.
1104 ///
1105 /// In rare cases, the iterator may return a bucket with a different hash.
1106 ///
1107 /// It is up to the caller to ensure that the `RawTable` outlives the
1108 /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
1109 /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
1110 #[cfg_attr(feature = "inline-more", inline)]
1111 pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T> {
1112 RawIterHash::new(self, hash)
1113 }
1114
1115 /// Returns an iterator which removes all elements from the table without
1116 /// freeing the memory.
1117 #[cfg_attr(feature = "inline-more", inline)]
1118 pub fn drain(&mut self) -> RawDrain<'_, T> {
1119 unsafe {
1120 let iter = self.iter();
1121 self.drain_iter_from(iter)
1122 }
1123 }
1124
1125 /// Returns an iterator which removes all elements from the table without
1126 /// freeing the memory.
1127 ///
1128 /// Iteration starts at the provided iterator's current location.
1129 ///
1130 /// It is up to the caller to ensure that the iterator is valid for this
1131 /// `RawTable` and covers all items that remain in the table.
1132 #[cfg_attr(feature = "inline-more", inline)]
1133 pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T> {
1134 debug_assert_eq!(iter.len(), self.len());
1135 RawDrain {
1136 iter,
1137 table: ManuallyDrop::new(mem::replace(self, Self::new())),
1138 orig_table: NonNull::from(self),
1139 marker: PhantomData,
1140 }
1141 }
1142
1143 /// Returns an iterator which consumes all elements from the table.
1144 ///
1145 /// Iteration starts at the provided iterator's current location.
1146 ///
1147 /// It is up to the caller to ensure that the iterator is valid for this
1148 /// `RawTable` and covers all items that remain in the table.
1149 pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T> {
1150 debug_assert_eq!(iter.len(), self.len());
1151
1152 let alloc = self.into_alloc();
1153 RawIntoIter {
1154 iter,
1155 alloc,
1156 marker: PhantomData,
1157 }
1158 }
1159
1160 /// Converts the table into a raw allocation. The contents of the table
1161 /// should be dropped using a `RawIter` before freeing the allocation.
1162 #[cfg_attr(feature = "inline-more", inline)]
1163 pub(crate) fn into_alloc(self) -> Option<(NonNull<u8>, Layout)> {
1164 let alloc = if self.is_empty_singleton() {
1165 None
1166 } else {
1167 // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
1168 let (layout, ctrl_offset) = match calculate_layout::<T>(self.buckets()) {
1169 Some(lco) => lco,
1170 None => unsafe { hint::unreachable_unchecked() },
1171 };
1172 Some((
1173 unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
1174 layout,
1175 ))
1176 };
1177 mem::forget(self);
1178 alloc
1179 }
1180 }
1181
1182 unsafe impl<T> Send for RawTable<T> where T: Send {}
1183 unsafe impl<T> Sync for RawTable<T> where T: Sync {}
1184
1185 impl<T: Clone> Clone for RawTable<T> {
1186 fn clone(&self) -> Self {
1187 if self.is_empty_singleton() {
1188 Self::new()
1189 } else {
1190 unsafe {
1191 let mut new_table = ManuallyDrop::new(
1192 // Avoid `Result::ok_or_else` because it bloats LLVM IR.
1193 match Self::new_uninitialized(self.buckets(), Fallibility::Infallible) {
1194 Ok(table) => table,
1195 Err(_) => hint::unreachable_unchecked(),
1196 },
1197 );
1198
1199 new_table.clone_from_spec(self, |new_table| {
1200 // We need to free the memory allocated for the new table.
1201 new_table.free_buckets();
1202 });
1203
1204 // Return the newly created table.
1205 ManuallyDrop::into_inner(new_table)
1206 }
1207 }
1208 }
1209
1210 fn clone_from(&mut self, source: &Self) {
1211 if source.is_empty_singleton() {
1212 *self = Self::new();
1213 } else {
1214 unsafe {
1215 // First, drop all our elements without clearing the control bytes.
1216 if mem::needs_drop::<T>() && self.len() != 0 {
1217 for item in self.iter() {
1218 item.drop();
1219 }
1220 }
1221
1222 // If necessary, resize our table to match the source.
1223 if self.buckets() != source.buckets() {
1224 // Skip our drop by using ptr::write.
1225 if !self.is_empty_singleton() {
1226 self.free_buckets();
1227 }
1228 (self as *mut Self).write(
1229 // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
1230 match Self::new_uninitialized(source.buckets(), Fallibility::Infallible) {
1231 Ok(table) => table,
1232 Err(_) => hint::unreachable_unchecked(),
1233 },
1234 );
1235 }
1236
1237 self.clone_from_spec(source, |self_| {
1238 // We need to leave the table in an empty state.
1239 self_.clear_no_drop()
1240 });
1241 }
1242 }
1243 }
1244 }
1245
1246 /// Specialization of `clone_from` for `Copy` types
1247 trait RawTableClone {
1248 unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self));
1249 }
1250 impl<T: Clone> RawTableClone for RawTable<T> {
1251 #[cfg_attr(feature = "inline-more", inline)]
1252 default_fn! {
1253 unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) {
1254 self.clone_from_impl(source, on_panic);
1255 }
1256 }
1257 }
1258 #[cfg(feature = "nightly")]
1259 impl<T: Copy> RawTableClone for RawTable<T> {
1260 #[cfg_attr(feature = "inline-more", inline)]
1261 unsafe fn clone_from_spec(&mut self, source: &Self, _on_panic: impl FnMut(&mut Self)) {
1262 source
1263 .ctrl(0)
1264 .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());
1265 source
1266 .data_start()
1267 .copy_to_nonoverlapping(self.data_start(), self.buckets());
1268
1269 self.items = source.items;
1270 self.growth_left = source.growth_left;
1271 }
1272 }
1273
1274 impl<T: Clone> RawTable<T> {
1275 /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`.
1276 #[cfg_attr(feature = "inline-more", inline)]
1277 unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) {
1278 // Copy the control bytes unchanged. We do this in a single pass
1279 source
1280 .ctrl(0)
1281 .copy_to_nonoverlapping(self.ctrl(0), self.num_ctrl_bytes());
1282
1283 // The cloning of elements may panic, in which case we need
1284 // to make sure we drop only the elements that have been
1285 // cloned so far.
1286 let mut guard = guard((0, &mut *self), |(index, self_)| {
1287 if mem::needs_drop::<T>() && self_.len() != 0 {
1288 for i in 0..=*index {
1289 if is_full(*self_.ctrl(i)) {
1290 self_.bucket(i).drop();
1291 }
1292 }
1293 }
1294
1295 // Depending on whether we were called from clone or clone_from, we
1296 // either need to free the memory for the destination table or just
1297 // clear the control bytes.
1298 on_panic(self_);
1299 });
1300
1301 for from in source.iter() {
1302 let index = source.bucket_index(&from);
1303 let to = guard.1.bucket(index);
1304 to.write(from.as_ref().clone());
1305
1306 // Update the index in case we need to unwind.
1307 guard.0 = index;
1308 }
1309
1310 // Successfully cloned all items, no need to clean up.
1311 mem::forget(guard);
1312
1313 self.items = source.items;
1314 self.growth_left = source.growth_left;
1315 }
1316
1317 /// Variant of `clone_from` to use when a hasher is available.
1318 #[cfg(feature = "raw")]
1319 pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) {
1320 // If we have enough capacity in the table, just clear it and insert
1321 // elements one by one. We don't do this if we have the same number of
1322 // buckets as the source since we can just copy the contents directly
1323 // in that case.
1324 if self.buckets() != source.buckets()
1325 && bucket_mask_to_capacity(self.bucket_mask) >= source.len()
1326 {
1327 self.clear();
1328
1329 let guard_self = guard(&mut *self, |self_| {
1330 // Clear the partially copied table if a panic occurs, otherwise
1331 // items and growth_left will be out of sync with the contents
1332 // of the table.
1333 self_.clear();
1334 });
1335
1336 unsafe {
1337 for item in source.iter() {
1338 // This may panic.
1339 let item = item.as_ref().clone();
1340 let hash = hasher(&item);
1341
1342 // We can use a simpler version of insert() here since:
1343 // - there are no DELETED entries.
1344 // - we know there is enough space in the table.
1345 // - all elements are unique.
1346 let index = guard_self.find_insert_slot(hash);
1347 guard_self.set_ctrl(index, h2(hash));
1348 guard_self.bucket(index).write(item);
1349 }
1350 }
1351
1352 // Successfully cloned all items, no need to clean up.
1353 mem::forget(guard_self);
1354
1355 self.items = source.items;
1356 self.growth_left -= source.items;
1357 } else {
1358 self.clone_from(source);
1359 }
1360 }
1361 }
1362
1363 #[cfg(feature = "nightly")]
1364 unsafe impl<#[may_dangle] T> Drop for RawTable<T> {
1365 #[cfg_attr(feature = "inline-more", inline)]
1366 fn drop(&mut self) {
1367 if !self.is_empty_singleton() {
1368 unsafe {
1369 if mem::needs_drop::<T>() && self.len() != 0 {
1370 for item in self.iter() {
1371 item.drop();
1372 }
1373 }
1374 self.free_buckets();
1375 }
1376 }
1377 }
1378 }
1379 #[cfg(not(feature = "nightly"))]
1380 impl<T> Drop for RawTable<T> {
1381 #[cfg_attr(feature = "inline-more", inline)]
1382 fn drop(&mut self) {
1383 if !self.is_empty_singleton() {
1384 unsafe {
1385 if mem::needs_drop::<T>() && self.len() != 0 {
1386 for item in self.iter() {
1387 item.drop();
1388 }
1389 }
1390 self.free_buckets();
1391 }
1392 }
1393 }
1394 }
1395
1396 impl<T> IntoIterator for RawTable<T> {
1397 type Item = T;
1398 type IntoIter = RawIntoIter<T>;
1399
1400 #[cfg_attr(feature = "inline-more", inline)]
1401 fn into_iter(self) -> RawIntoIter<T> {
1402 unsafe {
1403 let iter = self.iter();
1404 self.into_iter_from(iter)
1405 }
1406 }
1407 }
1408
1409 /// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
1410 /// not track an item count.
1411 pub(crate) struct RawIterRange<T> {
1412 // Mask of full buckets in the current group. Bits are cleared from this
1413 // mask as each element is processed.
1414 current_group: BitMask,
1415
1416 // Pointer to the buckets for the current group.
1417 data: Bucket<T>,
1418
1419 // Pointer to the next group of control bytes,
1420 // Must be aligned to the group size.
1421 next_ctrl: *const u8,
1422
1423 // Pointer one past the last control byte of this range.
1424 end: *const u8,
1425 }
1426
1427 impl<T> RawIterRange<T> {
1428 /// Returns a `RawIterRange` covering a subset of a table.
1429 ///
1430 /// The control byte address must be aligned to the group size.
1431 #[cfg_attr(feature = "inline-more", inline)]
1432 unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
1433 debug_assert_ne!(len, 0);
1434 debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
1435 let end = ctrl.add(len);
1436
1437 // Load the first group and advance ctrl to point to the next group
1438 let current_group = Group::load_aligned(ctrl).match_full();
1439 let next_ctrl = ctrl.add(Group::WIDTH);
1440
1441 Self {
1442 current_group,
1443 data,
1444 next_ctrl,
1445 end,
1446 }
1447 }
1448
1449 /// Splits a `RawIterRange` into two halves.
1450 ///
1451 /// Returns `None` if the remaining range is smaller than or equal to the
1452 /// group width.
1453 #[cfg_attr(feature = "inline-more", inline)]
1454 #[cfg(feature = "rayon")]
1455 pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
1456 unsafe {
1457 if self.end <= self.next_ctrl {
1458 // Nothing to split if the group that we are currently processing
1459 // is the last one.
1460 (self, None)
1461 } else {
1462 // len is the remaining number of elements after the group that
1463 // we are currently processing. It must be a multiple of the
1464 // group size (small tables are caught by the check above).
1465 let len = offset_from(self.end, self.next_ctrl);
1466 debug_assert_eq!(len % Group::WIDTH, 0);
1467
1468 // Split the remaining elements into two halves, but round the
1469 // midpoint down in case there is an odd number of groups
1470 // remaining. This ensures that:
1471 // - The tail is at least 1 group long.
1472 // - The split is roughly even considering we still have the
1473 // current group to process.
1474 let mid = (len / 2) & !(Group::WIDTH - 1);
1475
1476 let tail = Self::new(
1477 self.next_ctrl.add(mid),
1478 self.data.next_n(Group::WIDTH).next_n(mid),
1479 len - mid,
1480 );
1481 debug_assert_eq!(
1482 self.data.next_n(Group::WIDTH).next_n(mid).ptr,
1483 tail.data.ptr
1484 );
1485 debug_assert_eq!(self.end, tail.end);
1486 self.end = self.next_ctrl.add(mid);
1487 debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
1488 (self, Some(tail))
1489 }
1490 }
1491 }
1492 }
1493
1494 // We make raw iterators unconditionally Send and Sync, and let the PhantomData
1495 // in the actual iterator implementations determine the real Send/Sync bounds.
1496 unsafe impl<T> Send for RawIterRange<T> {}
1497 unsafe impl<T> Sync for RawIterRange<T> {}
1498
1499 impl<T> Clone for RawIterRange<T> {
1500 #[cfg_attr(feature = "inline-more", inline)]
1501 fn clone(&self) -> Self {
1502 Self {
1503 data: self.data.clone(),
1504 next_ctrl: self.next_ctrl,
1505 current_group: self.current_group,
1506 end: self.end,
1507 }
1508 }
1509 }
1510
1511 impl<T> Iterator for RawIterRange<T> {
1512 type Item = Bucket<T>;
1513
1514 #[cfg_attr(feature = "inline-more", inline)]
1515 fn next(&mut self) -> Option<Bucket<T>> {
1516 unsafe {
1517 loop {
1518 if let Some(index) = self.current_group.lowest_set_bit() {
1519 self.current_group = self.current_group.remove_lowest_bit();
1520 return Some(self.data.next_n(index));
1521 }
1522
1523 if self.next_ctrl >= self.end {
1524 return None;
1525 }
1526
1527 // We might read past self.end up to the next group boundary,
1528 // but this is fine because it only occurs on tables smaller
1529 // than the group size where the trailing control bytes are all
1530 // EMPTY. On larger tables self.end is guaranteed to be aligned
1531 // to the group size (since tables are power-of-two sized).
1532 self.current_group = Group::load_aligned(self.next_ctrl).match_full();
1533 self.data = self.data.next_n(Group::WIDTH);
1534 self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
1535 }
1536 }
1537 }
1538
1539 #[cfg_attr(feature = "inline-more", inline)]
1540 fn size_hint(&self) -> (usize, Option<usize>) {
1541 // We don't have an item count, so just guess based on the range size.
1542 (
1543 0,
1544 Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
1545 )
1546 }
1547 }
1548
1549 impl<T> FusedIterator for RawIterRange<T> {}
1550
1551 /// Iterator which returns a raw pointer to every full bucket in the table.
1552 ///
1553 /// For maximum flexibility this iterator is not bound by a lifetime, but you
1554 /// must observe several rules when using it:
1555 /// - You must not free the hash table while iterating (including via growing/shrinking).
1556 /// - It is fine to erase a bucket that has been yielded by the iterator.
1557 /// - Erasing a bucket that has not yet been yielded by the iterator may still
1558 /// result in the iterator yielding that bucket (unless `reflect_remove` is called).
1559 /// - It is unspecified whether an element inserted after the iterator was
1560 /// created will be yielded by that iterator (unless `reflect_insert` is called).
1561 /// - The order in which the iterator yields buckets is unspecified and may
1562 /// change in the future.
1563 pub struct RawIter<T> {
1564 pub(crate) iter: RawIterRange<T>,
1565 items: usize,
1566 }
1567
1568 impl<T> RawIter<T> {
1569 /// Refresh the iterator so that it reflects a removal from the given bucket.
1570 ///
1571 /// For the iterator to remain valid, this method must be called once
1572 /// for each removed bucket before `next` is called again.
1573 ///
1574 /// This method should be called _before_ the removal is made. It is not necessary to call this
1575 /// method if you are removing an item that this iterator yielded in the past.
1576 #[cfg(feature = "raw")]
1577 pub fn reflect_remove(&mut self, b: &Bucket<T>) {
1578 self.reflect_toggle_full(b, false);
1579 }
1580
1581 /// Refresh the iterator so that it reflects an insertion into the given bucket.
1582 ///
1583 /// For the iterator to remain valid, this method must be called once
1584 /// for each insert before `next` is called again.
1585 ///
1586 /// This method does not guarantee that an insertion of a bucket with a greater
1587 /// index than the last one yielded will be reflected in the iterator.
1588 ///
1589 /// This method should be called _after_ the given insert is made.
1590 #[cfg(feature = "raw")]
1591 pub fn reflect_insert(&mut self, b: &Bucket<T>) {
1592 self.reflect_toggle_full(b, true);
1593 }
1594
1595 /// Refresh the iterator so that it reflects a change to the state of the given bucket.
1596 #[cfg(feature = "raw")]
1597 fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
1598 unsafe {
1599 if b.as_ptr() > self.iter.data.as_ptr() {
1600 // The iterator has already passed the bucket's group.
1601 // So the toggle isn't relevant to this iterator.
1602 return;
1603 }
1604
1605 if self.iter.next_ctrl < self.iter.end
1606 && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
1607 {
1608 // The iterator has not yet reached the bucket's group.
1609 // We don't need to reload anything, but we do need to adjust the item count.
1610
1611 if cfg!(debug_assertions) {
1612 // Double-check that the user isn't lying to us by checking the bucket state.
1613 // To do that, we need to find its control byte. We know that self.iter.data is
1614 // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
1615 let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
1616 let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
1617 // This method should be called _before_ a removal, or _after_ an insert,
1618 // so in both cases the ctrl byte should indicate that the bucket is full.
1619 assert!(is_full(*ctrl));
1620 }
1621
1622 if is_insert {
1623 self.items += 1;
1624 } else {
1625 self.items -= 1;
1626 }
1627
1628 return;
1629 }
1630
1631 // The iterator is at the bucket group that the toggled bucket is in.
1632 // We need to do two things:
1633 //
1634 // - Determine if the iterator already yielded the toggled bucket.
1635 // If it did, we're done.
1636 // - Otherwise, update the iterator's cached group so that it won't
1637 // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
1638 // We'll also need to update the item count accordingly.
1639 if let Some(index) = self.iter.current_group.lowest_set_bit() {
1640 let next_bucket = self.iter.data.next_n(index);
1641 if b.as_ptr() > next_bucket.as_ptr() {
1642 // The toggled bucket is "before" the bucket the iterator would yield next. We
1643 // therefore don't need to do anything --- the iterator has already passed the
1644 // bucket in question.
1645 //
1646 // The item count must already be correct, since a removal or insert "prior" to
1647 // the iterator's position wouldn't affect the item count.
1648 } else {
1649 // The toggled bucket is an upcoming bucket. For a removal we need to make sure it
1650 // does _not_ get yielded and is no longer counted; for an insertion, the opposite.
1651 //
1652 // NOTE: We can't just reload the group here, both since that might reflect
1653 // inserts we've already passed, and because that might inadvertently unset the
1654 // bits for _other_ removals. If we do that, we'd have to also decrement the
1655 // item count for those other bits that we unset. But the presumably subsequent
1656 // call to reflect for those buckets might _also_ decrement the item count.
1657 // Instead, we _just_ flip the bit for the particular bucket the caller asked
1658 // us to reflect.
1659 let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
1660 let was_full = self.iter.current_group.flip(our_bit);
1661 debug_assert_ne!(was_full, is_insert);
1662
1663 if is_insert {
1664 self.items += 1;
1665 } else {
1666 self.items -= 1;
1667 }
1668
1669 if cfg!(debug_assertions) {
1670 if b.as_ptr() == next_bucket.as_ptr() {
1671 // The removed bucket should no longer be next
1672 debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
1673 } else {
1674 // We should not have changed what bucket comes next.
1675 debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
1676 }
1677 }
1678 }
1679 } else {
1680 // We must have already iterated past the removed item.
1681 }
1682 }
1683 }
1684 }
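// Editor's note: a self-contained toy model (assumed names, not crate API) of
// the "flip only the caller's bit" strategy used in `reflect_toggle_full`
// above. The cached group is modelled as a plain `u16` bitmask in which bit i
// means "bucket i of the current group is still to be yielded".
//
//     fn reflect_toggle(cached_group: &mut u16, items: &mut usize, bit: u16, is_insert: bool) {
//         let was_full = *cached_group & (1 << bit) != 0;
//         // Flip exactly one bit; reloading the whole group could pick up
//         // unrelated inserts/removals and corrupt the item count.
//         *cached_group ^= 1 << bit;
//         debug_assert_ne!(was_full, is_insert);
//         if is_insert { *items += 1 } else { *items -= 1 }
//     }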
1685
1686 impl<T> Clone for RawIter<T> {
1687 #[cfg_attr(feature = "inline-more", inline)]
1688 fn clone(&self) -> Self {
1689 Self {
1690 iter: self.iter.clone(),
1691 items: self.items,
1692 }
1693 }
1694 }
1695
1696 impl<T> Iterator for RawIter<T> {
1697 type Item = Bucket<T>;
1698
1699 #[cfg_attr(feature = "inline-more", inline)]
1700 fn next(&mut self) -> Option<Bucket<T>> {
1701 if let Some(b) = self.iter.next() {
1702 self.items -= 1;
1703 Some(b)
1704 } else {
1705 // We don't check against items == 0 here to allow the
1706 // compiler to optimize away the item count entirely if the
1707 // iterator length is never queried.
1708 debug_assert_eq!(self.items, 0);
1709 None
1710 }
1711 }
1712
1713 #[cfg_attr(feature = "inline-more", inline)]
1714 fn size_hint(&self) -> (usize, Option<usize>) {
1715 (self.items, Some(self.items))
1716 }
1717 }
1718
1719 impl<T> ExactSizeIterator for RawIter<T> {}
1720 impl<T> FusedIterator for RawIter<T> {}
1721
1722 /// Iterator which consumes a table and returns elements.
1723 pub struct RawIntoIter<T> {
1724 iter: RawIter<T>,
1725 alloc: Option<(NonNull<u8>, Layout)>,
1726 marker: PhantomData<T>,
1727 }
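// Editor's note: a hedged usage sketch. `RawTable<T>` is assumed to implement
// `IntoIterator` with `IntoIter = RawIntoIter<T>` in this crate version, so
// consuming the table reads like any other owning iterator; any elements that
// are not consumed, plus the allocation itself, are cleaned up by the `Drop`
// impls below.
//
//     let mut table: RawTable<String> = RawTable::with_capacity(4);
//     let hasher = |s: &String| s.len() as u64; // toy "hash" for illustration only
//     table.insert(hasher(&String::from("a")), String::from("a"), hasher);
//
//     for value in table {
//         // `value` is an owned `String` read out of its bucket.
//         drop(value);
//     }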
1728
1729 impl<T> RawIntoIter<T> {
1730 #[cfg_attr(feature = "inline-more", inline)]
1731 pub fn iter(&self) -> RawIter<T> {
1732 self.iter.clone()
1733 }
1734 }
1735
1736 unsafe impl<T> Send for RawIntoIter<T> where T: Send {}
1737 unsafe impl<T> Sync for RawIntoIter<T> where T: Sync {}
1738
1739 #[cfg(feature = "nightly")]
1740 unsafe impl<#[may_dangle] T> Drop for RawIntoIter<T> {
1741 #[cfg_attr(feature = "inline-more", inline)]
1742 fn drop(&mut self) {
1743 unsafe {
1744 // Drop all remaining elements
1745 if mem::needs_drop::<T>() && self.iter.len() != 0 {
1746 while let Some(item) = self.iter.next() {
1747 item.drop();
1748 }
1749 }
1750
1751 // Free the table
1752 if let Some((ptr, layout)) = self.alloc {
1753 dealloc(ptr.as_ptr(), layout);
1754 }
1755 }
1756 }
1757 }
1758 #[cfg(not(feature = "nightly"))]
1759 impl<T> Drop for RawIntoIter<T> {
1760 #[cfg_attr(feature = "inline-more", inline)]
1761 fn drop(&mut self) {
1762 unsafe {
1763 // Drop all remaining elements
1764 if mem::needs_drop::<T>() && self.iter.len() != 0 {
1765 while let Some(item) = self.iter.next() {
1766 item.drop();
1767 }
1768 }
1769
1770 // Free the table
1771 if let Some((ptr, layout)) = self.alloc {
1772 dealloc(ptr.as_ptr(), layout);
1773 }
1774 }
1775 }
1776 }
1777
1778 impl<T> Iterator for RawIntoIter<T> {
1779 type Item = T;
1780
1781 #[cfg_attr(feature = "inline-more", inline)]
1782 fn next(&mut self) -> Option<T> {
1783 unsafe { Some(self.iter.next()?.read()) }
1784 }
1785
1786 #[cfg_attr(feature = "inline-more", inline)]
1787 fn size_hint(&self) -> (usize, Option<usize>) {
1788 self.iter.size_hint()
1789 }
1790 }
1791
1792 impl<T> ExactSizeIterator for RawIntoIter<T> {}
1793 impl<T> FusedIterator for RawIntoIter<T> {}
1794
1795 /// Iterator which consumes elements without freeing the table storage.
1796 pub struct RawDrain<'a, T> {
1797 iter: RawIter<T>,
1798
1799 // The table is moved into the iterator for the duration of the drain. This
1800 // ensures that an empty table is left if the drain iterator is leaked
1801 // without dropping.
1802 table: ManuallyDrop<RawTable<T>>,
1803 orig_table: NonNull<RawTable<T>>,
1804
1805 // We don't use a &'a mut RawTable<T> because we want RawDrain to be
1806 // covariant over T.
1807 marker: PhantomData<&'a RawTable<T>>,
1808 }
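// Editor's note: a hedged sketch of why the table is moved into the drain.
// `RawTable::drain` is assumed to have its usual shape here. Because the
// original table is left empty for the duration of the drain, even leaking
// the drain cannot leave the caller with a table that still points at
// half-dropped elements.
//
//     let mut drain = table.drain();
//     let _first = drain.next();  // elements are read out one at a time
//     core::mem::forget(drain);   // leaking the drain leaks the remaining
//                                 // elements and the storage, but `table`
//                                 // itself is left as a valid, empty table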
1809
1810 impl<T> RawDrain<'_, T> {
1811 #[cfg_attr(feature = "inline-more", inline)]
1812 pub fn iter(&self) -> RawIter<T> {
1813 self.iter.clone()
1814 }
1815 }
1816
1817 unsafe impl<T> Send for RawDrain<'_, T> where T: Send {}
1818 unsafe impl<T> Sync for RawDrain<'_, T> where T: Sync {}
1819
1820 impl<T> Drop for RawDrain<'_, T> {
1821 #[cfg_attr(feature = "inline-more", inline)]
1822 fn drop(&mut self) {
1823 unsafe {
1824 // Drop all remaining elements. Note that this may panic.
1825 if mem::needs_drop::<T>() && self.iter.len() != 0 {
1826 while let Some(item) = self.iter.next() {
1827 item.drop();
1828 }
1829 }
1830
1831 // Reset the contents of the table now that all elements have been
1832 // dropped.
1833 self.table.clear_no_drop();
1834
1835 // Move the now empty table back to its original location.
1836 self.orig_table
1837 .as_ptr()
1838 .copy_from_nonoverlapping(&*self.table, 1);
1839 }
1840 }
1841 }
1842
1843 impl<T> Iterator for RawDrain<'_, T> {
1844 type Item = T;
1845
1846 #[cfg_attr(feature = "inline-more", inline)]
1847 fn next(&mut self) -> Option<T> {
1848 unsafe {
1849 let item = self.iter.next()?;
1850 Some(item.read())
1851 }
1852 }
1853
1854 #[cfg_attr(feature = "inline-more", inline)]
1855 fn size_hint(&self) -> (usize, Option<usize>) {
1856 self.iter.size_hint()
1857 }
1858 }
1859
1860 impl<T> ExactSizeIterator for RawDrain<'_, T> {}
1861 impl<T> FusedIterator for RawDrain<'_, T> {}
1862
1863 /// Iterator over occupied buckets that could match a given hash.
1864 ///
1865 /// In rare cases, the iterator may return a bucket with a different hash.
1866 pub struct RawIterHash<'a, T> {
1867 table: &'a RawTable<T>,
1868
1869 // The top 7 bits of the hash.
1870 h2_hash: u8,
1871
1872 // The sequence of groups to probe in the search.
1873 probe_seq: ProbeSeq,
1874
1875 // The current group and its position.
1876 pos: usize,
1877 group: Group,
1878
1879 // The elements within the group with a matching h2-hash.
1880 bitmask: BitMaskIter,
1881 }
1882
1883 impl<'a, T> RawIterHash<'a, T> {
1884 fn new(table: &'a RawTable<T>, hash: u64) -> Self {
1885 unsafe {
1886 let h2_hash = h2(hash);
1887 let mut probe_seq = table.probe_seq(hash);
1888 let pos = probe_seq.next().unwrap();
1889 let group = Group::load(table.ctrl(pos));
1890 let bitmask = group.match_byte(h2_hash).into_iter();
1891
1892 RawIterHash {
1893 table,
1894 h2_hash,
1895 probe_seq,
1896 pos,
1897 group,
1898 bitmask,
1899 }
1900 }
1901 }
1902 }
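// Editor's note: a hedged usage sketch. A `RawIterHash` is normally obtained
// from `RawTable::iter_hash` (a `raw`-feature method in this crate version;
// treat its exact signature as an assumption). Because only the 7-bit `h2`
// tag is compared, and in rare cases even buckets with a different hash may
// be returned, callers must re-check full equality on every yielded bucket.
//
//     let hash = hasher(&needle);
//     for bucket in table.iter_hash(hash) {
//         let candidate = unsafe { bucket.as_ref() };
//         if *candidate == needle {
//             // a genuinely matching entry
//             break;
//         }
//         // otherwise: an h2 collision; keep probing
//     }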
1903
1904 impl<'a, T> Iterator for RawIterHash<'a, T> {
1905 type Item = Bucket<T>;
1906
1907 fn next(&mut self) -> Option<Bucket<T>> {
1908 unsafe {
1909 loop {
1910 if let Some(bit) = self.bitmask.next() {
1911 let index = (self.pos + bit) & self.table.bucket_mask;
1912 let bucket = self.table.bucket(index);
1913 return Some(bucket);
1914 }
1915 if likely(self.group.match_empty().any_bit_set()) {
1916 return None;
1917 }
1918 self.pos = self.probe_seq.next().unwrap();
1919 self.group = Group::load(self.table.ctrl(self.pos));
1920 self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
1921 }
1922 }
1923 }
1924 }
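// Editor's note: a self-contained toy model (plain arrays, assumed names, no
// SIMD) of the probe loop above: scan the control bytes one group at a time,
// collect the indices whose `h2` tag matches, and stop after the first group
// that contains an EMPTY byte, because the sought key could never have been
// inserted beyond that point. The real `ProbeSeq` advances with a growing
// (triangular) stride rather than the fixed stride shown here.
//
//     const EMPTY: u8 = 0xFF;
//     const GROUP: usize = 8;
//
//     fn candidates(ctrl: &[u8], start: usize, h2: u8) -> Vec<usize> {
//         let mask = ctrl.len() - 1; // ctrl.len() is assumed to be a power of two
//         let mut out = Vec::new();
//         let mut pos = start & mask;
//         loop {
//             let mut group_has_empty = false;
//             for i in 0..GROUP {
//                 let idx = (pos + i) & mask;
//                 if ctrl[idx] == h2 {
//                     out.push(idx); // candidate: full bucket with a matching tag
//                 }
//                 group_has_empty |= ctrl[idx] == EMPTY;
//             }
//             if group_has_empty {
//                 return out; // the key cannot live past a group with an empty slot
//             }
//             pos = (pos + GROUP) & mask;
//         }
//     }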