// vendor/hashbrown/src/raw/mod.rs (hashbrown as vendored in rustc 1.46.0)

use crate::alloc::alloc::{alloc, dealloc, handle_alloc_error};
use crate::scopeguard::guard;
use crate::CollectionAllocErr;
use core::alloc::Layout;
use core::hint;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::mem::ManuallyDrop;
use core::ptr::NonNull;

cfg_if! {
    // Use the SSE2 implementation if possible: it allows us to scan 16 buckets
    // at once instead of 8. We don't bother with AVX since it would require
    // runtime dispatch and wouldn't gain us much anyways: the probability of
    // finding a match drops off drastically after the first few buckets.
    //
    // I attempted an implementation on ARM using NEON instructions, but it
    // turns out that most NEON instructions have multi-cycle latency, which in
    // the end outweighs any gains over the generic implementation.
    if #[cfg(all(
        target_feature = "sse2",
        any(target_arch = "x86", target_arch = "x86_64"),
        not(miri)
    ))] {
        mod sse2;
        use sse2 as imp;
    } else {
        #[path = "generic.rs"]
        mod generic;
        use generic as imp;
    }
}

mod bitmask;

use self::bitmask::BitMask;
use self::imp::Group;

// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};
#[cfg(not(feature = "nightly"))]
#[inline]
fn likely(b: bool) -> bool {
    b
}
#[cfg(not(feature = "nightly"))]
#[inline]
fn unlikely(b: bool) -> bool {
    b
}

#[cfg(feature = "nightly")]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}
#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    (to as usize - from as usize) / mem::size_of::<T>()
}

/// Whether memory allocation errors should return an error or abort.
#[derive(Copy, Clone)]
enum Fallibility {
    Fallible,
    Infallible,
}

impl Fallibility {
    /// Error to return on capacity overflow.
    #[cfg_attr(feature = "inline-more", inline)]
    fn capacity_overflow(self) -> CollectionAllocErr {
        use Fallibility::*;
        match self {
            Fallible => CollectionAllocErr::CapacityOverflow,
            Infallible => panic!("Hash table capacity overflow"),
        }
    }

    /// Error to return on allocation error.
    #[cfg_attr(feature = "inline-more", inline)]
    fn alloc_err(self, layout: Layout) -> CollectionAllocErr {
        use Fallibility::*;
        match self {
            Fallible => CollectionAllocErr::AllocErr { layout },
            Infallible => handle_alloc_error(layout),
        }
    }
}

/// Control byte value for an empty bucket.
const EMPTY: u8 = 0b1111_1111;

/// Control byte value for a deleted bucket.
const DELETED: u8 = 0b1000_0000;

/// Checks whether a control byte represents a full bucket (top bit is clear).
#[inline]
fn is_full(ctrl: u8) -> bool {
    ctrl & 0x80 == 0
}

/// Checks whether a control byte represents a special value (top bit is set).
#[inline]
fn is_special(ctrl: u8) -> bool {
    ctrl & 0x80 != 0
}

/// Checks whether a special control value is EMPTY (just check 1 bit).
#[inline]
fn special_is_empty(ctrl: u8) -> bool {
    debug_assert!(is_special(ctrl));
    ctrl & 0x01 != 0
}
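
// A minimal illustrative sketch (not part of the upstream file): the control
// byte encoding above means EMPTY and DELETED are both "special" (top bit
// set), only EMPTY has the low bit set, and any byte with the top bit clear
// counts as full. The 0x5a value below is an arbitrary example.
#[cfg(test)]
mod ctrl_byte_sketch {
    use super::*;

    #[test]
    fn special_values_are_distinguishable() {
        assert!(is_special(EMPTY) && is_special(DELETED));
        assert!(!is_full(EMPTY) && !is_full(DELETED));
        assert!(special_is_empty(EMPTY));
        assert!(!special_is_empty(DELETED));
        // Any 7-bit value (as produced by h2) is treated as a full bucket.
        assert!(is_full(0x5a));
    }
}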

/// Primary hash function, used to select the initial bucket to probe from.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // On 32-bit platforms we simply ignore the higher hash bits.
    hash as usize
}

/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
    // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
    // value, some hash functions (such as FxHash) produce a usize result
    // instead, which means that the top 32 bits are 0 on 32-bit platforms.
    let hash_len = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
    let top7 = hash >> (hash_len * 8 - 7);
    (top7 & 0x7f) as u8 // truncation
}
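
// Illustrative sketch (not in the original source): whatever the input hash,
// h2 keeps only 7 bits, so the result always has its top bit clear and is
// therefore a valid "full" control byte that can never collide with EMPTY or
// DELETED. The sample hash values are arbitrary.
#[cfg(test)]
mod h2_sketch {
    use super::*;

    #[test]
    fn h2_always_yields_a_full_control_byte() {
        for &hash in &[0u64, 1, 0xdead_beef, u64::max_value()] {
            let ctrl = h2(hash);
            assert!(ctrl < 0x80);
            assert!(is_full(ctrl));
        }
    }
}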

/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
struct ProbeSeq {
    bucket_mask: usize,
    pos: usize,
    stride: usize,
}

impl Iterator for ProbeSeq {
    type Item = usize;

    #[inline]
    fn next(&mut self) -> Option<usize> {
        // We should have found an empty bucket by now and ended the probe.
        debug_assert!(
            self.stride <= self.bucket_mask,
            "Went past end of probe sequence"
        );

        let result = self.pos;
        self.stride += Group::WIDTH;
        self.pos += self.stride;
        self.pos &= self.bucket_mask;
        Some(result)
    }
}
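
// Illustrative sketch (not part of the upstream file) of the triangular-probe
// property described above: for a power-of-two table, the first
// `buckets / Group::WIDTH` positions are all distinct, i.e. every group-sized
// offset is visited once before the sequence could repeat. The 64-bucket table
// and start position 5 are arbitrary choices for the test.
#[cfg(test)]
mod probe_seq_sketch {
    use super::*;

    #[test]
    fn triangular_probe_visits_each_group_offset_once() {
        let buckets = usize::max(64, Group::WIDTH);
        let mut seq = ProbeSeq {
            bucket_mask: buckets - 1,
            pos: 5 & (buckets - 1),
            stride: 0,
        };
        let mut seen = [false; 64];
        for _ in 0..buckets / Group::WIDTH {
            let pos = seq.next().unwrap();
            assert!(!seen[pos % 64], "position visited twice");
            seen[pos % 64] = true;
        }
    }
}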

/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    let adjusted_cap = if cap < 8 {
        // Need at least 1 free bucket on small tables
        cap + 1
    } else {
        // Otherwise require 1/8 buckets to be empty (87.5% load)
        //
        // Be careful when modifying this, calculate_layout relies on the
        // overflow check here.
        cap.checked_mul(8)? / 7
    };

    // Any overflows will have been caught by the checked_mul. Also, any
    // rounding errors from the division above will be cleaned up by
    // next_power_of_two (which can't overflow because of the previous division).
    Some(adjusted_cap.next_power_of_two())
}

/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[cfg_attr(feature = "inline-more", inline)]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
        // Keep in mind that the bucket mask is one less than the bucket count.
        bucket_mask
    } else {
        // For larger tables we reserve 12.5% of the slots as empty.
        ((bucket_mask + 1) / 8) * 7
    }
}
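
// Illustrative sketch (not part of the upstream file): the two helpers above
// are meant to be consistent with each other, so a table sized with
// capacity_to_buckets can always hold the requested number of items under the
// load factor reported by bucket_mask_to_capacity. The capacities below are
// arbitrary samples.
#[cfg(test)]
mod capacity_sketch {
    use super::*;

    #[test]
    fn buckets_sized_for_capacity_can_hold_it() {
        for &cap in &[0usize, 1, 3, 7, 8, 9, 100, 1000] {
            let buckets = capacity_to_buckets(cap).unwrap();
            assert!(buckets.is_power_of_two());
            assert!(bucket_mask_to_capacity(buckets - 1) >= cap);
        }
    }
}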

/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the buckets in the allocation.
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "nightly")]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());

    // Array of buckets
    let data = Layout::array::<T>(buckets).ok()?;

    // Array of control bytes. This must be aligned to the group size.
    //
    // We add `Group::WIDTH` control bytes at the end of the array which
    // replicate the bytes at the start of the array and thus avoid the need to
    // perform bounds-checking while probing.
    //
    // There is no possible overflow here since buckets is a power of two and
    // Group::WIDTH is a small number.
    let ctrl = unsafe { Layout::from_size_align_unchecked(buckets + Group::WIDTH, Group::WIDTH) };

    ctrl.extend(data).ok()
}

/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the buckets in the allocation.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(not(feature = "nightly"))]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    debug_assert!(buckets.is_power_of_two());

    // Manual layout calculation since Layout methods are not yet stable.
    let data_align = usize::max(mem::align_of::<T>(), Group::WIDTH);
    let data_offset = (buckets + Group::WIDTH).checked_add(data_align - 1)? & !(data_align - 1);
    let len = data_offset.checked_add(mem::size_of::<T>().checked_mul(buckets)?)?;

    Some((
        unsafe { Layout::from_size_align_unchecked(len, data_align) },
        data_offset,
    ))
}
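
// Illustrative sketch (not part of the upstream file): whichever of the two
// calculate_layout implementations is compiled in, the resulting layout places
// the bucket array after the control bytes (including the Group::WIDTH
// replicated bytes) and respects the alignment of both. `u64` and 8 buckets
// are arbitrary choices for the test.
#[cfg(test)]
mod layout_sketch {
    use super::*;

    #[test]
    fn buckets_follow_the_control_bytes() {
        let buckets = 8;
        let (layout, data_offset) = calculate_layout::<u64>(buckets).unwrap();
        assert!(layout.align() >= Group::WIDTH);
        assert!(layout.align() >= mem::align_of::<u64>());
        assert!(data_offset >= buckets + Group::WIDTH);
        assert_eq!(data_offset % mem::align_of::<u64>(), 0);
        assert!(layout.size() >= data_offset + buckets * mem::size_of::<u64>());
    }
}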

/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // Using *const for variance
    ptr: *const T,
}

// This Send impl is needed for rayon support. This is safe since Bucket is
// never exposed in a public API.
unsafe impl<T> Send for Bucket<T> {}

impl<T> Clone for Bucket<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self { ptr: self.ptr }
    }
}

impl<T> Bucket<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn from_base_index(base: *const T, index: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            index as *const T
        } else {
            base.add(index)
        };
        Self { ptr }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ptr(&self) -> *mut T {
        if mem::size_of::<T>() == 0 {
            // Just return an arbitrary ZST pointer which is properly aligned
            mem::align_of::<T>() as *mut T
        } else {
            self.ptr as *mut T
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn add(&self, offset: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            (self.ptr as usize + offset) as *const T
        } else {
            self.ptr.add(offset)
        };
        Self { ptr }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
        self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
    }
}

/// A raw hash table with an unsafe API.
pub struct RawTable<T> {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,

    // Pointer to the array of control bytes
    ctrl: NonNull<u8>,

    // Pointer to the array of buckets
    data: NonNull<T>,

    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,

    // Number of elements in the table, only really used by len()
    items: usize,

    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}

impl<T> RawTable<T> {
    /// Creates a new empty hash table without allocating any memory.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn new() -> Self {
        Self {
            data: NonNull::dangling(),
            // Be careful to cast the entire slice to a raw pointer.
            ctrl: unsafe { NonNull::new_unchecked(Group::static_empty().as_ptr() as *mut u8) },
            bucket_mask: 0,
            items: 0,
            growth_left: 0,
            marker: PhantomData,
        }
    }

    /// Allocates a new hash table with the given number of buckets.
    ///
    /// The control bytes are left uninitialized.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(
        buckets: usize,
        fallability: Fallibility,
    ) -> Result<Self, CollectionAllocErr> {
        debug_assert!(buckets.is_power_of_two());
        let (layout, data_offset) =
            calculate_layout::<T>(buckets).ok_or_else(|| fallability.capacity_overflow())?;
        let ctrl = NonNull::new(alloc(layout)).ok_or_else(|| fallability.alloc_err(layout))?;
        let data = NonNull::new_unchecked(ctrl.as_ptr().add(data_offset) as *mut T);
        Ok(Self {
            data,
            ctrl,
            bucket_mask: buckets - 1,
            items: 0,
            growth_left: bucket_mask_to_capacity(buckets - 1),
            marker: PhantomData,
        })
    }

    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    fn try_with_capacity(
        capacity: usize,
        fallability: Fallibility,
    ) -> Result<Self, CollectionAllocErr> {
        if capacity == 0 {
            Ok(Self::new())
        } else {
            unsafe {
                let buckets =
                    capacity_to_buckets(capacity).ok_or_else(|| fallability.capacity_overflow())?;
                let result = Self::new_uninitialized(buckets, fallability)?;
                result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());

                Ok(result)
            }
        }
    }

    /// Allocates a new hash table with at least enough capacity for inserting
    /// the given number of elements without reallocating.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::try_with_capacity(capacity, Fallibility::Infallible)
            .unwrap_or_else(|_| unsafe { hint::unreachable_unchecked() })
    }

    /// Deallocates the table without dropping any entries.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn free_buckets(&mut self) {
        let (layout, _) =
            calculate_layout::<T>(self.buckets()).unwrap_or_else(|| hint::unreachable_unchecked());
        dealloc(self.ctrl.as_ptr(), layout);
    }

    /// Returns the index of a bucket from a `Bucket`.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
        if mem::size_of::<T>() == 0 {
            bucket.ptr as usize
        } else {
            offset_from(bucket.ptr, self.data.as_ptr())
        }
    }

    /// Returns a pointer to a control byte.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn ctrl(&self, index: usize) -> *mut u8 {
        debug_assert!(index < self.num_ctrl_bytes());
        self.ctrl.as_ptr().add(index)
    }

    /// Returns a pointer to an element in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
        debug_assert_ne!(self.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data.as_ptr(), index)
    }

    /// Erases an element from the table without dropping it.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
        let index = self.bucket_index(item);
        debug_assert!(is_full(*self.ctrl(index)));
        let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
        let empty_before = Group::load(self.ctrl(index_before)).match_empty();
        let empty_after = Group::load(self.ctrl(index)).match_empty();

        // If we are inside a continuous block of Group::WIDTH full or deleted
        // cells then a probe window may have seen a full block when trying to
        // insert. We therefore need to keep that block non-empty so that
        // lookups will continue searching to the next probe window.
        //
        // Note that in this context `leading_zeros` refers to the bytes at the
        // end of a group, while `trailing_zeros` refers to the bytes at the
        // beginning of a group.
        let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
            DELETED
        } else {
            self.growth_left += 1;
            EMPTY
        };
        self.set_ctrl(index, ctrl);
        self.items -= 1;
    }

    /// Returns an iterator for a probe sequence on the table.
    ///
    /// This iterator never terminates, but is guaranteed to visit each bucket
    /// group exactly once. The loop using `probe_seq` must terminate upon
    /// reaching a group containing an empty bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
        ProbeSeq {
            bucket_mask: self.bucket_mask,
            pos: h1(hash) & self.bucket_mask,
            stride: 0,
        }
    }

    /// Sets a control byte, and possibly also the replicated control byte at
    /// the end of the array.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
        // Replicate the first Group::WIDTH control bytes at the end of
        // the array without using a branch:
        // - If index >= Group::WIDTH then index == index2.
        // - Otherwise index2 == self.bucket_mask + 1 + index.
        //
        // The very last replicated control byte is never actually read because
        // we mask the initial index for unaligned loads, but we write it
        // anyways because it makes the set_ctrl implementation simpler.
        //
        // If there are fewer buckets than Group::WIDTH then this code will
        // replicate the buckets at the end of the trailing group. For example
        // with 2 buckets and a group size of 4, the control bytes will look
        // like this:
        //
        //     Real    |          Replicated
        // ---------------------------------------------
        // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
        // ---------------------------------------------
        let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;

        *self.ctrl(index) = ctrl;
        *self.ctrl(index2) = ctrl;
    }

    /// Searches for an empty or deleted bucket which is suitable for inserting
    /// a new element.
    ///
    /// There must be at least 1 empty bucket in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    fn find_insert_slot(&self, hash: u64) -> usize {
        for pos in self.probe_seq(hash) {
            unsafe {
                let group = Group::load(self.ctrl(pos));
                if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
                    let result = (pos + bit) & self.bucket_mask;

                    // In tables smaller than the group width, trailing control
                    // bytes outside the range of the table are filled with
                    // EMPTY entries. These will unfortunately trigger a
                    // match, but once masked may point to a full bucket that
                    // is already occupied. We detect this situation here and
                    // perform a second scan starting at the beginning of the
                    // table. This second scan is guaranteed to find an empty
                    // slot (due to the load factor) before hitting the trailing
                    // control bytes (containing EMPTY).
                    if unlikely(is_full(*self.ctrl(result))) {
                        debug_assert!(self.bucket_mask < Group::WIDTH);
                        debug_assert_ne!(pos, 0);
                        return Group::load_aligned(self.ctrl(0))
                            .match_empty_or_deleted()
                            .lowest_set_bit_nonzero();
                    } else {
                        return result;
                    }
                }
            }
        }

        // probe_seq never returns.
        unreachable!();
    }

    /// Marks all table buckets as empty without dropping their contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear_no_drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes());
            }
        }
        self.items = 0;
        self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
    }

    /// Removes all elements from the table without freeing the backing memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear(&mut self) {
        // Ensure that the table is reset even if one of the drops panics
        let self_ = guard(self, |self_| self_.clear_no_drop());

        if mem::needs_drop::<T>() {
            unsafe {
                for item in self_.iter() {
                    item.drop();
                }
            }
        }
    }

    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
        // Calculate the minimal number of elements that we need to reserve
        // space for.
        let min_size = usize::max(self.items, min_size);
        if min_size == 0 {
            *self = Self::new();
            return;
        }

        // Calculate the number of buckets that we need for this number of
        // elements. If the calculation overflows then the requested bucket
        // count must be larger than what we have right now, and nothing needs
        // to be done.
        let min_buckets = match capacity_to_buckets(min_size) {
            Some(buckets) => buckets,
            None => return,
        };

        // If we have more buckets than we need, shrink the table.
        if min_buckets < self.buckets() {
            // Fast path if the table is empty
            if self.items == 0 {
                *self = Self::with_capacity(min_size)
            } else {
                self.resize(min_size, hasher, Fallibility::Infallible)
                    .unwrap_or_else(|_| unsafe { hint::unreachable_unchecked() });
            }
        }
    }

    /// Ensures that at least `additional` items can be inserted into the table
    /// without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
        if additional > self.growth_left {
            self.reserve_rehash(additional, hasher, Fallibility::Infallible)
                .unwrap_or_else(|_| unsafe { hint::unreachable_unchecked() });
        }
    }

    /// Tries to ensure that at least `additional` items can be inserted into
    /// the table without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn try_reserve(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
    ) -> Result<(), CollectionAllocErr> {
        if additional > self.growth_left {
            self.reserve_rehash(additional, hasher, Fallibility::Fallible)
        } else {
            Ok(())
        }
    }

    /// Out-of-line slow path for `reserve` and `try_reserve`.
    #[cold]
    #[inline(never)]
    fn reserve_rehash(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
        fallability: Fallibility,
    ) -> Result<(), CollectionAllocErr> {
        let new_items = self
            .items
            .checked_add(additional)
            .ok_or_else(|| fallability.capacity_overflow())?;

        let full_capacity = bucket_mask_to_capacity(self.bucket_mask);
        if new_items <= full_capacity / 2 {
            // Rehash in-place without re-allocating if we have plenty of spare
            // capacity that is locked up due to DELETED entries.
            self.rehash_in_place(hasher);
            Ok(())
        } else {
            // Otherwise, conservatively resize to at least the next size up
            // to avoid churning deletes into frequent rehashes.
            self.resize(
                usize::max(new_items, full_capacity + 1),
                hasher,
                fallability,
            )
        }
    }

    /// Rehashes the contents of the table in place (i.e. without changing the
    /// allocation).
    ///
    /// If `hasher` panics then some of the table's contents may be lost.
    fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
        unsafe {
            // Bulk convert all full control bytes to DELETED, and all DELETED
            // control bytes to EMPTY. This effectively frees up all buckets
            // containing a DELETED entry.
            for i in (0..self.buckets()).step_by(Group::WIDTH) {
                let group = Group::load_aligned(self.ctrl(i));
                let group = group.convert_special_to_empty_and_full_to_deleted();
                group.store_aligned(self.ctrl(i));
            }

            // Fix up the trailing control bytes. See the comments in set_ctrl
            // for the handling of tables smaller than the group width.
            if self.buckets() < Group::WIDTH {
                self.ctrl(0)
                    .copy_to(self.ctrl(Group::WIDTH), self.buckets());
            } else {
                self.ctrl(0)
                    .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
            }

            // If the hash function panics then properly clean up any elements
            // that we haven't rehashed yet. We unfortunately can't preserve the
            // elements since we lost their hashes and have no way of recovering
            // them without risking another panic.
            let mut guard = guard(self, |self_| {
                if mem::needs_drop::<T>() {
                    for i in 0..self_.buckets() {
                        if *self_.ctrl(i) == DELETED {
                            self_.set_ctrl(i, EMPTY);
                            self_.bucket(i).drop();
                            self_.items -= 1;
                        }
                    }
                }
                self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
            });

            // At this point, DELETED elements are elements that we haven't
            // rehashed yet. Find them and re-insert them at their ideal
            // position.
            'outer: for i in 0..guard.buckets() {
                if *guard.ctrl(i) != DELETED {
                    continue;
                }
                'inner: loop {
                    // Hash the current item
                    let item = guard.bucket(i);
                    let hash = hasher(item.as_ref());

                    // Search for a suitable place to put it
                    let new_i = guard.find_insert_slot(hash);

                    // Probing works by scanning through all of the control
                    // bytes in groups, which may not be aligned to the group
                    // size. If both the new and old position fall within the
                    // same unaligned group, then there is no benefit in moving
                    // it and we can just continue to the next item.
                    let probe_index = |pos: usize| {
                        (pos.wrapping_sub(guard.probe_seq(hash).pos) & guard.bucket_mask)
                            / Group::WIDTH
                    };
                    if likely(probe_index(i) == probe_index(new_i)) {
                        guard.set_ctrl(i, h2(hash));
                        continue 'outer;
                    }

                    // We are moving the current item to a new position. Write
                    // our H2 to the control byte of the new position.
                    let prev_ctrl = *guard.ctrl(new_i);
                    guard.set_ctrl(new_i, h2(hash));

                    if prev_ctrl == EMPTY {
                        // If the target slot is empty, simply move the current
                        // element into the new slot and clear the old control
                        // byte.
                        guard.set_ctrl(i, EMPTY);
                        guard.bucket(new_i).copy_from_nonoverlapping(&item);
                        continue 'outer;
                    } else {
                        // If the target slot is occupied, swap the two elements
                        // and then continue processing the element that we just
                        // swapped into the old slot.
                        debug_assert_eq!(prev_ctrl, DELETED);
                        mem::swap(guard.bucket(new_i).as_mut(), item.as_mut());
                        continue 'inner;
                    }
                }
            }

            guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
            mem::forget(guard);
        }
    }

    /// Allocates a new table of a different size and moves the contents of the
    /// current table into it.
    fn resize(
        &mut self,
        capacity: usize,
        hasher: impl Fn(&T) -> u64,
        fallability: Fallibility,
    ) -> Result<(), CollectionAllocErr> {
        unsafe {
            debug_assert!(self.items <= capacity);

            // Allocate and initialize the new table.
            let mut new_table = Self::try_with_capacity(capacity, fallability)?;
            new_table.growth_left -= self.items;
            new_table.items = self.items;

            // The hash function may panic, in which case we simply free the new
            // table without dropping any elements that may have been copied into
            // it.
            //
            // This guard is also used to free the old table on success, see
            // the comment at the bottom of this function.
            let mut new_table = guard(ManuallyDrop::new(new_table), |new_table| {
                if !new_table.is_empty_singleton() {
                    new_table.free_buckets();
                }
            });

            // Copy all elements to the new table.
            for item in self.iter() {
                // This may panic.
                let hash = hasher(item.as_ref());

                // We can use a simpler version of insert() here since:
                // - there are no DELETED entries.
                // - we know there is enough space in the table.
                // - all elements are unique.
                let index = new_table.find_insert_slot(hash);
                new_table.set_ctrl(index, h2(hash));
                new_table.bucket(index).copy_from_nonoverlapping(&item);
            }

            // We successfully copied all elements without panicking. Now replace
            // self with the new table. The old table will have its memory freed but
            // the items will not be dropped (since they have been moved into the
            // new table).
            mem::swap(self, &mut new_table);

            Ok(())
        }
    }

    /// Inserts a new element into the table.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
        unsafe {
            let mut index = self.find_insert_slot(hash);

            // We can avoid growing the table once we have reached our load
            // factor if we are replacing a tombstone. This works since the
            // number of EMPTY slots does not change in this case.
            let old_ctrl = *self.ctrl(index);
            if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
                self.reserve(1, hasher);
                index = self.find_insert_slot(hash);
            }

            let bucket = self.bucket(index);
            self.growth_left -= special_is_empty(old_ctrl) as usize;
            self.set_ctrl(index, h2(hash));
            bucket.write(value);
            self.items += 1;
            bucket
        }
    }

    /// Inserts a new element into the table, without growing the table.
    ///
    /// There must be enough space in the table to insert the new element.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "rustc-internal-api")]
    pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
        unsafe {
            let index = self.find_insert_slot(hash);
            let bucket = self.bucket(index);

            // If we are replacing a DELETED entry then we don't need to update
            // the load counter.
            let old_ctrl = *self.ctrl(index);
            self.growth_left -= special_is_empty(old_ctrl) as usize;

            self.set_ctrl(index, h2(hash));
            bucket.write(value);
            self.items += 1;
            bucket
        }
    }

    /// Searches for an element in the table.
    #[inline]
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
        unsafe {
            for pos in self.probe_seq(hash) {
                let group = Group::load(self.ctrl(pos));
                for bit in group.match_byte(h2(hash)) {
                    let index = (pos + bit) & self.bucket_mask;
                    let bucket = self.bucket(index);
                    if likely(eq(bucket.as_ref())) {
                        return Some(bucket);
                    }
                }
                if likely(group.match_empty().any_bit_set()) {
                    return None;
                }
            }
        }

        // probe_seq never returns.
        unreachable!();
    }

    /// Returns the number of elements the map can hold without reallocating.
    ///
    /// This number is a lower bound; the table might be able to hold
    /// more, but is guaranteed to be able to hold at least this many.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn capacity(&self) -> usize {
        self.items + self.growth_left
    }

    /// Returns the number of elements in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn len(&self) -> usize {
        self.items
    }

    /// Returns the number of buckets in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn buckets(&self) -> usize {
        self.bucket_mask + 1
    }

    /// Returns the number of control bytes in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    fn num_ctrl_bytes(&self) -> usize {
        self.bucket_mask + 1 + Group::WIDTH
    }

    /// Returns whether this table points to the empty singleton with a capacity
    /// of 0.
    #[cfg_attr(feature = "inline-more", inline)]
    fn is_empty_singleton(&self) -> bool {
        self.bucket_mask == 0
    }

    /// Returns an iterator over every element in the table. It is up to
    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
    /// Because we cannot make the `next` method unsafe on the `RawIter`
    /// struct, we have to make the `iter` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn iter(&self) -> RawIter<T> {
        let data = Bucket::from_base_index(self.data.as_ptr(), 0);
        RawIter {
            iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
            items: self.items,
        }
    }

    /// Returns an iterator which removes all elements from the table without
    /// freeing the memory. It is up to the caller to ensure that the `RawTable`
    /// outlives the `RawDrain`. Because we cannot make the `next` method unsafe
    /// on the `RawDrain`, we have to make the `drain` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drain(&mut self) -> RawDrain<'_, T> {
        RawDrain {
            iter: self.iter(),
            table: ManuallyDrop::new(mem::replace(self, Self::new())),
            orig_table: NonNull::from(self),
            marker: PhantomData,
        }
    }

    /// Converts the table into a raw allocation. The contents of the table
    /// should be dropped using a `RawIter` before freeing the allocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) fn into_alloc(self) -> Option<(NonNull<u8>, Layout)> {
        let alloc = if self.is_empty_singleton() {
            None
        } else {
            let (layout, _) = calculate_layout::<T>(self.buckets())
                .unwrap_or_else(|| unsafe { hint::unreachable_unchecked() });
            Some((self.ctrl.cast(), layout))
        };
        mem::forget(self);
        alloc
    }
}
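
// Illustrative sketches (not part of the upstream file) of the `RawTable`
// internals documented above. The element types, table sizes and the identity
// "hasher" `|v| *v` are arbitrary choices made purely for the sketch.
#[cfg(test)]
mod raw_table_sketch {
    use super::*;

    // Mirrors the `index2` arithmetic in `set_ctrl` for a table that is at
    // least one group wide: low indices are replicated just past the end of
    // the real control bytes, while higher indices map to themselves.
    #[test]
    fn set_ctrl_mirror_index() {
        fn index2(index: usize, bucket_mask: usize) -> usize {
            (index.wrapping_sub(Group::WIDTH) & bucket_mask) + Group::WIDTH
        }
        let buckets = usize::max(64, Group::WIDTH);
        let mask = buckets - 1;
        assert_eq!(index2(0, mask), buckets);
        assert_eq!(index2(Group::WIDTH, mask), Group::WIDTH);
        assert_eq!(index2(mask, mask), mask);
    }

    // Basic insert/find/erase round trip through the public unsafe API.
    #[test]
    fn insert_find_erase() {
        let hasher = |v: &u64| *v;
        let mut table = RawTable::<u64>::with_capacity(8);
        for v in 0..8u64 {
            table.insert(v, v, hasher);
        }
        assert_eq!(table.len(), 8);

        let found = table.find(3, |v| *v == 3).map(|b| unsafe { *b.as_ref() });
        assert_eq!(found, Some(3));
        assert!(table.find(99, |v| *v == 99).is_none());

        let bucket = table.find(3, |v| *v == 3).unwrap();
        // `u64` has no destructor, so skipping the drop is fine here.
        unsafe { table.erase_no_drop(&bucket) };
        assert_eq!(table.len(), 7);
        assert!(table.find(3, |v| *v == 3).is_none());
    }
}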

unsafe impl<T> Send for RawTable<T> where T: Send {}
unsafe impl<T> Sync for RawTable<T> where T: Sync {}

impl<T: Clone> Clone for RawTable<T> {
    fn clone(&self) -> Self {
        if self.is_empty_singleton() {
            Self::new()
        } else {
            unsafe {
                let mut new_table = ManuallyDrop::new(
                    Self::new_uninitialized(self.buckets(), Fallibility::Infallible)
                        .unwrap_or_else(|_| hint::unreachable_unchecked()),
                );

                // Copy the control bytes unchanged. We do this in a single pass
                self.ctrl(0)
                    .copy_to_nonoverlapping(new_table.ctrl(0), self.num_ctrl_bytes());

                {
                    // The cloning of elements may panic, in which case we need
                    // to make sure we drop only the elements that have been
                    // cloned so far.
                    let mut guard = guard((0, &mut new_table), |(index, new_table)| {
                        if mem::needs_drop::<T>() {
                            for i in 0..=*index {
                                if is_full(*new_table.ctrl(i)) {
                                    new_table.bucket(i).drop();
                                }
                            }
                        }
                        new_table.free_buckets();
                    });

                    for from in self.iter() {
                        let index = self.bucket_index(&from);
                        let to = guard.1.bucket(index);
                        to.write(from.as_ref().clone());

                        // Update the index in case we need to unwind.
                        guard.0 = index;
                    }

                    // Successfully cloned all items, no need to clean up.
                    mem::forget(guard);
                }

                // Return the newly created table.
                new_table.items = self.items;
                new_table.growth_left = self.growth_left;
                ManuallyDrop::into_inner(new_table)
            }
        }
    }
}
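
// Illustrative sketch (not part of the upstream file): cloning a table copies
// the control bytes and clones each element in place, so the clone reports the
// same length and finds the same keys. The identity hasher and `u64` payload
// are arbitrary choices for the test.
#[cfg(test)]
mod clone_sketch {
    use super::*;

    #[test]
    fn clone_preserves_contents() {
        let hasher = |v: &u64| *v;
        let mut table = RawTable::<u64>::with_capacity(4);
        for v in 0..4u64 {
            table.insert(v, v, hasher);
        }
        let copy = table.clone();
        assert_eq!(copy.len(), table.len());
        assert!(copy.find(2, |v| *v == 2).is_some());
    }
}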

#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T> Drop for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                if mem::needs_drop::<T>() {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}
#[cfg(not(feature = "nightly"))]
impl<T> Drop for RawTable<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        if !self.is_empty_singleton() {
            unsafe {
                if mem::needs_drop::<T>() {
                    for item in self.iter() {
                        item.drop();
                    }
                }
                self.free_buckets();
            }
        }
    }
}

impl<T> IntoIterator for RawTable<T> {
    type Item = T;
    type IntoIter = RawIntoIter<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn into_iter(self) -> RawIntoIter<T> {
        unsafe {
            let iter = self.iter();
            let alloc = self.into_alloc();
            RawIntoIter {
                iter,
                alloc,
                marker: PhantomData,
            }
        }
    }
}
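
// Illustrative sketch (not part of the upstream file): `into_iter` hands the
// allocation over to `RawIntoIter`, which yields every element by value and
// frees the buckets when it is dropped. The `u32` payload and small capacity
// are arbitrary choices for the test.
#[cfg(test)]
mod into_iter_sketch {
    use super::*;

    #[test]
    fn into_iter_yields_every_element() {
        let hasher = |v: &u32| *v as u64;
        let mut table = RawTable::<u32>::with_capacity(3);
        for v in 1..=3u32 {
            table.insert(v as u64, v, hasher);
        }
        let mut sum = 0u32;
        for v in table {
            sum += v;
        }
        assert_eq!(sum, 6);
    }
}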

/// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does
/// not track an item count.
pub(crate) struct RawIterRange<T> {
    // Mask of full buckets in the current group. Bits are cleared from this
    // mask as each element is processed.
    current_group: BitMask,

    // Pointer to the buckets for the current group.
    data: Bucket<T>,

    // Pointer to the next group of control bytes.
    // Must be aligned to the group size.
    next_ctrl: *const u8,

    // Pointer one past the last control byte of this range.
    end: *const u8,
}

impl<T> RawIterRange<T> {
    /// Returns a `RawIterRange` covering a subset of a table.
    ///
    /// The control byte address must be aligned to the group size.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
        debug_assert_ne!(len, 0);
        debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
        let end = ctrl.add(len);

        // Load the first group and advance ctrl to point to the next group
        let current_group = Group::load_aligned(ctrl).match_full();
        let next_ctrl = ctrl.add(Group::WIDTH);

        Self {
            current_group,
            data,
            next_ctrl,
            end,
        }
    }

    /// Splits a `RawIterRange` into two halves.
    ///
    /// Returns `None` if the remaining range is smaller than or equal to the
    /// group width.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "rayon")]
    pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) {
        unsafe {
            if self.end <= self.next_ctrl {
                // Nothing to split if the group that we are currently
                // processing is the last one.
                (self, None)
            } else {
                // len is the remaining number of elements after the group that
                // we are currently processing. It must be a multiple of the
                // group size (small tables are caught by the check above).
                let len = offset_from(self.end, self.next_ctrl);
                debug_assert_eq!(len % Group::WIDTH, 0);

                // Split the remaining elements into two halves, but round the
                // midpoint down in case there is an odd number of groups
                // remaining. This ensures that:
                // - The tail is at least 1 group long.
                // - The split is roughly even considering we still have the
                //   current group to process.
                let mid = (len / 2) & !(Group::WIDTH - 1);

                let tail = Self::new(
                    self.next_ctrl.add(mid),
                    self.data.add(Group::WIDTH).add(mid),
                    len - mid,
                );
                debug_assert_eq!(self.data.add(Group::WIDTH).add(mid).ptr, tail.data.ptr);
                debug_assert_eq!(self.end, tail.end);
                self.end = self.next_ctrl.add(mid);
                debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl);
                (self, Some(tail))
            }
        }
    }
}

// We make raw iterators unconditionally Send and Sync, and let the PhantomData
// in the actual iterator implementations determine the real Send/Sync bounds.
unsafe impl<T> Send for RawIterRange<T> {}
unsafe impl<T> Sync for RawIterRange<T> {}

impl<T> Clone for RawIterRange<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self {
            data: self.data.clone(),
            next_ctrl: self.next_ctrl,
            current_group: self.current_group,
            end: self.end,
        }
    }
}

impl<T> Iterator for RawIterRange<T> {
    type Item = Bucket<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        unsafe {
            loop {
                if let Some(index) = self.current_group.lowest_set_bit() {
                    self.current_group = self.current_group.remove_lowest_bit();
                    return Some(self.data.add(index));
                }

                if self.next_ctrl >= self.end {
                    return None;
                }

                // We might read past self.end up to the next group boundary,
                // but this is fine because it only occurs on tables smaller
                // than the group size where the trailing control bytes are all
                // EMPTY. On larger tables self.end is guaranteed to be aligned
                // to the group size (since tables are power-of-two sized).
                self.current_group = Group::load_aligned(self.next_ctrl).match_full();
                self.data = self.data.add(Group::WIDTH);
                self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
            }
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // We don't have an item count, so just guess based on the range size.
        (
            0,
            Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }),
        )
    }
}

impl<T> FusedIterator for RawIterRange<T> {}

/// Iterator which returns a raw pointer to every full bucket in the table.
pub struct RawIter<T> {
    pub(crate) iter: RawIterRange<T>,
    items: usize,
}

impl<T> Clone for RawIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self {
            iter: self.iter.clone(),
            items: self.items,
        }
    }
}

impl<T> Iterator for RawIter<T> {
    type Item = Bucket<T>;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<Bucket<T>> {
        if let Some(b) = self.iter.next() {
            self.items -= 1;
            Some(b)
        } else {
            // We don't check against items == 0 here to allow the
            // compiler to optimize away the item count entirely if the
            // iterator length is never queried.
            debug_assert_eq!(self.items, 0);
            None
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.items, Some(self.items))
    }
}

impl<T> ExactSizeIterator for RawIter<T> {}
impl<T> FusedIterator for RawIter<T> {}

/// Iterator which consumes a table and returns elements.
pub struct RawIntoIter<T> {
    iter: RawIter<T>,
    alloc: Option<(NonNull<u8>, Layout)>,
    marker: PhantomData<T>,
}

impl<T> RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}

unsafe impl<T> Send for RawIntoIter<T> where T: Send {}
unsafe impl<T> Sync for RawIntoIter<T> where T: Sync {}

#[cfg(feature = "nightly")]
unsafe impl<#[may_dangle] T> Drop for RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Free the table
            if let Some((ptr, layout)) = self.alloc {
                dealloc(ptr.as_ptr(), layout);
            }
        }
    }
}
#[cfg(not(feature = "nightly"))]
impl<T> Drop for RawIntoIter<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements
            if mem::needs_drop::<T>() {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Free the table
            if let Some((ptr, layout)) = self.alloc {
                dealloc(ptr.as_ptr(), layout);
            }
        }
    }
}

impl<T> Iterator for RawIntoIter<T> {
    type Item = T;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        unsafe { Some(self.iter.next()?.read()) }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<T> ExactSizeIterator for RawIntoIter<T> {}
impl<T> FusedIterator for RawIntoIter<T> {}

/// Iterator which consumes elements without freeing the table storage.
pub struct RawDrain<'a, T> {
    iter: RawIter<T>,

    // The table is moved into the iterator for the duration of the drain. This
    // ensures that an empty table is left if the drain iterator is leaked
    // without dropping.
    table: ManuallyDrop<RawTable<T>>,
    orig_table: NonNull<RawTable<T>>,

    // We don't use a &'a mut RawTable<T> because we want RawDrain to be
    // covariant over T.
    marker: PhantomData<&'a RawTable<T>>,
}

impl<T> RawDrain<'_, T> {
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn iter(&self) -> RawIter<T> {
        self.iter.clone()
    }
}

unsafe impl<T> Send for RawDrain<'_, T> where T: Send {}
unsafe impl<T> Sync for RawDrain<'_, T> where T: Sync {}

impl<T> Drop for RawDrain<'_, T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn drop(&mut self) {
        unsafe {
            // Drop all remaining elements. Note that this may panic.
            if mem::needs_drop::<T>() {
                while let Some(item) = self.iter.next() {
                    item.drop();
                }
            }

            // Reset the contents of the table now that all elements have been
            // dropped.
            self.table.clear_no_drop();

            // Move the now empty table back to its original location.
            self.orig_table
                .as_ptr()
                .copy_from_nonoverlapping(&*self.table, 1);
        }
    }
}

impl<T> Iterator for RawDrain<'_, T> {
    type Item = T;

    #[cfg_attr(feature = "inline-more", inline)]
    fn next(&mut self) -> Option<T> {
        unsafe {
            let item = self.iter.next()?;
            Some(item.read())
        }
    }

    #[cfg_attr(feature = "inline-more", inline)]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<T> ExactSizeIterator for RawDrain<'_, T> {}
impl<T> FusedIterator for RawDrain<'_, T> {}