use crate::alloc::alloc::{handle_alloc_error, Layout};
use crate::scopeguard::guard;
use crate::TryReserveError;
#[cfg(feature = "nightly")]
use crate::UnavailableMutError;
use core::hint;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::mem::ManuallyDrop;
#[cfg(feature = "nightly")]
use core::mem::MaybeUninit;
use core::ptr::NonNull;

cfg_if! {
    // Use the SSE2 implementation if possible: it allows us to scan 16 buckets
    // at once instead of 8. We don't bother with AVX since it would require
    // runtime dispatch and wouldn't gain us much anyways: the probability of
    // finding a match drops off drastically after the first few buckets.
    //
    // I attempted an implementation on ARM using NEON instructions, but it
    // turns out that most NEON instructions have multi-cycle latency, which in
    // the end outweighs any gains over the generic implementation.
    if #[cfg(all(
        target_feature = "sse2",
        any(target_arch = "x86", target_arch = "x86_64"),
        not(miri)
    ))] {
        mod sse2;
        use sse2 as imp;
    } else {
        #[path = "generic.rs"]
        mod generic;
        use generic as imp;
    }
}

mod alloc;
pub(crate) use self::alloc::{do_alloc, Allocator, Global};

mod bitmask;

use self::bitmask::{BitMask, BitMaskIter};
use self::imp::Group;

// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};

// On stable we can use #[cold] to get an equivalent effect: this attribute
// suggests that the function is unlikely to be called.
#[cfg(not(feature = "nightly"))]
#[inline]
#[cold]
fn cold() {}

#[cfg(not(feature = "nightly"))]
#[inline]
fn likely(b: bool) -> bool {
    if !b {
        cold()
    }
    b
}
#[cfg(not(feature = "nightly"))]
#[inline]
fn unlikely(b: bool) -> bool {
    if b {
        cold()
    }
    b
}

#[cfg(feature = "nightly")]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    to.offset_from(from) as usize
}
#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn offset_from<T>(to: *const T, from: *const T) -> usize {
    (to as usize - from as usize) / mem::size_of::<T>()
}

/// Whether memory allocation errors should return an error or abort.
#[derive(Copy, Clone)]
enum Fallibility {
    Fallible,
    Infallible,
}

impl Fallibility {
    /// Error to return on capacity overflow.
    #[cfg_attr(feature = "inline-more", inline)]
    fn capacity_overflow(self) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::CapacityOverflow,
            Fallibility::Infallible => panic!("Hash table capacity overflow"),
        }
    }

    /// Error to return on allocation error.
    #[cfg_attr(feature = "inline-more", inline)]
    fn alloc_err(self, layout: Layout) -> TryReserveError {
        match self {
            Fallibility::Fallible => TryReserveError::AllocError { layout },
            Fallibility::Infallible => handle_alloc_error(layout),
        }
    }
}

/// Control byte value for an empty bucket.
const EMPTY: u8 = 0b1111_1111;

/// Control byte value for a deleted bucket.
const DELETED: u8 = 0b1000_0000;

/// Checks whether a control byte represents a full bucket (top bit is clear).
#[inline]
fn is_full(ctrl: u8) -> bool {
    ctrl & 0x80 == 0
}

/// Checks whether a control byte represents a special value (top bit is set).
#[inline]
fn is_special(ctrl: u8) -> bool {
    ctrl & 0x80 != 0
}

/// Checks whether a special control value is EMPTY (just check 1 bit).
#[inline]
fn special_is_empty(ctrl: u8) -> bool {
    debug_assert!(is_special(ctrl));
    ctrl & 0x01 != 0
}
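
// A small illustrative check of the control-byte encoding described above.
// This test module is a sketch added for clarity (its name is arbitrary and it
// is not upstream API), but it only exercises items defined in this file.
#[cfg(test)]
mod ctrl_byte_demo {
    use super::*;

    #[test]
    fn control_byte_encoding() {
        // Full buckets store a 7-bit hash fragment with the top bit clear.
        assert!(is_full(0b0101_1010));
        // EMPTY and DELETED both have the top bit set...
        assert!(is_special(EMPTY) && is_special(DELETED));
        // ...and are distinguished by the low bit.
        assert!(special_is_empty(EMPTY));
        assert!(!special_is_empty(DELETED));
    }
}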

/// Primary hash function, used to select the initial bucket to probe from.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h1(hash: u64) -> usize {
    // On 32-bit platforms we simply ignore the higher hash bits.
    hash as usize
}

/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
fn h2(hash: u64) -> u8 {
    // Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
    // value, some hash functions (such as FxHash) produce a usize result
    // instead, which means that the top 32 bits are 0 on 32-bit platforms.
    let hash_len = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
    let top7 = hash >> (hash_len * 8 - 7);
    (top7 & 0x7f) as u8 // truncation
}
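
// Illustrative sketch of what `h2` guarantees: the result always fits in 7
// bits, so a stored hash fragment can never be mistaken for the special
// EMPTY/DELETED control bytes. This test module is an added example, not
// upstream code.
#[cfg(test)]
mod h2_demo {
    use super::*;

    #[test]
    fn h2_keeps_only_the_top_seven_bits() {
        for &hash in &[0u64, 1, 0xDEAD_BEEF, u64::MAX] {
            assert!(h2(hash) < 0x80);
        }
        assert_eq!(h2(0), 0);
        assert_eq!(h2(u64::MAX), 0x7f);
    }
}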

/// Probe sequence based on triangular numbers, which is guaranteed (since our
/// table size is a power of two) to visit every group of elements exactly once.
///
/// A triangular probe has us jump by 1 more group every time. So first we
/// jump by 1 group (meaning we just continue our linear scan), then 2 groups
/// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
///
/// Proof that the probe will visit every group in the table:
/// <https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/>
struct ProbeSeq {
    pos: usize,
    stride: usize,
}

impl ProbeSeq {
    #[inline]
    fn move_next(&mut self, bucket_mask: usize) {
        // We should have found an empty bucket by now and ended the probe.
        debug_assert!(
            self.stride <= bucket_mask,
            "Went past end of probe sequence"
        );

        self.stride += Group::WIDTH;
        self.pos += self.stride;
        self.pos &= bucket_mask;
    }
}
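
// Illustrative check of the coverage property documented on `ProbeSeq`: for a
// 64-bucket table the triangular probe visits every group-sized offset exactly
// once before the sequence wraps around. This test module is an added sketch
// (the constants are arbitrary), not upstream code.
#[cfg(test)]
mod probe_seq_demo {
    use super::*;

    #[test]
    fn triangular_probe_visits_every_group_once() {
        let bucket_mask = 63;
        let groups = (bucket_mask + 1) / Group::WIDTH;
        let mut seq = ProbeSeq {
            pos: 17 & bucket_mask,
            stride: 0,
        };
        let start = seq.pos;
        let mut seen = 0u64;
        for _ in 0..groups {
            // Offset of the current group start relative to where we began.
            let offset = (seq.pos + bucket_mask + 1 - start) & bucket_mask;
            seen |= 1 << (offset / Group::WIDTH);
            seq.move_next(bucket_mask);
        }
        assert_eq!(seen.count_ones() as usize, groups);
    }
}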

/// Returns the number of buckets needed to hold the given number of items,
/// taking the maximum load factor into account.
///
/// Returns `None` if an overflow occurs.
// Workaround for emscripten bug emscripten-core/emscripten-fastcomp#258
#[cfg_attr(target_os = "emscripten", inline(never))]
#[cfg_attr(not(target_os = "emscripten"), inline)]
fn capacity_to_buckets(cap: usize) -> Option<usize> {
    debug_assert_ne!(cap, 0);

    // For small tables we require at least 1 empty bucket so that lookups are
    // guaranteed to terminate if an element doesn't exist in the table.
    if cap < 8 {
        // We don't bother with a table size of 2 buckets since that can only
        // hold a single element. Instead we skip directly to a 4 bucket table
        // which can hold 3 elements.
        return Some(if cap < 4 { 4 } else { 8 });
    }

    // Otherwise require 1/8 of the buckets to be empty (87.5% load).
    //
    // Be careful when modifying this, calculate_layout relies on the
    // overflow check here.
    let adjusted_cap = cap.checked_mul(8)? / 7;

    // Any overflows will have been caught by the checked_mul. Also, any
    // rounding errors from the division above will be cleaned up by
    // next_power_of_two (which can't overflow because of the previous division).
    Some(adjusted_cap.next_power_of_two())
}
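
// Worked examples for `capacity_to_buckets`, added as an illustration (not
// upstream code): small requests jump straight to 4 or 8 buckets, larger ones
// round 8/7 of the capacity up to the next power of two.
#[cfg(test)]
mod capacity_to_buckets_demo {
    use super::*;

    #[test]
    fn small_and_large_capacities() {
        assert_eq!(capacity_to_buckets(1), Some(4));
        assert_eq!(capacity_to_buckets(3), Some(4));
        assert_eq!(capacity_to_buckets(4), Some(8));
        // 100 * 8 / 7 = 114, which rounds up to 128 buckets.
        assert_eq!(capacity_to_buckets(100), Some(128));
    }
}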

/// Returns the maximum effective capacity for the given bucket mask, taking
/// the maximum load factor into account.
#[inline]
fn bucket_mask_to_capacity(bucket_mask: usize) -> usize {
    if bucket_mask < 8 {
        // For tables with 1/2/4/8 buckets, we always reserve one empty slot.
        // Keep in mind that the bucket mask is one less than the bucket count.
        bucket_mask
    } else {
        // For larger tables we reserve 12.5% of the slots as empty.
        ((bucket_mask + 1) / 8) * 7
    }
}
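
// Illustration (added, not upstream) of how the two load-factor helpers
// relate: a bucket count produced by `capacity_to_buckets` yields at least the
// requested capacity when fed back through `bucket_mask_to_capacity`.
#[cfg(test)]
mod load_factor_demo {
    use super::*;

    #[test]
    fn buckets_and_capacity_round_trip() {
        // 128 buckets keep 12.5% of the slots empty: capacity 112.
        assert_eq!(bucket_mask_to_capacity(127), 112);
        // Asking for that capacity gives back the same bucket count.
        assert_eq!(capacity_to_buckets(112), Some(128));
        // Small tables always keep one slot free.
        assert_eq!(bucket_mask_to_capacity(3), 3);
    }
}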

/// Helper which allows the max calculation for ctrl_align to be statically computed for each T
/// while keeping the rest of `calculate_layout_for` independent of `T`.
#[derive(Copy, Clone)]
struct TableLayout {
    size: usize,
    ctrl_align: usize,
}

impl TableLayout {
    #[inline]
    fn new<T>() -> Self {
        let layout = Layout::new::<T>();
        Self {
            size: layout.size(),
            ctrl_align: usize::max(layout.align(), Group::WIDTH),
        }
    }

    #[inline]
    fn calculate_layout_for(self, buckets: usize) -> Option<(Layout, usize)> {
        debug_assert!(buckets.is_power_of_two());

        let TableLayout { size, ctrl_align } = self;
        // Manual layout calculation since Layout methods are not yet stable.
        let ctrl_offset =
            size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
        let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;

        Some((
            unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
            ctrl_offset,
        ))
    }
}
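
// Worked example of the layout computed above, added as an illustration (not
// upstream code). For 8 buckets of `u64` the data occupies 64 bytes, which is
// already aligned for the control bytes, so the control bytes start right
// after the data and are followed by `Group::WIDTH` replicated trailing bytes.
#[cfg(test)]
mod table_layout_demo {
    use super::*;

    #[test]
    fn layout_for_a_small_u64_table() {
        let (layout, ctrl_offset) = TableLayout::new::<u64>()
            .calculate_layout_for(8)
            .unwrap();
        assert_eq!(ctrl_offset, 64);
        assert_eq!(layout.size(), 64 + 8 + Group::WIDTH);
        assert_eq!(
            layout.align(),
            usize::max(mem::align_of::<u64>(), Group::WIDTH)
        );
    }
}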

/// Returns a Layout which describes the allocation required for a hash table,
/// and the offset of the control bytes in the allocation.
/// (the offset is also one past the last element of the buckets)
///
/// Returns `None` if an overflow occurs.
#[cfg_attr(feature = "inline-more", inline)]
fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
    TableLayout::new::<T>().calculate_layout_for(buckets)
}

/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
/// is a ZST, then we instead track the index of the element in the table so
/// that `erase` works properly.
pub struct Bucket<T> {
    // Actually this points one past the element (i.e. at the next element)
    // rather than at the element itself. This is needed to maintain pointer
    // arithmetic invariants: keeping a direct pointer to the element would
    // introduce difficulties. `NonNull` is used for variance and niche layout.
    ptr: NonNull<T>,
}

// This Send impl is needed for rayon support. This is safe since Bucket is
// never exposed in a public API.
unsafe impl<T> Send for Bucket<T> {}

impl<T> Clone for Bucket<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    fn clone(&self) -> Self {
        Self { ptr: self.ptr }
    }
}

impl<T> Bucket<T> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            // won't overflow because index must be less than length
            (index + 1) as *mut T
        } else {
            base.as_ptr().sub(index)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
        if mem::size_of::<T>() == 0 {
            self.ptr.as_ptr() as usize - 1
        } else {
            offset_from(base.as_ptr(), self.ptr.as_ptr())
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn as_ptr(&self) -> *mut T {
        if mem::size_of::<T>() == 0 {
            // Just return an arbitrary ZST pointer which is properly aligned
            mem::align_of::<T>() as *mut T
        } else {
            unsafe { self.ptr.as_ptr().sub(1) }
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn next_n(&self, offset: usize) -> Self {
        let ptr = if mem::size_of::<T>() == 0 {
            (self.ptr.as_ptr() as usize + offset) as *mut T
        } else {
            self.ptr.as_ptr().sub(offset)
        };
        Self {
            ptr: NonNull::new_unchecked(ptr),
        }
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drop(&self) {
        self.as_ptr().drop_in_place();
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn read(&self) -> T {
        self.as_ptr().read()
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn write(&self, val: T) {
        self.as_ptr().write(val);
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_ref<'a>(&self) -> &'a T {
        &*self.as_ptr()
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
        &mut *self.as_ptr()
    }
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
        self.as_ptr().copy_from_nonoverlapping(other.as_ptr(), 1);
    }
}
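
// Illustrative sketch (added, not upstream code) of the "one past the element"
// convention used by `Bucket`: buckets are indexed downwards from a base
// pointer that sits one past the end of the data, mirroring how the data is
// laid out immediately below the control bytes. The backing array here is an
// arbitrary stand-in for the table's data area.
#[cfg(test)]
mod bucket_demo {
    use super::*;

    #[test]
    fn buckets_are_indexed_downwards_from_the_base() {
        let mut data = [10u32, 20, 30, 40];
        unsafe {
            // Like `data_end()`, `base` points one past the last element.
            let base = NonNull::new_unchecked(data.as_mut_ptr().add(data.len()));
            let bucket = Bucket::from_base_index(base, 0);
            // Index 0 is the element closest to the base pointer.
            assert_eq!(*bucket.as_ref(), 40);
            assert_eq!(bucket.to_base_index(base), 0);
            // Moving "forward" by 3 buckets walks down to the first element.
            let third = bucket.next_n(3);
            assert_eq!(*third.as_ref(), 10);
        }
    }
}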

/// A raw hash table with an unsafe API.
pub struct RawTable<T, A: Allocator + Clone = Global> {
    table: RawTableInner<A>,
    // Tell dropck that we own instances of T.
    marker: PhantomData<T>,
}

/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
/// of how many different key-value types are used.
struct RawTableInner<A> {
    // Mask to get an index from a hash value. The value is one less than the
    // number of buckets in the table.
    bucket_mask: usize,

    // [Padding], T1, T2, ..., Tlast, C1, C2, ...
    //                                ^ points here
    ctrl: NonNull<u8>,

    // Number of elements that can be inserted before we need to grow the table
    growth_left: usize,

    // Number of elements in the table, only really used by len()
    items: usize,

    alloc: A,
}

impl<T> RawTable<T, Global> {
    /// Creates a new empty hash table without allocating any memory.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub const fn new() -> Self {
        Self {
            table: RawTableInner::new_in(Global),
            marker: PhantomData,
        }
    }

    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    #[cfg(feature = "raw")]
    pub fn try_with_capacity(capacity: usize) -> Result<Self, TryReserveError> {
        Self::try_with_capacity_in(capacity, Global)
    }

    /// Allocates a new hash table with at least enough capacity for inserting
    /// the given number of elements without reallocating.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, Global)
    }
}

impl<T, A: Allocator + Clone> RawTable<T, A> {
    /// Creates a new empty hash table without allocating any memory, using the
    /// given allocator.
    ///
    /// In effect this returns a table with exactly 1 bucket. However we can
    /// leave the data pointer dangling since that bucket is never written to
    /// due to our load factor forcing us to always have at least 1 free bucket.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn new_in(alloc: A) -> Self {
        Self {
            table: RawTableInner::new_in(alloc),
            marker: PhantomData,
        }
    }

    /// Allocates a new hash table with the given number of buckets.
    ///
    /// The control bytes are left uninitialized.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(
        alloc: A,
        buckets: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        debug_assert!(buckets.is_power_of_two());

        Ok(Self {
            table: RawTableInner::new_uninitialized(
                alloc,
                TableLayout::new::<T>(),
                buckets,
                fallibility,
            )?,
            marker: PhantomData,
        })
    }

    /// Attempts to allocate a new hash table with at least enough capacity
    /// for inserting the given number of elements without reallocating.
    fn fallible_with_capacity(
        alloc: A,
        capacity: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        Ok(Self {
            table: RawTableInner::fallible_with_capacity(
                alloc,
                TableLayout::new::<T>(),
                capacity,
                fallibility,
            )?,
            marker: PhantomData,
        })
    }

    /// Attempts to allocate a new hash table using the given allocator, with at least enough
    /// capacity for inserting the given number of elements without reallocating.
    #[cfg(feature = "raw")]
    pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
        Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible)
    }

    /// Allocates a new hash table using the given allocator, with at least enough capacity for
    /// inserting the given number of elements without reallocating.
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
        match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) {
            Ok(capacity) => capacity,
            Err(_) => unsafe { hint::unreachable_unchecked() },
        }
    }

    /// Deallocates the table without dropping any entries.
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn free_buckets(&mut self) {
        self.table.free_buckets(TableLayout::new::<T>())
    }

    /// Returns a pointer to one past the last element of the data table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn data_end(&self) -> NonNull<T> {
        NonNull::new_unchecked(self.table.ctrl.as_ptr().cast())
    }

    /// Returns a pointer to the start of the data table.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(feature = "nightly")]
    pub unsafe fn data_start(&self) -> *mut T {
        self.data_end().as_ptr().wrapping_sub(self.buckets())
    }

    /// Returns the index of a bucket from a `Bucket`.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
        bucket.to_base_index(self.data_end())
    }

    /// Returns a pointer to an element in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
        debug_assert_ne!(self.table.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data_end(), index)
    }

    /// Erases an element from the table without dropping it.
    #[cfg_attr(feature = "inline-more", inline)]
    #[deprecated(since = "0.8.1", note = "use erase or remove instead")]
    pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
        let index = self.bucket_index(item);
        self.table.erase(index)
    }

    /// Erases an element from the table, dropping it in place.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    #[allow(deprecated)]
    pub unsafe fn erase(&mut self, item: Bucket<T>) {
        // Erase the element from the table first since drop might panic.
        self.erase_no_drop(&item);
        item.drop();
    }

    /// Finds and erases an element from the table, dropping it in place.
    /// Returns true if an element was found.
    #[cfg(feature = "raw")]
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn erase_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> bool {
        // Avoid `Option::map` because it bloats LLVM IR.
        if let Some(bucket) = self.find(hash, eq) {
            unsafe { self.erase(bucket) };
            true
        } else {
            false
        }
    }

    /// Removes an element from the table, returning it.
    #[cfg_attr(feature = "inline-more", inline)]
    #[allow(clippy::needless_pass_by_value)]
    #[allow(deprecated)]
    pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
        self.erase_no_drop(&item);
        item.read()
    }

    /// Finds and removes an element from the table, returning it.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { self.remove(bucket) }),
            None => None,
        }
    }

    /// Marks all table buckets as empty without dropping their contents.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear_no_drop(&mut self) {
        self.table.clear_no_drop()
    }

    /// Removes all elements from the table without freeing the backing memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn clear(&mut self) {
        // Ensure that the table is reset even if one of the drops panics.
        let mut self_ = guard(self, |self_| self_.clear_no_drop());
        unsafe {
            self_.drop_elements();
        }
    }

    unsafe fn drop_elements(&mut self) {
        if mem::needs_drop::<T>() && self.len() != 0 {
            for item in self.iter() {
                item.drop();
            }
        }
    }

    /// Shrinks the table to fit `max(self.len(), min_size)` elements.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn shrink_to(&mut self, min_size: usize, hasher: impl Fn(&T) -> u64) {
        // Calculate the minimal number of elements that we need to reserve
        // space for.
        let min_size = usize::max(self.table.items, min_size);
        if min_size == 0 {
            *self = Self::new_in(self.table.alloc.clone());
            return;
        }

        // Calculate the number of buckets that we need for this number of
        // elements. If the calculation overflows then the requested bucket
        // count must be larger than what we have right now and nothing needs
        // to be done.
        let min_buckets = match capacity_to_buckets(min_size) {
            Some(buckets) => buckets,
            None => return,
        };

        // If we have more buckets than we need, shrink the table.
        if min_buckets < self.buckets() {
            // Fast path if the table is empty
            if self.table.items == 0 {
                *self = Self::with_capacity_in(min_size, self.table.alloc.clone())
            } else {
                // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
                if self
                    .resize(min_size, hasher, Fallibility::Infallible)
                    .is_err()
                {
                    unsafe { hint::unreachable_unchecked() }
                }
            }
        }
    }

    /// Ensures that at least `additional` items can be inserted into the table
    /// without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
        if additional > self.table.growth_left {
            // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
            if self
                .reserve_rehash(additional, hasher, Fallibility::Infallible)
                .is_err()
            {
                unsafe { hint::unreachable_unchecked() }
            }
        }
    }

    /// Tries to ensure that at least `additional` items can be inserted into
    /// the table without reallocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn try_reserve(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
    ) -> Result<(), TryReserveError> {
        if additional > self.table.growth_left {
            self.reserve_rehash(additional, hasher, Fallibility::Fallible)
        } else {
            Ok(())
        }
    }

    /// Out-of-line slow path for `reserve` and `try_reserve`.
    #[cold]
    #[inline(never)]
    fn reserve_rehash(
        &mut self,
        additional: usize,
        hasher: impl Fn(&T) -> u64,
        fallibility: Fallibility,
    ) -> Result<(), TryReserveError> {
        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
        let new_items = match self.table.items.checked_add(additional) {
            Some(new_items) => new_items,
            None => return Err(fallibility.capacity_overflow()),
        };
        let full_capacity = bucket_mask_to_capacity(self.table.bucket_mask);
        if new_items <= full_capacity / 2 {
            // Rehash in-place without re-allocating if we have plenty of spare
            // capacity that is locked up due to DELETED entries.
            self.rehash_in_place(hasher);
            Ok(())
        } else {
            // Otherwise, conservatively resize to at least the next size up
            // to avoid churning deletes into frequent rehashes.
            self.resize(
                usize::max(new_items, full_capacity + 1),
                hasher,
                fallibility,
            )
        }
    }

    /// Rehashes the contents of the table in place (i.e. without changing the
    /// allocation).
    ///
    /// If `hasher` panics then some of the table's contents may be lost.
    fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
        unsafe {
            // If the hash function panics then properly clean up any elements
            // that we haven't rehashed yet. We unfortunately can't preserve the
            // elements since we lost their hashes and have no way of recovering
            // them without risking another panic.
            self.table.prepare_rehash_in_place();

            let mut guard = guard(&mut self.table, move |self_| {
                if mem::needs_drop::<T>() {
                    for i in 0..self_.buckets() {
                        if *self_.ctrl(i) == DELETED {
                            self_.set_ctrl(i, EMPTY);
                            self_.bucket::<T>(i).drop();
                            self_.items -= 1;
                        }
                    }
                }
                self_.growth_left = bucket_mask_to_capacity(self_.bucket_mask) - self_.items;
            });

            // At this point, DELETED elements are elements that we haven't
            // rehashed yet. Find them and re-insert them at their ideal
            // position.
            'outer: for i in 0..guard.buckets() {
                if *guard.ctrl(i) != DELETED {
                    continue;
                }

                'inner: loop {
                    // Hash the current item
                    let item = guard.bucket(i);
                    let hash = hasher(item.as_ref());

                    // Search for a suitable place to put it
                    let new_i = guard.find_insert_slot(hash);

                    // Probing works by scanning through all of the control
                    // bytes in groups, which may not be aligned to the group
                    // size. If both the new and old position fall within the
                    // same unaligned group, then there is no benefit in moving
                    // it and we can just continue to the next item.
                    if likely(guard.is_in_same_group(i, new_i, hash)) {
                        guard.set_ctrl_h2(i, hash);
                        continue 'outer;
                    }

                    // We are moving the current item to a new position. Write
                    // our H2 to the control byte of the new position.
                    let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
                    if prev_ctrl == EMPTY {
                        guard.set_ctrl(i, EMPTY);
                        // If the target slot is empty, simply move the current
                        // element into the new slot and clear the old control
                        // byte.
                        guard.bucket(new_i).copy_from_nonoverlapping(&item);
                        continue 'outer;
                    } else {
                        // If the target slot is occupied, swap the two elements
                        // and then continue processing the element that we just
                        // swapped into the old slot.
                        debug_assert_eq!(prev_ctrl, DELETED);
                        mem::swap(guard.bucket(new_i).as_mut(), item.as_mut());
                        continue 'inner;
                    }
                }
            }

            guard.growth_left = bucket_mask_to_capacity(guard.bucket_mask) - guard.items;
            mem::forget(guard);
        }
    }

    /// Allocates a new table of a different size and moves the contents of the
    /// current table into it.
    fn resize(
        &mut self,
        capacity: usize,
        hasher: impl Fn(&T) -> u64,
        fallibility: Fallibility,
    ) -> Result<(), TryReserveError> {
        unsafe {
            let mut new_table =
                self.table
                    .prepare_resize(TableLayout::new::<T>(), capacity, fallibility)?;

            // Copy all elements to the new table.
            for item in self.iter() {
                // This may panic.
                let hash = hasher(item.as_ref());

                // We can use a simpler version of insert() here since:
                // - there are no DELETED entries.
                // - we know there is enough space in the table.
                // - all elements are unique.
                let (index, _) = new_table.prepare_insert_slot(hash);
                new_table.bucket(index).copy_from_nonoverlapping(&item);
            }

            // We successfully copied all elements without panicking. Now replace
            // self with the new table. The old table will have its memory freed but
            // the items will not be dropped (since they have been moved into the
            // new table).
            mem::swap(&mut self.table, &mut new_table);

            Ok(())
        }
    }

    /// Inserts a new element into the table, and returns its raw bucket.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
        unsafe {
            let mut index = self.table.find_insert_slot(hash);

            // We can avoid growing the table once we have reached our load
            // factor if we are replacing a tombstone. This works since the
            // number of EMPTY slots does not change in this case.
            let old_ctrl = *self.table.ctrl(index);
            if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
                self.reserve(1, hasher);
                index = self.table.find_insert_slot(hash);
            }

            self.table.record_item_insert_at(index, old_ctrl, hash);

            let bucket = self.bucket(index);
            bucket.write(value);
            bucket
        }
    }

    /// Attempts to insert a new element without growing the table and returns its raw bucket.
    ///
    /// Returns an `Err` containing the given element if inserting it would require growing the
    /// table.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg(feature = "raw")]
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn try_insert_no_grow(&mut self, hash: u64, value: T) -> Result<Bucket<T>, T> {
        unsafe {
            match self.table.prepare_insert_no_grow(hash) {
                Ok(index) => {
                    let bucket = self.bucket(index);
                    bucket.write(value);
                    Ok(bucket)
                }
                Err(()) => Err(value),
            }
        }
    }

    /// Inserts a new element into the table, and returns a mutable reference to it.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn insert_entry(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> &mut T {
        unsafe { self.insert(hash, value, hasher).as_mut() }
    }

    /// Inserts a new element into the table, without growing the table.
    ///
    /// There must be enough space in the table to insert the new element.
    ///
    /// This does not check if the given element already exists in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    #[cfg(any(feature = "raw", feature = "rustc-internal-api"))]
    pub fn insert_no_grow(&mut self, hash: u64, value: T) -> Bucket<T> {
        unsafe {
            let (index, old_ctrl) = self.table.prepare_insert_slot(hash);
            let bucket = self.table.bucket(index);

            // If we are replacing a DELETED entry then we don't need to update
            // the load counter.
            self.table.growth_left -= special_is_empty(old_ctrl) as usize;

            bucket.write(value);
            self.table.items += 1;
            bucket
        }
    }

    /// Temporarily removes a bucket, applying the given function to the removed
    /// element and optionally putting the returned value back into the same bucket.
    ///
    /// Returns `true` if the bucket still contains an element.
    ///
    /// This does not check if the given bucket is actually occupied.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn replace_bucket_with<F>(&mut self, bucket: Bucket<T>, f: F) -> bool
    where
        F: FnOnce(T) -> Option<T>,
    {
        let index = self.bucket_index(&bucket);
        let old_ctrl = *self.table.ctrl(index);
        debug_assert!(is_full(old_ctrl));
        let old_growth_left = self.table.growth_left;
        let item = self.remove(bucket);
        if let Some(new_item) = f(item) {
            self.table.growth_left = old_growth_left;
            self.table.set_ctrl(index, old_ctrl);
            self.table.items += 1;
            self.bucket(index).write(new_item);
            true
        } else {
            false
        }
    }

    /// Searches for an element in the table.
    #[inline]
    pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
        unsafe {
            for bucket in self.iter_hash(hash) {
                let elm = bucket.as_ref();
                if likely(eq(elm)) {
                    return Some(bucket);
                }
            }
            None
        }
    }

    /// Gets a reference to an element in the table.
    #[inline]
    pub fn get(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { bucket.as_ref() }),
            None => None,
        }
    }

    /// Gets a mutable reference to an element in the table.
    #[inline]
    pub fn get_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
        // Avoid `Option::map` because it bloats LLVM IR.
        match self.find(hash, eq) {
            Some(bucket) => Some(unsafe { bucket.as_mut() }),
            None => None,
        }
    }

    /// Attempts to get mutable references to `N` entries in the table at once.
    ///
    /// Returns an array of length `N` with the results of each query. For soundness,
    /// at most one mutable reference will be returned to any entry. An
    /// `Err(UnavailableMutError::Duplicate(i))` in the returned array indicates that a suitable
    /// entry exists, but a mutable reference to it already occurs at index `i` in the returned
    /// array.
    ///
    /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
    /// the `i`th key to be looked up.
    ///
    /// This method is available only if the `nightly` feature is enabled.
    #[cfg(feature = "nightly")]
    pub fn get_each_mut<const N: usize>(
        &mut self,
        hashes: [u64; N],
        mut eq: impl FnMut(usize, &T) -> bool,
    ) -> [Result<&'_ mut T, UnavailableMutError>; N] {
        // Collect the requested buckets.
        // TODO use `MaybeUninit::uninit_array` here instead once that's stable.
        let mut buckets: [MaybeUninit<Option<Bucket<T>>>; N] =
            unsafe { MaybeUninit::uninit().assume_init() };
        for i in 0..N {
            buckets[i] = MaybeUninit::new(self.find(hashes[i], |k| eq(i, k)));
        }
        let buckets: [Option<Bucket<T>>; N] = unsafe { MaybeUninit::array_assume_init(buckets) };

        // Walk through the buckets, checking for duplicates and building up the output array.
        // TODO use `MaybeUninit::uninit_array` here instead once that's stable.
        let mut out: [MaybeUninit<Result<&'_ mut T, UnavailableMutError>>; N] =
            unsafe { MaybeUninit::uninit().assume_init() };
        for i in 0..N {
            out[i] = MaybeUninit::new(
                #[allow(clippy::never_loop)]
                'outer: loop {
                    for j in 0..i {
                        match (&buckets[j], &buckets[i]) {
                            // These two buckets are the same, and we can't safely return a second
                            // mutable reference to the same entry.
                            (Some(prev), Some(cur)) if prev.as_ptr() == cur.as_ptr() => {
                                break 'outer Err(UnavailableMutError::Duplicate(j));
                            }
                            _ => {}
                        }
                    }
                    // This bucket is distinct from all previous buckets (or it doesn't exist), so
                    // we're clear to return the result of the lookup.
                    break match &buckets[i] {
                        None => Err(UnavailableMutError::Absent),
                        Some(bkt) => unsafe { Ok(bkt.as_mut()) },
                    };
                },
            )
        }

        unsafe { MaybeUninit::array_assume_init(out) }
    }

    /// Returns the number of elements the map can hold without reallocating.
    ///
    /// This number is a lower bound; the table might be able to hold
    /// more, but is guaranteed to be able to hold at least this many.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn capacity(&self) -> usize {
        self.table.items + self.table.growth_left
    }

    /// Returns the number of elements in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn len(&self) -> usize {
        self.table.items
    }

    /// Returns the number of buckets in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn buckets(&self) -> usize {
        self.table.bucket_mask + 1
    }

    /// Returns an iterator over every element in the table. It is up to
    /// the caller to ensure that the `RawTable` outlives the `RawIter`.
    /// Because we cannot make the `next` method unsafe on the `RawIter`
    /// struct, we have to make the `iter` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn iter(&self) -> RawIter<T> {
        let data = Bucket::from_base_index(self.data_end(), 0);
        RawIter {
            iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()),
            items: self.table.items,
        }
    }

    /// Returns an iterator over occupied buckets that could match a given hash.
    ///
    /// In rare cases, the iterator may return a bucket with a different hash.
    ///
    /// It is up to the caller to ensure that the `RawTable` outlives the
    /// `RawIterHash`. Because we cannot make the `next` method unsafe on the
    /// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> {
        RawIterHash::new(self, hash)
    }

    /// Returns an iterator which removes all elements from the table without
    /// freeing the memory.
    #[cfg_attr(feature = "inline-more", inline)]
    pub fn drain(&mut self) -> RawDrain<'_, T, A> {
        unsafe {
            let iter = self.iter();
            self.drain_iter_from(iter)
        }
    }

    /// Returns an iterator which removes all elements from the table without
    /// freeing the memory.
    ///
    /// Iteration starts at the provided iterator's current location.
    ///
    /// It is up to the caller to ensure that the iterator is valid for this
    /// `RawTable` and covers all items that remain in the table.
    #[cfg_attr(feature = "inline-more", inline)]
    pub unsafe fn drain_iter_from(&mut self, iter: RawIter<T>) -> RawDrain<'_, T, A> {
        debug_assert_eq!(iter.len(), self.len());
        RawDrain {
            iter,
            table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))),
            orig_table: NonNull::from(self),
            marker: PhantomData,
        }
    }

    /// Returns an iterator which consumes all elements from the table.
    ///
    /// Iteration starts at the provided iterator's current location.
    ///
    /// It is up to the caller to ensure that the iterator is valid for this
    /// `RawTable` and covers all items that remain in the table.
    pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
        debug_assert_eq!(iter.len(), self.len());

        let alloc = self.table.alloc.clone();
        let allocation = self.into_allocation();
        RawIntoIter {
            iter,
            allocation,
            marker: PhantomData,
            alloc,
        }
    }

    /// Converts the table into a raw allocation. The contents of the table
    /// should be dropped using a `RawIter` before freeing the allocation.
    #[cfg_attr(feature = "inline-more", inline)]
    pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout)> {
        let alloc = if self.table.is_empty_singleton() {
            None
        } else {
            // Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
            let (layout, ctrl_offset) = match calculate_layout::<T>(self.table.buckets()) {
                Some(lco) => lco,
                None => unsafe { hint::unreachable_unchecked() },
            };
            Some((
                unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
                layout,
            ))
        };
        mem::forget(self);
        alloc
    }
}

unsafe impl<T, A: Allocator + Clone> Send for RawTable<T, A> where T: Send {}
unsafe impl<T, A: Allocator + Clone> Sync for RawTable<T, A> where T: Sync {}

impl<A> RawTableInner<A> {
    #[cfg_attr(feature = "inline-more", inline)]
    const fn new_in(alloc: A) -> Self {
        Self {
            // Be careful to cast the entire slice to a raw pointer.
            ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
            bucket_mask: 0,
            items: 0,
            growth_left: 0,
            alloc,
        }
    }
}

impl<A: Allocator + Clone> RawTableInner<A> {
    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn new_uninitialized(
        alloc: A,
        table_layout: TableLayout,
        buckets: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        debug_assert!(buckets.is_power_of_two());

        // Avoid `Option::ok_or_else` because it bloats LLVM IR.
        let (layout, ctrl_offset) = match table_layout.calculate_layout_for(buckets) {
            Some(lco) => lco,
            None => return Err(fallibility.capacity_overflow()),
        };

        let ptr: NonNull<u8> = match do_alloc(&alloc, layout) {
            Ok(block) => block.cast(),
            Err(_) => return Err(fallibility.alloc_err(layout)),
        };

        let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
        Ok(Self {
            ctrl,
            bucket_mask: buckets - 1,
            items: 0,
            growth_left: bucket_mask_to_capacity(buckets - 1),
            alloc,
        })
    }

    #[inline]
    fn fallible_with_capacity(
        alloc: A,
        table_layout: TableLayout,
        capacity: usize,
        fallibility: Fallibility,
    ) -> Result<Self, TryReserveError> {
        if capacity == 0 {
            Ok(Self::new_in(alloc))
        } else {
            unsafe {
                let buckets =
                    capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;

                let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
                result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());

                Ok(result)
            }
        }
    }

    /// Searches for an empty or deleted bucket which is suitable for inserting
    /// a new element and sets the hash for that slot.
    ///
    /// There must be at least 1 empty bucket in the table.
    #[inline]
    unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
        let index = self.find_insert_slot(hash);
        let old_ctrl = *self.ctrl(index);
        self.set_ctrl_h2(index, hash);
        (index, old_ctrl)
    }

    /// Searches for an empty or deleted bucket which is suitable for inserting
    /// a new element.
    ///
    /// There must be at least 1 empty bucket in the table.
    #[inline]
    fn find_insert_slot(&self, hash: u64) -> usize {
        let mut probe_seq = self.probe_seq(hash);
        loop {
            unsafe {
                let group = Group::load(self.ctrl(probe_seq.pos));
                if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
                    let result = (probe_seq.pos + bit) & self.bucket_mask;

                    // In tables smaller than the group width, trailing control
                    // bytes outside the range of the table are filled with
                    // EMPTY entries. These will unfortunately trigger a
                    // match, but once masked may point to a full bucket that
                    // is already occupied. We detect this situation here and
                    // perform a second scan starting at the beginning of the
                    // table. This second scan is guaranteed to find an empty
                    // slot (due to the load factor) before hitting the trailing
                    // control bytes (containing EMPTY).
                    if unlikely(is_full(*self.ctrl(result))) {
                        debug_assert!(self.bucket_mask < Group::WIDTH);
                        debug_assert_ne!(probe_seq.pos, 0);
                        return Group::load_aligned(self.ctrl(0))
                            .match_empty_or_deleted()
                            .lowest_set_bit_nonzero();
                    }

                    return result;
                }
            }
            probe_seq.move_next(self.bucket_mask);
        }
    }

    #[allow(clippy::mut_mut)]
    #[inline]
    unsafe fn prepare_rehash_in_place(&mut self) {
        // Bulk convert all full control bytes to DELETED, and all DELETED
        // control bytes to EMPTY. This effectively frees up all buckets
        // containing a DELETED entry.
        for i in (0..self.buckets()).step_by(Group::WIDTH) {
            let group = Group::load_aligned(self.ctrl(i));
            let group = group.convert_special_to_empty_and_full_to_deleted();
            group.store_aligned(self.ctrl(i));
        }

        // Fix up the trailing control bytes. See the comments in set_ctrl
        // for the handling of tables smaller than the group width.
        if self.buckets() < Group::WIDTH {
            self.ctrl(0)
                .copy_to(self.ctrl(Group::WIDTH), self.buckets());
        } else {
            self.ctrl(0)
                .copy_to(self.ctrl(self.buckets()), Group::WIDTH);
        }
    }
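
    // Worked example of the bulk conversion above (illustrative values only):
    // a full control byte such as 0b0101_1010 has its top bit clear and
    // becomes DELETED (0b1000_0000), while the special bytes DELETED and
    // EMPTY (0b1111_1111) both become EMPTY. After this pass, every DELETED
    // slot marks an element that still has to be re-inserted by
    // `rehash_in_place`.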

    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
        debug_assert_ne!(self.bucket_mask, 0);
        debug_assert!(index < self.buckets());
        Bucket::from_base_index(self.data_end(), index)
    }

    #[cfg_attr(feature = "inline-more", inline)]
    unsafe fn data_end<T>(&self) -> NonNull<T> {
        NonNull::new_unchecked(self.ctrl.as_ptr().cast())
    }

    /// Returns an iterator-like object for a probe sequence on the table.
    ///
    /// This iterator never terminates, but is guaranteed to visit each bucket
    /// group exactly once. The loop using `probe_seq` must terminate upon
    /// reaching a group containing an empty bucket.
    #[inline]
    fn probe_seq(&self, hash: u64) -> ProbeSeq {
        ProbeSeq {
            pos: h1(hash) & self.bucket_mask,
            stride: 0,
        }
    }

1291 | /// Returns the index of a bucket for which a value must be inserted if there is enough rooom | |
1292 | /// in the table, otherwise returns error | |
1293 | #[cfg(feature = "raw")] | |
1294 | #[inline] | |
1295 | unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> { | |
1296 | let index = self.find_insert_slot(hash); | |
1297 | let old_ctrl = *self.ctrl(index); | |
1298 | if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) { | |
1299 | Err(()) | |
1300 | } else { | |
1301 | self.record_item_insert_at(index, old_ctrl, hash); | |
1302 | Ok(index) | |
1303 | } | |
1304 | } | |
1305 | ||
1306 | #[inline] | |
1307 | unsafe fn record_item_insert_at(&mut self, index: usize, old_ctrl: u8, hash: u64) { | |
1308 | self.growth_left -= special_is_empty(old_ctrl) as usize; | |
1309 | self.set_ctrl_h2(index, hash); | |
1310 | self.items += 1; | |
1311 | } | |
1312 | ||
1313 | #[inline] | |
1314 | fn is_in_same_group(&self, i: usize, new_i: usize, hash: u64) -> bool { | |
1315 | let probe_seq_pos = self.probe_seq(hash).pos; | |
1316 | let probe_index = | |
1317 | |pos: usize| (pos.wrapping_sub(probe_seq_pos) & self.bucket_mask) / Group::WIDTH; | |
1318 | probe_index(i) == probe_index(new_i) | |
1319 | } | |
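As a standalone check of that arithmetic (illustrative group width and table size; `is_in_same_group` here is a free-standing sketch, not the method above):

// Two indices are "in the same group" when their offsets from the probe
// start, taken modulo the table size, fall into the same GROUP_WIDTH window.
const GROUP_WIDTH: usize = 16;

fn is_in_same_group(i: usize, new_i: usize, probe_start: usize, bucket_mask: usize) -> bool {
    let probe_index = |pos: usize| (pos.wrapping_sub(probe_start) & bucket_mask) / GROUP_WIDTH;
    probe_index(i) == probe_index(new_i)
}

fn main() {
    let bucket_mask = 63; // 64 buckets
    // Offsets 3 and 10 from the probe start land in the first window...
    assert!(is_in_same_group(13, 20, 10, bucket_mask));
    // ...while offset 20 lands in the second one.
    assert!(!is_in_same_group(13, 30, 10, bucket_mask));
}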
1320 | ||
1321 | /// Sets a control byte to the hash, and possibly also the replicated control byte at | |
1322 | /// the end of the array. | |
1323 | #[inline] | |
1324 | unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) { | |
1325 | self.set_ctrl(index, h2(hash)) | |
1326 | } | |
1327 | ||
1328 | #[inline] | |
1329 | unsafe fn replace_ctrl_h2(&self, index: usize, hash: u64) -> u8 { | |
1330 | let prev_ctrl = *self.ctrl(index); | |
1331 | self.set_ctrl_h2(index, hash); | |
1332 | prev_ctrl | |
1333 | } | |
1334 | ||
1335 | /// Sets a control byte, and possibly also the replicated control byte at | |
1336 | /// the end of the array. | |
1337 | #[inline] | |
1338 | unsafe fn set_ctrl(&self, index: usize, ctrl: u8) { | |
1339 | // Replicate the first Group::WIDTH control bytes at the end of | |
1340 | // the array without using a branch: | |
1341 | // - If index >= Group::WIDTH then index == index2. | |
1342 | // - Otherwise index2 == self.bucket_mask + 1 + index. | |
1343 | // | |
1344 | // The very last replicated control byte is never actually read because | |
1345 | // we mask the initial index for unaligned loads, but we write it | |
1346 | // anyway because it makes the set_ctrl implementation simpler. | |
1347 | // | |
1348 | // If there are fewer buckets than Group::WIDTH then this code will | |
1349 | // replicate the buckets at the end of the trailing group. For example | |
1350 | // with 2 buckets and a group size of 4, the control bytes will look | |
1351 | // like this: | |
1352 | // | |
1353 | // Real | Replicated | |
1354 | // --------------------------------------------- | |
1355 | // | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] | | |
1356 | // --------------------------------------------- | |
1357 | let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH; | |
1358 | ||
1359 | *self.ctrl(index) = ctrl; | |
1360 | *self.ctrl(index2) = ctrl; | |
1361 | } | |
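A quick standalone check of the `index2` formula, using the comment's own 2-bucket example plus a larger table (the `mirror_index` helper and `GROUP_WIDTH` constant are illustrative, not the crate's):

// Standalone check of the branchless mirroring formula used above.
const GROUP_WIDTH: usize = 4;

fn mirror_index(index: usize, bucket_mask: usize) -> usize {
    (index.wrapping_sub(GROUP_WIDTH) & bucket_mask) + GROUP_WIDTH
}

fn main() {
    // 2 buckets (bucket_mask = 1): the comment's diagram, where [A] at index 0
    // and [B] at index 1 are replicated at indices 4 and 5 of the trailing group.
    assert_eq!(mirror_index(0, 1), 4);
    assert_eq!(mirror_index(1, 1), 5);

    // 8 buckets (bucket_mask = 7): indices below GROUP_WIDTH mirror to
    // bucket_mask + 1 + index, while larger indices map to themselves.
    assert_eq!(mirror_index(0, 7), 8);
    assert_eq!(mirror_index(3, 7), 11);
    assert_eq!(mirror_index(5, 7), 5);
}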
48663c56 | 1362 | |
6a06907d XL |
1363 | /// Returns a pointer to a control byte. |
1364 | #[inline] | |
1365 | unsafe fn ctrl(&self, index: usize) -> *mut u8 { | |
1366 | debug_assert!(index < self.num_ctrl_bytes()); | |
1367 | self.ctrl.as_ptr().add(index) | |
1368 | } | |
1369 | ||
1370 | #[inline] | |
1371 | fn buckets(&self) -> usize { | |
1372 | self.bucket_mask + 1 | |
1373 | } | |
1374 | ||
1375 | #[inline] | |
1376 | fn num_ctrl_bytes(&self) -> usize { | |
1377 | self.bucket_mask + 1 + Group::WIDTH | |
1378 | } | |
1379 | ||
1380 | #[inline] | |
1381 | fn is_empty_singleton(&self) -> bool { | |
1382 | self.bucket_mask == 0 | |
1383 | } | |
1384 | ||
1385 | #[allow(clippy::mut_mut)] | |
1386 | #[inline] | |
1387 | unsafe fn prepare_resize( | |
1388 | &self, | |
1389 | table_layout: TableLayout, | |
1390 | capacity: usize, | |
1391 | fallibility: Fallibility, | |
1392 | ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self)>, TryReserveError> { | |
1393 | debug_assert!(self.items <= capacity); | |
1394 | ||
1395 | // Allocate and initialize the new table. | |
1396 | let mut new_table = RawTableInner::fallible_with_capacity( | |
1397 | self.alloc.clone(), | |
1398 | table_layout, | |
1399 | capacity, | |
1400 | fallibility, | |
1401 | )?; | |
1402 | new_table.growth_left -= self.items; | |
1403 | new_table.items = self.items; | |
1404 | ||
1405 | // The hash function may panic, in which case we simply free the new | |
1406 | // table without dropping any elements that may have been copied into | |
1407 | // it. | |
1408 | // | |
1409 | // This guard is also used to free the old table on success, see | |
1410 | // the comment at the bottom of this function. | |
1411 | Ok(guard(new_table, move |self_| { | |
1412 | if !self_.is_empty_singleton() { | |
1413 | self_.free_buckets(table_layout); | |
1414 | } | |
1415 | })) | |
1416 | } | |
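The scopeguard pattern relied on here can be shown in isolation. Below is a minimal stand-in (not the crate's `scopeguard` module or its API): the cleanup closure runs when the guard is dropped, for example while unwinding from a panicking hash function, and the caller defuses it once the operation has succeeded.

// Minimal drop-based cleanup guard, defused with mem::forget on success.
use std::cell::Cell;
use std::mem;

struct Guard<F: FnMut()> {
    cleanup: F,
}

impl<F: FnMut()> Drop for Guard<F> {
    fn drop(&mut self) {
        (self.cleanup)();
    }
}

fn main() {
    let cleaned_up = Cell::new(false);

    // Failure path: dropping the guard (as during unwinding) runs the cleanup.
    drop(Guard { cleanup: || cleaned_up.set(true) });
    assert!(cleaned_up.get());

    // Success path: forgetting the guard skips the cleanup (the real code
    // instead reuses the same guard to free the *old* table at that point).
    cleaned_up.set(false);
    mem::forget(Guard { cleanup: || cleaned_up.set(true) });
    assert!(!cleaned_up.get());
}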
1417 | ||
1418 | #[inline] | |
1419 | unsafe fn free_buckets(&mut self, table_layout: TableLayout) { | |
1420 | // Avoid `Option::unwrap_or_else` because it bloats LLVM IR. | |
1421 | let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) { | |
1422 | Some(lco) => lco, | |
1423 | None => hint::unreachable_unchecked(), | |
1424 | }; | |
1425 | self.alloc.deallocate( | |
1426 | NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)), | |
1427 | layout, | |
1428 | ); | |
1429 | } | |
1430 | ||
1431 | /// Marks all table buckets as empty without dropping their contents. | |
1432 | #[inline] | |
1433 | fn clear_no_drop(&mut self) { | |
1434 | if !self.is_empty_singleton() { | |
1435 | unsafe { | |
1436 | self.ctrl(0).write_bytes(EMPTY, self.num_ctrl_bytes()); | |
1437 | } | |
1438 | } | |
1439 | self.items = 0; | |
1440 | self.growth_left = bucket_mask_to_capacity(self.bucket_mask); | |
1441 | } | |
1442 | ||
1443 | #[inline] | |
1444 | unsafe fn erase(&mut self, index: usize) { | |
1445 | debug_assert!(is_full(*self.ctrl(index))); | |
1446 | let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask; | |
1447 | let empty_before = Group::load(self.ctrl(index_before)).match_empty(); | |
1448 | let empty_after = Group::load(self.ctrl(index)).match_empty(); | |
1449 | ||
1450 | // If we are inside a continuous block of Group::WIDTH full or deleted | |
1451 | // cells then a probe window may have seen a full block when trying to | |
1452 | // insert. We therefore need to keep that block non-empty so that | |
1453 | // lookups will continue searching to the next probe window. | |
1454 | // | |
1455 | // Note that in this context `leading_zeros` refers to the bytes at the | |
1456 | // end of a group, while `trailing_zeros` refers to the bytes at the | |
1457 | // beginning of a group. | |
1458 | let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH { | |
1459 | DELETED | |
1460 | } else { | |
1461 | self.growth_left += 1; | |
1462 | EMPTY | |
1463 | }; | |
1464 | self.set_ctrl(index, ctrl); | |
1465 | self.items -= 1; | |
1466 | } | |
1467 | } | |
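The DELETED-versus-EMPTY decision in `erase` can be checked on plain bit masks. A minimal sketch assuming an 8-wide group and `u8` masks in which bit `i` set means slot `i` is empty (the real code uses `Group::WIDTH` and its `BitMask` type):

// Standalone sketch of the tombstone decision.
const GROUP_WIDTH: u32 = 8;

fn erased_ctrl(empty_before: u8, empty_after: u8) -> &'static str {
    // leading_zeros counts consecutive non-empty slots at the end of the
    // group before the erased bucket; trailing_zeros counts those at the
    // start of the group beginning at it. If a full GROUP_WIDTH window around
    // the bucket had no EMPTY entry, a probe may have skipped past it, so we
    // must leave a DELETED tombstone instead of EMPTY.
    if empty_before.leading_zeros() + empty_after.trailing_zeros() >= GROUP_WIDTH {
        "DELETED"
    } else {
        "EMPTY"
    }
}

fn main() {
    // 3 non-empty slots before plus 5 starting at the bucket: 3 + 5 >= 8, so
    // a probe window may have been completely full; keep a tombstone.
    assert_eq!(erased_ctrl(0b0001_1111, 0b1110_0000), "DELETED");

    // An EMPTY slot sits within the window, so no probe could have skipped
    // past the erased bucket; it can go back to EMPTY.
    assert_eq!(erased_ctrl(0b1001_1111, 0b1110_0000), "EMPTY");
}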
1468 | ||
1469 | impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> { | |
48663c56 | 1470 | fn clone(&self) -> Self { |
6a06907d XL |
1471 | if self.table.is_empty_singleton() { |
1472 | Self::new_in(self.table.alloc.clone()) | |
48663c56 XL |
1473 | } else { |
1474 | unsafe { | |
1475 | let mut new_table = ManuallyDrop::new( | |
3dfed10e | 1476 | // Avoid `Result::ok_or_else` because it bloats LLVM IR. |
6a06907d XL |
1477 | match Self::new_uninitialized( |
1478 | self.table.alloc.clone(), | |
1479 | self.table.buckets(), | |
1480 | Fallibility::Infallible, | |
1481 | ) { | |
3dfed10e XL |
1482 | Ok(table) => table, |
1483 | Err(_) => hint::unreachable_unchecked(), | |
1b1a35ee | 1484 | }, |
48663c56 XL |
1485 | ); |
1486 | ||
3dfed10e XL |
1487 | new_table.clone_from_spec(self, |new_table| { |
1488 | // We need to free the memory allocated for the new table. | |
1489 | new_table.free_buckets(); | |
1490 | }); | |
48663c56 | 1491 | |
3dfed10e XL |
1492 | // Return the newly created table. |
1493 | ManuallyDrop::into_inner(new_table) | |
1494 | } | |
1495 | } | |
1496 | } | |
48663c56 | 1497 | |
3dfed10e | 1498 | fn clone_from(&mut self, source: &Self) { |
6a06907d XL |
1499 | if source.table.is_empty_singleton() { |
1500 | *self = Self::new_in(self.table.alloc.clone()); | |
3dfed10e XL |
1501 | } else { |
1502 | unsafe { | |
1503 | // First, drop all our elements without clearing the control bytes. | |
6a06907d | 1504 | self.drop_elements(); |
48663c56 | 1505 | |
3dfed10e XL |
1506 | // If necessary, resize our table to match the source. |
1507 | if self.buckets() != source.buckets() { | |
1508 | // Skip our drop by using ptr::write. | |
6a06907d | 1509 | if !self.table.is_empty_singleton() { |
3dfed10e XL |
1510 | self.free_buckets(); |
1511 | } | |
1512 | (self as *mut Self).write( | |
1513 | // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. | |
6a06907d XL |
1514 | match Self::new_uninitialized( |
1515 | self.table.alloc.clone(), | |
1516 | source.buckets(), | |
1517 | Fallibility::Infallible, | |
1518 | ) { | |
3dfed10e XL |
1519 | Ok(table) => table, |
1520 | Err(_) => hint::unreachable_unchecked(), | |
1b1a35ee | 1521 | }, |
3dfed10e | 1522 | ); |
48663c56 XL |
1523 | } |
1524 | ||
3dfed10e XL |
1525 | self.clone_from_spec(source, |self_| { |
1526 | // We need to leave the table in an empty state. | |
1527 | self_.clear_no_drop() | |
1528 | }); | |
48663c56 XL |
1529 | } |
1530 | } | |
1531 | } | |
1532 | } | |
1533 | ||
3dfed10e XL |
1534 | /// Specialization of `clone_from` for `Copy` types |
1535 | trait RawTableClone { | |
1536 | unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)); | |
1537 | } | |
6a06907d | 1538 | impl<T: Clone, A: Allocator + Clone> RawTableClone for RawTable<T, A> { |
3dfed10e XL |
1539 | #[cfg_attr(feature = "inline-more", inline)] |
1540 | default_fn! { | |
1541 | unsafe fn clone_from_spec(&mut self, source: &Self, on_panic: impl FnMut(&mut Self)) { | |
1542 | self.clone_from_impl(source, on_panic); | |
1543 | } | |
1544 | } | |
1545 | } | |
1546 | #[cfg(feature = "nightly")] | |
6a06907d | 1547 | impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> { |
3dfed10e XL |
1548 | #[cfg_attr(feature = "inline-more", inline)] |
1549 | unsafe fn clone_from_spec(&mut self, source: &Self, _on_panic: impl FnMut(&mut Self)) { | |
1550 | source | |
6a06907d | 1551 | .table |
3dfed10e | 1552 | .ctrl(0) |
6a06907d | 1553 | .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); |
3dfed10e XL |
1554 | source |
1555 | .data_start() | |
6a06907d | 1556 | .copy_to_nonoverlapping(self.data_start(), self.table.buckets()); |
3dfed10e | 1557 | |
6a06907d XL |
1558 | self.table.items = source.table.items; |
1559 | self.table.growth_left = source.table.growth_left; | |
3dfed10e XL |
1560 | } |
1561 | } | |
1562 | ||
6a06907d | 1563 | impl<T: Clone, A: Allocator + Clone> RawTable<T, A> { |
3dfed10e XL |
1564 | /// Common code for clone and clone_from. Assumes `self.buckets() == source.buckets()`. |
1565 | #[cfg_attr(feature = "inline-more", inline)] | |
1566 | unsafe fn clone_from_impl(&mut self, source: &Self, mut on_panic: impl FnMut(&mut Self)) { | |
1567 | // Copy the control bytes unchanged. We do this in a single pass over the whole array. | |
1568 | source | |
6a06907d | 1569 | .table |
3dfed10e | 1570 | .ctrl(0) |
6a06907d | 1571 | .copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes()); |
3dfed10e XL |
1572 | |
1573 | // The cloning of elements may panic, in which case we need | |
1574 | // to make sure we drop only the elements that have been | |
1575 | // cloned so far. | |
1576 | let mut guard = guard((0, &mut *self), |(index, self_)| { | |
1577 | if mem::needs_drop::<T>() && self_.len() != 0 { | |
1578 | for i in 0..=*index { | |
6a06907d | 1579 | if is_full(*self_.table.ctrl(i)) { |
3dfed10e XL |
1580 | self_.bucket(i).drop(); |
1581 | } | |
1582 | } | |
1583 | } | |
1584 | ||
1585 | // Depending on whether we were called from clone or clone_from, we | |
1586 | // either need to free the memory for the destination table or just | |
1587 | // clear the control bytes. | |
1588 | on_panic(self_); | |
1589 | }); | |
1590 | ||
1591 | for from in source.iter() { | |
1592 | let index = source.bucket_index(&from); | |
1593 | let to = guard.1.bucket(index); | |
1594 | to.write(from.as_ref().clone()); | |
1595 | ||
1596 | // Update the index in case we need to unwind. | |
1597 | guard.0 = index; | |
1598 | } | |
1599 | ||
1600 | // Successfully cloned all items, no need to clean up. | |
1601 | mem::forget(guard); | |
1602 | ||
6a06907d XL |
1603 | self.table.items = source.table.items; |
1604 | self.table.growth_left = source.table.growth_left; | |
3dfed10e XL |
1605 | } |
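The "drop only what has been cloned so far" technique used above can be sketched independently of the table internals. A simplified stand-in (hypothetical names, a `Vec<String>` instead of raw buckets) showing the same progress-tracking guard:

// On unwind, the guard discards the partially cloned contents; on success it
// is disarmed and the clones are kept.
use std::panic::{catch_unwind, AssertUnwindSafe};

struct PartialCleanup<'a> {
    storage: &'a mut Vec<String>,
    armed: bool,
}

impl Drop for PartialCleanup<'_> {
    fn drop(&mut self) {
        if self.armed {
            // Unwinding: throw away the partially cloned prefix.
            self.storage.clear();
        }
    }
}

fn clone_all(source: &[String], dest: &mut Vec<String>, poison: Option<usize>) {
    let mut guard = PartialCleanup { storage: dest, armed: true };
    for (i, item) in source.iter().enumerate() {
        if poison == Some(i) {
            panic!("clone failed"); // stands in for a panicking Clone impl
        }
        guard.storage.push(item.clone());
    }
    guard.armed = false; // success: keep the clones
}

fn main() {
    let source = vec!["a".to_string(), "b".to_string(), "c".to_string()];

    let mut ok = Vec::new();
    clone_all(&source, &mut ok, None);
    assert_eq!(ok.len(), 3);

    let mut failed = Vec::new();
    let result = catch_unwind(AssertUnwindSafe(|| clone_all(&source, &mut failed, Some(2))));
    assert!(result.is_err());
    assert!(failed.is_empty()); // the partially cloned prefix was cleaned up
}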
1606 | ||
1607 | /// Variant of `clone_from` to use when a hasher is available. | |
1608 | #[cfg(feature = "raw")] | |
1609 | pub fn clone_from_with_hasher(&mut self, source: &Self, hasher: impl Fn(&T) -> u64) { | |
1610 | // If we have enough capacity in the table, just clear it and insert | |
1611 | // elements one by one. We don't do this if we have the same number of | |
1612 | // buckets as the source since we can just copy the contents directly | |
1613 | // in that case. | |
6a06907d XL |
1614 | if self.table.buckets() != source.table.buckets() |
1615 | && bucket_mask_to_capacity(self.table.bucket_mask) >= source.len() | |
3dfed10e XL |
1616 | { |
1617 | self.clear(); | |
1618 | ||
1619 | let guard_self = guard(&mut *self, |self_| { | |
1620 | // Clear the partially copied table if a panic occurs, otherwise | |
1621 | // items and growth_left will be out of sync with the contents | |
1622 | // of the table. | |
1623 | self_.clear(); | |
1624 | }); | |
1625 | ||
1626 | unsafe { | |
1627 | for item in source.iter() { | |
1628 | // This may panic. | |
1629 | let item = item.as_ref().clone(); | |
1630 | let hash = hasher(&item); | |
1631 | ||
1632 | // We can use a simpler version of insert() here since: | |
1633 | // - there are no DELETED entries. | |
1634 | // - we know there is enough space in the table. | |
1635 | // - all elements are unique. | |
6a06907d | 1636 | let (index, _) = guard_self.table.prepare_insert_slot(hash); |
3dfed10e XL |
1637 | guard_self.bucket(index).write(item); |
1638 | } | |
1639 | } | |
1640 | ||
1641 | // Successfully cloned all items, no need to clean up. | |
1642 | mem::forget(guard_self); | |
1643 | ||
6a06907d XL |
1644 | self.table.items = source.table.items; |
1645 | self.table.growth_left -= source.table.items; | |
3dfed10e XL |
1646 | } else { |
1647 | self.clone_from(source); | |
1648 | } | |
1649 | } | |
1650 | } | |
1651 | ||
6a06907d XL |
1652 | impl<T, A: Allocator + Clone + Default> Default for RawTable<T, A> { |
1653 | #[cfg_attr(feature = "inline-more", inline)] | |
1654 | fn default() -> Self { | |
1655 | Self::new_in(Default::default()) | |
1656 | } | |
1657 | } | |
1658 | ||
48663c56 | 1659 | #[cfg(feature = "nightly")] |
6a06907d | 1660 | unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable<T, A> { |
e74abb32 | 1661 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 | 1662 | fn drop(&mut self) { |
6a06907d | 1663 | if !self.table.is_empty_singleton() { |
48663c56 | 1664 | unsafe { |
6a06907d | 1665 | self.drop_elements(); |
48663c56 XL |
1666 | self.free_buckets(); |
1667 | } | |
1668 | } | |
1669 | } | |
1670 | } | |
1671 | #[cfg(not(feature = "nightly"))] | |
6a06907d | 1672 | impl<T, A: Allocator + Clone> Drop for RawTable<T, A> { |
e74abb32 | 1673 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 | 1674 | fn drop(&mut self) { |
6a06907d | 1675 | if !self.table.is_empty_singleton() { |
48663c56 | 1676 | unsafe { |
6a06907d | 1677 | self.drop_elements(); |
48663c56 XL |
1678 | self.free_buckets(); |
1679 | } | |
1680 | } | |
1681 | } | |
1682 | } | |
1683 | ||
6a06907d | 1684 | impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> { |
48663c56 | 1685 | type Item = T; |
6a06907d | 1686 | type IntoIter = RawIntoIter<T, A>; |
48663c56 | 1687 | |
e74abb32 | 1688 | #[cfg_attr(feature = "inline-more", inline)] |
6a06907d | 1689 | fn into_iter(self) -> RawIntoIter<T, A> { |
48663c56 XL |
1690 | unsafe { |
1691 | let iter = self.iter(); | |
3dfed10e | 1692 | self.into_iter_from(iter) |
48663c56 XL |
1693 | } |
1694 | } | |
1695 | } | |
1696 | ||
1697 | /// Iterator over a sub-range of a table. Unlike `RawIter` this iterator does | |
1698 | /// not track an item count. | |
e74abb32 | 1699 | pub(crate) struct RawIterRange<T> { |
48663c56 XL |
1700 | // Mask of full buckets in the current group. Bits are cleared from this |
1701 | // mask as each element is processed. | |
1702 | current_group: BitMask, | |
1703 | ||
1704 | // Pointer to the buckets for the current group. | |
1705 | data: Bucket<T>, | |
1706 | ||
1707 | // Pointer to the next group of control bytes, | |
1708 | // Must be aligned to the group size. | |
1709 | next_ctrl: *const u8, | |
1710 | ||
1711 | // Pointer one past the last control byte of this range. | |
1712 | end: *const u8, | |
1713 | } | |
1714 | ||
1715 | impl<T> RawIterRange<T> { | |
1716 | /// Returns a `RawIterRange` covering a subset of a table. | |
1717 | /// | |
1718 | /// The control byte address must be aligned to the group size. | |
e74abb32 | 1719 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
1720 | unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self { |
1721 | debug_assert_ne!(len, 0); | |
1722 | debug_assert_eq!(ctrl as usize % Group::WIDTH, 0); | |
1723 | let end = ctrl.add(len); | |
1724 | ||
1725 | // Load the first group and advance ctrl to point to the next group | |
1726 | let current_group = Group::load_aligned(ctrl).match_full(); | |
1727 | let next_ctrl = ctrl.add(Group::WIDTH); | |
1728 | ||
1729 | Self { | |
1730 | current_group, | |
1731 | data, | |
1732 | next_ctrl, | |
1733 | end, | |
1734 | } | |
1735 | } | |
1736 | ||
1737 | /// Splits a `RawIterRange` into two halves. | |
1738 | /// | |
1739 | /// Returns `None` if the remaining range is smaller than or equal to the | |
1740 | /// group width. | |
e74abb32 | 1741 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 | 1742 | #[cfg(feature = "rayon")] |
e74abb32 | 1743 | pub(crate) fn split(mut self) -> (Self, Option<RawIterRange<T>>) { |
48663c56 XL |
1744 | unsafe { |
1745 | if self.end <= self.next_ctrl { | |
1746 | // Nothing to split if the group that we are current processing | |
1747 | // is the last one. | |
1748 | (self, None) | |
1749 | } else { | |
1750 | // len is the remaining number of elements after the group that | |
1751 | // we are currently processing. It must be a multiple of the | |
1752 | // group size (small tables are caught by the check above). | |
1753 | let len = offset_from(self.end, self.next_ctrl); | |
1754 | debug_assert_eq!(len % Group::WIDTH, 0); | |
1755 | ||
1756 | // Split the remaining elements into two halves, but round the | |
1757 | // midpoint down in case there is an odd number of groups | |
1758 | // remaining. This ensures that: | |
1759 | // - The tail is at least 1 group long. | |
1760 | // - The split is roughly even considering we still have the | |
1761 | // current group to process. | |
1762 | let mid = (len / 2) & !(Group::WIDTH - 1); | |
1763 | ||
1764 | let tail = Self::new( | |
1765 | self.next_ctrl.add(mid), | |
3dfed10e | 1766 | self.data.next_n(Group::WIDTH).next_n(mid), |
48663c56 XL |
1767 | len - mid, |
1768 | ); | |
3dfed10e XL |
1769 | debug_assert_eq!( |
1770 | self.data.next_n(Group::WIDTH).next_n(mid).ptr, | |
1771 | tail.data.ptr | |
1772 | ); | |
48663c56 XL |
1773 | debug_assert_eq!(self.end, tail.end); |
1774 | self.end = self.next_ctrl.add(mid); | |
1775 | debug_assert_eq!(self.end.add(Group::WIDTH), tail.next_ctrl); | |
1776 | (self, Some(tail)) | |
1777 | } | |
1778 | } | |
1779 | } | |
1780 | } | |
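A small standalone check of the split-point arithmetic (group width 16 chosen for illustration):

// The midpoint is rounded down to a multiple of the group width so that both
// halves of the split stay group-aligned.
const GROUP_WIDTH: usize = 16;

fn split_mid(len: usize) -> usize {
    (len / 2) & !(GROUP_WIDTH - 1)
}

fn main() {
    // An even number of remaining groups splits evenly...
    assert_eq!(split_mid(4 * GROUP_WIDTH), 2 * GROUP_WIDTH);
    // ...and an odd number rounds the midpoint down, so the tail keeps the
    // extra group (the front half still holds its current group as well).
    assert_eq!(split_mid(5 * GROUP_WIDTH), 2 * GROUP_WIDTH);
    // With only one group left, the front keeps nothing beyond it.
    assert_eq!(split_mid(GROUP_WIDTH), 0);
}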
1781 | ||
1782 | // We make raw iterators unconditionally Send and Sync, and let the PhantomData | |
1783 | // in the actual iterator implementations determine the real Send/Sync bounds. | |
1784 | unsafe impl<T> Send for RawIterRange<T> {} | |
1785 | unsafe impl<T> Sync for RawIterRange<T> {} | |
1786 | ||
1787 | impl<T> Clone for RawIterRange<T> { | |
e74abb32 | 1788 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
1789 | fn clone(&self) -> Self { |
1790 | Self { | |
1791 | data: self.data.clone(), | |
1792 | next_ctrl: self.next_ctrl, | |
1793 | current_group: self.current_group, | |
1794 | end: self.end, | |
1795 | } | |
1796 | } | |
1797 | } | |
1798 | ||
1799 | impl<T> Iterator for RawIterRange<T> { | |
1800 | type Item = Bucket<T>; | |
1801 | ||
e74abb32 | 1802 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
1803 | fn next(&mut self) -> Option<Bucket<T>> { |
1804 | unsafe { | |
1805 | loop { | |
1806 | if let Some(index) = self.current_group.lowest_set_bit() { | |
1807 | self.current_group = self.current_group.remove_lowest_bit(); | |
3dfed10e | 1808 | return Some(self.data.next_n(index)); |
48663c56 XL |
1809 | } |
1810 | ||
1811 | if self.next_ctrl >= self.end { | |
1812 | return None; | |
1813 | } | |
1814 | ||
1815 | // We might read past self.end up to the next group boundary, | |
1816 | // but this is fine because it only occurs on tables smaller | |
1817 | // than the group size where the trailing control bytes are all | |
1818 | // EMPTY. On larger tables self.end is guaranteed to be aligned | |
1819 | // to the group size (since tables are power-of-two sized). | |
1820 | self.current_group = Group::load_aligned(self.next_ctrl).match_full(); | |
3dfed10e | 1821 | self.data = self.data.next_n(Group::WIDTH); |
48663c56 XL |
1822 | self.next_ctrl = self.next_ctrl.add(Group::WIDTH); |
1823 | } | |
1824 | } | |
1825 | } | |
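The inner loop above is a standard pop-lowest-bit walk over a per-group mask of full slots. A minimal sketch on plain `u8` masks (8-slot groups for brevity; the real code uses `BitMask` and SIMD group loads):

// Within each group, repeatedly take the lowest set bit of the "full" mask
// and clear it; when the mask is exhausted, move on to the next group.
fn full_indices(group_masks: &[u8]) -> Vec<usize> {
    let mut out = Vec::new();
    for (group, &mask) in group_masks.iter().enumerate() {
        let mut current = mask;
        while current != 0 {
            let bit = current.trailing_zeros() as usize; // lowest set bit
            current &= current - 1; // clear it, like remove_lowest_bit()
            out.push(group * 8 + bit);
        }
    }
    out
}

fn main() {
    // Two groups: slots 1 and 4 are full in the first, slot 0 in the second.
    assert_eq!(full_indices(&[0b0001_0010, 0b0000_0001]), vec![1, 4, 8]);
}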
1826 | ||
e74abb32 | 1827 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
1828 | fn size_hint(&self) -> (usize, Option<usize>) { |
1829 | // We don't have an item count, so just guess based on the range size. | |
1830 | ( | |
1831 | 0, | |
1832 | Some(unsafe { offset_from(self.end, self.next_ctrl) + Group::WIDTH }), | |
1833 | ) | |
1834 | } | |
1835 | } | |
1836 | ||
1837 | impl<T> FusedIterator for RawIterRange<T> {} | |
1838 | ||
1839 | /// Iterator which returns a raw pointer to every full bucket in the table. | |
3dfed10e XL |
1840 | /// |
1841 | /// For maximum flexibility this iterator is not bound by a lifetime, but you | |
1842 | /// must observe several rules when using it: | |
1843 | /// - You must not free the hash table while iterating (including via growing/shrinking). | |
1844 | /// - It is fine to erase a bucket that has been yielded by the iterator. | |
1845 | /// - Erasing a bucket that has not yet been yielded by the iterator may still | |
1846 | /// result in the iterator yielding that bucket (unless `reflect_remove` is called). | |
1847 | /// - It is unspecified whether an element inserted after the iterator was | |
1848 | /// created will be yielded by that iterator (unless `reflect_insert` is called). | |
1849 | /// - The order in which the iterator yields buckets is unspecified and may | |
1850 | /// change in the future. | |
48663c56 | 1851 | pub struct RawIter<T> { |
e74abb32 | 1852 | pub(crate) iter: RawIterRange<T>, |
48663c56 XL |
1853 | items: usize, |
1854 | } | |
1855 | ||
3dfed10e XL |
1856 | impl<T> RawIter<T> { |
1857 | /// Refresh the iterator so that it reflects a removal from the given bucket. | |
1858 | /// | |
1859 | /// For the iterator to remain valid, this method must be called once | |
1860 | /// for each removed bucket before `next` is called again. | |
1861 | /// | |
1862 | /// This method should be called _before_ the removal is made. It is not necessary to call this | |
1863 | /// method if you are removing an item that this iterator yielded in the past. | |
1864 | #[cfg(feature = "raw")] | |
1865 | pub fn reflect_remove(&mut self, b: &Bucket<T>) { | |
1866 | self.reflect_toggle_full(b, false); | |
1867 | } | |
1868 | ||
1869 | /// Refresh the iterator so that it reflects an insertion into the given bucket. | |
1870 | /// | |
1871 | /// For the iterator to remain valid, this method must be called once | |
1872 | /// for each insert before `next` is called again. | |
1873 | /// | |
1874 | /// This method does not guarantee that an insertion of a bucket with a greater | |
1875 | /// index than the last one yielded will be reflected in the iterator. | |
1876 | /// | |
1877 | /// This method should be called _after_ the given insert is made. | |
1878 | #[cfg(feature = "raw")] | |
1879 | pub fn reflect_insert(&mut self, b: &Bucket<T>) { | |
1880 | self.reflect_toggle_full(b, true); | |
1881 | } | |
1882 | ||
1883 | /// Refresh the iterator so that it reflects a change to the state of the given bucket. | |
1884 | #[cfg(feature = "raw")] | |
1885 | fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) { | |
1886 | unsafe { | |
1887 | if b.as_ptr() > self.iter.data.as_ptr() { | |
1888 | // The iterator has already passed the bucket's group. | |
1889 | // So the toggle isn't relevant to this iterator. | |
1890 | return; | |
1891 | } | |
1892 | ||
1893 | if self.iter.next_ctrl < self.iter.end | |
1894 | && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr() | |
1895 | { | |
1896 | // The iterator has not yet reached the bucket's group. | |
1897 | // We don't need to reload anything, but we do need to adjust the item count. | |
1898 | ||
1899 | if cfg!(debug_assertions) { | |
1900 | // Double-check that the user isn't lying to us by checking the bucket state. | |
1901 | // To do that, we need to find its control byte. We know that self.iter.data is | |
1902 | // at self.iter.next_ctrl - Group::WIDTH, so we work from there: | |
1903 | let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr()); | |
1904 | let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset); | |
1905 | // This method should be called _before_ a removal, or _after_ an insert, | |
1906 | // so in both cases the ctrl byte should indicate that the bucket is full. | |
1907 | assert!(is_full(*ctrl)); | |
1908 | } | |
1909 | ||
1910 | if is_insert { | |
1911 | self.items += 1; | |
1912 | } else { | |
1913 | self.items -= 1; | |
1914 | } | |
1915 | ||
1916 | return; | |
1917 | } | |
1918 | ||
1919 | // The iterator is at the bucket group that the toggled bucket is in. | |
1920 | // We need to do two things: | |
1921 | // | |
1922 | // - Determine if the iterator already yielded the toggled bucket. | |
1923 | // If it did, we're done. | |
1924 | // - Otherwise, update the iterator cached group so that it won't | |
1925 | // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket. | |
1926 | // We'll also need to update the item count accordingly. | |
1927 | if let Some(index) = self.iter.current_group.lowest_set_bit() { | |
1928 | let next_bucket = self.iter.data.next_n(index); | |
1929 | if b.as_ptr() > next_bucket.as_ptr() { | |
1930 | // The toggled bucket is "before" the bucket the iterator would yield next. We | |
1931 | // therefore don't need to do anything --- the iterator has already passed the | |
1932 | // bucket in question. | |
1933 | // | |
1934 | // The item count must already be correct, since a removal or insert "prior" to | |
1935 | // the iterator's position wouldn't affect the item count. | |
1936 | } else { | |
1937 | // The removed bucket is an upcoming bucket. We need to make sure it does _not_ | |
1938 | // get yielded, and also that it's no longer included in the item count. | |
1939 | // | |
1940 | // NOTE: We can't just reload the group here, both since that might reflect | |
1941 | // inserts we've already passed, and because that might inadvertently unset the | |
1942 | // bits for _other_ removals. If we do that, we'd have to also decrement the | |
1943 | // item count for those other bits that we unset. But the presumably subsequent | |
1944 | // call to reflect for those buckets might _also_ decrement the item count. | |
1945 | // Instead, we _just_ flip the bit for the particular bucket the caller asked | |
1946 | // us to reflect. | |
1947 | let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr()); | |
1948 | let was_full = self.iter.current_group.flip(our_bit); | |
1949 | debug_assert_ne!(was_full, is_insert); | |
1950 | ||
1951 | if is_insert { | |
1952 | self.items += 1; | |
1953 | } else { | |
1954 | self.items -= 1; | |
1955 | } | |
1956 | ||
1957 | if cfg!(debug_assertions) { | |
1958 | if b.as_ptr() == next_bucket.as_ptr() { | |
1959 | // The removed bucket should no longer be next | |
1960 | debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index)); | |
1961 | } else { | |
1962 | // We should not have changed what bucket comes next. | |
1963 | debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index)); | |
1964 | } | |
1965 | } | |
1966 | } | |
1967 | } else { | |
1968 | // We must have already iterated past the removed item. | |
1969 | } | |
1970 | } | |
1971 | } | |
6a06907d XL |
1972 | |
1973 | unsafe fn drop_elements(&mut self) { | |
1974 | if mem::needs_drop::<T>() && self.len() != 0 { | |
1975 | for item in self { | |
1976 | item.drop(); | |
1977 | } | |
1978 | } | |
1979 | } | |
3dfed10e XL |
1980 | } |
1981 | ||
48663c56 | 1982 | impl<T> Clone for RawIter<T> { |
e74abb32 | 1983 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
1984 | fn clone(&self) -> Self { |
1985 | Self { | |
1986 | iter: self.iter.clone(), | |
1987 | items: self.items, | |
1988 | } | |
1989 | } | |
1990 | } | |
1991 | ||
1992 | impl<T> Iterator for RawIter<T> { | |
1993 | type Item = Bucket<T>; | |
1994 | ||
e74abb32 | 1995 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
1996 | fn next(&mut self) -> Option<Bucket<T>> { |
1997 | if let Some(b) = self.iter.next() { | |
1998 | self.items -= 1; | |
1999 | Some(b) | |
2000 | } else { | |
2001 | // We don't check against items == 0 here to allow the | |
2002 | // compiler to optimize away the item count entirely if the | |
2003 | // iterator length is never queried. | |
2004 | debug_assert_eq!(self.items, 0); | |
2005 | None | |
2006 | } | |
2007 | } | |
2008 | ||
e74abb32 | 2009 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
2010 | fn size_hint(&self) -> (usize, Option<usize>) { |
2011 | (self.items, Some(self.items)) | |
2012 | } | |
2013 | } | |
2014 | ||
2015 | impl<T> ExactSizeIterator for RawIter<T> {} | |
2016 | impl<T> FusedIterator for RawIter<T> {} | |
2017 | ||
2018 | /// Iterator which consumes a table and returns elements. | |
6a06907d | 2019 | pub struct RawIntoIter<T, A: Allocator + Clone = Global> { |
48663c56 | 2020 | iter: RawIter<T>, |
6a06907d | 2021 | allocation: Option<(NonNull<u8>, Layout)>, |
48663c56 | 2022 | marker: PhantomData<T>, |
6a06907d | 2023 | alloc: A, |
48663c56 XL |
2024 | } |
2025 | ||
6a06907d | 2026 | impl<T, A: Allocator + Clone> RawIntoIter<T, A> { |
e74abb32 | 2027 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
2028 | pub fn iter(&self) -> RawIter<T> { |
2029 | self.iter.clone() | |
2030 | } | |
2031 | } | |
2032 | ||
6a06907d XL |
2033 | unsafe impl<T, A: Allocator + Clone> Send for RawIntoIter<T, A> where T: Send {} |
2034 | unsafe impl<T, A: Allocator + Clone> Sync for RawIntoIter<T, A> where T: Sync {} | |
48663c56 XL |
2035 | |
2036 | #[cfg(feature = "nightly")] | |
6a06907d | 2037 | unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> { |
e74abb32 | 2038 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
2039 | fn drop(&mut self) { |
2040 | unsafe { | |
2041 | // Drop all remaining elements | |
6a06907d | 2042 | self.iter.drop_elements(); |
48663c56 XL |
2043 | |
2044 | // Free the table | |
6a06907d XL |
2045 | if let Some((ptr, layout)) = self.allocation { |
2046 | self.alloc.deallocate(ptr, layout); | |
48663c56 XL |
2047 | } |
2048 | } | |
2049 | } | |
2050 | } | |
2051 | #[cfg(not(feature = "nightly"))] | |
6a06907d | 2052 | impl<T, A: Allocator + Clone> Drop for RawIntoIter<T, A> { |
e74abb32 | 2053 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
2054 | fn drop(&mut self) { |
2055 | unsafe { | |
2056 | // Drop all remaining elements | |
6a06907d | 2057 | self.iter.drop_elements(); |
48663c56 XL |
2058 | |
2059 | // Free the table | |
6a06907d XL |
2060 | if let Some((ptr, layout)) = self.allocation { |
2061 | self.alloc.deallocate(ptr, layout); | |
48663c56 XL |
2062 | } |
2063 | } | |
2064 | } | |
2065 | } | |
2066 | ||
6a06907d | 2067 | impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> { |
48663c56 XL |
2068 | type Item = T; |
2069 | ||
e74abb32 | 2070 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
2071 | fn next(&mut self) -> Option<T> { |
2072 | unsafe { Some(self.iter.next()?.read()) } | |
2073 | } | |
2074 | ||
e74abb32 | 2075 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
2076 | fn size_hint(&self) -> (usize, Option<usize>) { |
2077 | self.iter.size_hint() | |
2078 | } | |
2079 | } | |
2080 | ||
6a06907d XL |
2081 | impl<T, A: Allocator + Clone> ExactSizeIterator for RawIntoIter<T, A> {} |
2082 | impl<T, A: Allocator + Clone> FusedIterator for RawIntoIter<T, A> {} | |
48663c56 XL |
2083 | |
2084 | /// Iterator which consumes elements without freeing the table storage. | |
6a06907d | 2085 | pub struct RawDrain<'a, T, A: Allocator + Clone = Global> { |
48663c56 XL |
2086 | iter: RawIter<T>, |
2087 | ||
2088 | // The table is moved into the iterator for the duration of the drain. This | |
2089 | // ensures that an empty table is left if the drain iterator is leaked | |
2090 | // without dropping. | |
6a06907d XL |
2091 | table: ManuallyDrop<RawTable<T, A>>, |
2092 | orig_table: NonNull<RawTable<T, A>>, | |
48663c56 XL |
2093 | |
2094 | // We don't use a &'a mut RawTable<T> because we want RawDrain to be | |
2095 | // covariant over T. | |
6a06907d | 2096 | marker: PhantomData<&'a RawTable<T, A>>, |
48663c56 XL |
2097 | } |
2098 | ||
6a06907d | 2099 | impl<T, A: Allocator + Clone> RawDrain<'_, T, A> { |
e74abb32 | 2100 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
2101 | pub fn iter(&self) -> RawIter<T> { |
2102 | self.iter.clone() | |
2103 | } | |
2104 | } | |
2105 | ||
6a06907d XL |
2106 | unsafe impl<T, A: Allocator + Copy> Send for RawDrain<'_, T, A> where T: Send {} |
2107 | unsafe impl<T, A: Allocator + Copy> Sync for RawDrain<'_, T, A> where T: Sync {} | |
48663c56 | 2108 | |
6a06907d | 2109 | impl<T, A: Allocator + Clone> Drop for RawDrain<'_, T, A> { |
e74abb32 | 2110 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
2111 | fn drop(&mut self) { |
2112 | unsafe { | |
2113 | // Drop all remaining elements. Note that this may panic. | |
6a06907d | 2114 | self.iter.drop_elements(); |
48663c56 XL |
2115 | |
2116 | // Reset the contents of the table now that all elements have been | |
2117 | // dropped. | |
2118 | self.table.clear_no_drop(); | |
2119 | ||
2120 | // Move the now empty table back to its original location. | |
2121 | self.orig_table | |
2122 | .as_ptr() | |
2123 | .copy_from_nonoverlapping(&*self.table, 1); | |
2124 | } | |
2125 | } | |
2126 | } | |
2127 | ||
6a06907d | 2128 | impl<T, A: Allocator + Clone> Iterator for RawDrain<'_, T, A> { |
48663c56 XL |
2129 | type Item = T; |
2130 | ||
e74abb32 | 2131 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
2132 | fn next(&mut self) -> Option<T> { |
2133 | unsafe { | |
2134 | let item = self.iter.next()?; | |
2135 | Some(item.read()) | |
2136 | } | |
2137 | } | |
2138 | ||
e74abb32 | 2139 | #[cfg_attr(feature = "inline-more", inline)] |
48663c56 XL |
2140 | fn size_hint(&self) -> (usize, Option<usize>) { |
2141 | self.iter.size_hint() | |
2142 | } | |
2143 | } | |
2144 | ||
6a06907d XL |
2145 | impl<T, A: Allocator + Clone> ExactSizeIterator for RawDrain<'_, T, A> {} |
2146 | impl<T, A: Allocator + Clone> FusedIterator for RawDrain<'_, T, A> {} | |
3dfed10e XL |
2147 | |
2148 | /// Iterator over occupied buckets that could match a given hash. | |
2149 | /// | |
2150 | /// In rare cases, the iterator may return a bucket with a different hash. | |
6a06907d XL |
2151 | pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> { |
2152 | inner: RawIterHashInner<'a, A>, | |
2153 | _marker: PhantomData<T>, | |
2154 | } | |
2155 | ||
2156 | struct RawIterHashInner<'a, A: Allocator + Clone> { | |
2157 | table: &'a RawTableInner<A>, | |
3dfed10e XL |
2158 | |
2159 | // The top 7 bits of the hash. | |
2160 | h2_hash: u8, | |
2161 | ||
2162 | // The sequence of groups to probe in the search. | |
2163 | probe_seq: ProbeSeq, | |
2164 | ||
3dfed10e XL |
2165 | group: Group, |
2166 | ||
2167 | // The elements within the group with a matching h2-hash. | |
2168 | bitmask: BitMaskIter, | |
2169 | } | |
2170 | ||
6a06907d XL |
2171 | impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> { |
2172 | #[cfg_attr(feature = "inline-more", inline)] | |
2173 | fn new(table: &'a RawTable<T, A>, hash: u64) -> Self { | |
2174 | RawIterHash { | |
2175 | inner: RawIterHashInner::new(&table.table, hash), | |
2176 | _marker: PhantomData, | |
2177 | } | |
2178 | } | |
2179 | } | |
2180 | impl<'a, A: Allocator + Clone> RawIterHashInner<'a, A> { | |
2181 | #[cfg_attr(feature = "inline-more", inline)] | |
2182 | fn new(table: &'a RawTableInner<A>, hash: u64) -> Self { | |
3dfed10e XL |
2183 | unsafe { |
2184 | let h2_hash = h2(hash); | |
6a06907d XL |
2185 | let probe_seq = table.probe_seq(hash); |
2186 | let group = Group::load(table.ctrl(probe_seq.pos)); | |
3dfed10e XL |
2187 | let bitmask = group.match_byte(h2_hash).into_iter(); |
2188 | ||
6a06907d | 2189 | RawIterHashInner { |
3dfed10e XL |
2190 | table, |
2191 | h2_hash, | |
2192 | probe_seq, | |
3dfed10e XL |
2193 | group, |
2194 | bitmask, | |
2195 | } | |
2196 | } | |
2197 | } | |
2198 | } | |
2199 | ||
6a06907d | 2200 | impl<'a, T, A: Allocator + Clone> Iterator for RawIterHash<'a, T, A> { |
3dfed10e XL |
2201 | type Item = Bucket<T>; |
2202 | ||
2203 | fn next(&mut self) -> Option<Bucket<T>> { | |
6a06907d XL |
2204 | unsafe { |
2205 | match self.inner.next() { | |
2206 | Some(index) => Some(self.inner.table.bucket(index)), | |
2207 | None => None, | |
2208 | } | |
2209 | } | |
2210 | } | |
2211 | } | |
2212 | ||
2213 | impl<'a, A: Allocator + Clone> Iterator for RawIterHashInner<'a, A> { | |
2214 | type Item = usize; | |
2215 | ||
2216 | fn next(&mut self) -> Option<Self::Item> { | |
3dfed10e XL |
2217 | unsafe { |
2218 | loop { | |
2219 | if let Some(bit) = self.bitmask.next() { | |
6a06907d XL |
2220 | let index = (self.probe_seq.pos + bit) & self.table.bucket_mask; |
2221 | return Some(index); | |
3dfed10e XL |
2222 | } |
2223 | if likely(self.group.match_empty().any_bit_set()) { | |
2224 | return None; | |
2225 | } | |
6a06907d XL |
2226 | self.probe_seq.move_next(self.table.bucket_mask); |
2227 | self.group = Group::load(self.table.ctrl(self.probe_seq.pos)); | |
3dfed10e XL |
2228 | self.bitmask = self.group.match_byte(self.h2_hash).into_iter(); |
2229 | } | |
2230 | } | |
2231 | } | |
2232 | } | |
6a06907d XL |
2233 | |
2234 | #[cfg(test)] | |
2235 | mod test_map { | |
2236 | use super::*; | |
2237 | ||
2238 | #[test] | |
2239 | fn rehash() { | |
2240 | let mut table = RawTable::new(); | |
2241 | let hasher = |i: &u64| *i; | |
2242 | for i in 0..100 { | |
2243 | table.insert(i, i, hasher); | |
2244 | } | |
2245 | ||
2246 | for i in 0..100 { | |
2247 | unsafe { | |
2248 | assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i)); | |
2249 | } | |
2250 | assert!(table.find(i + 100, |x| *x == i + 100).is_none()); | |
2251 | } | |
2252 | ||
2253 | table.rehash_in_place(hasher); | |
2254 | ||
2255 | for i in 0..100 { | |
2256 | unsafe { | |
2257 | assert_eq!(table.find(i, |x| *x == i).map(|b| b.read()), Some(i)); | |
2258 | } | |
2259 | assert!(table.find(i + 100, |x| *x == i + 100).is_none()); | |
2260 | } | |
2261 | } | |
2262 | } |