// rustc.git: src/libstd/collections/hash/table.rs (upstream version 1.27.1+dfsg1)
// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use alloc::{Global, Alloc, Layout, CollectionAllocErr, oom};
use cmp;
use hash::{BuildHasher, Hash, Hasher};
use marker;
use mem::{align_of, size_of, needs_drop};
use mem;
use ops::{Deref, DerefMut};
use ptr::{self, Unique, NonNull};

use self::BucketState::*;

/// Integer type used for stored hash values.
///
/// No more than bit_width(usize) bits are needed to select a bucket.
///
/// The most significant bit is ours to use for tagging `SafeHash`.
///
/// (Even if we could have usize::MAX bytes allocated for buckets,
/// each bucket stores at least a `HashUint`, so there can be no more than
/// usize::MAX / size_of(usize) buckets.)
type HashUint = usize;

const EMPTY_BUCKET: HashUint = 0;
const EMPTY: usize = 1;

/// Special `Unique<HashUint>` that uses the lower bit of the pointer
/// to expose a boolean tag.
/// Note: when the pointer is initialized to EMPTY `.ptr()` will return
/// null and the tag functions shouldn't be used.
struct TaggedHashUintPtr(Unique<HashUint>);

impl TaggedHashUintPtr {
    #[inline]
    unsafe fn new(ptr: *mut HashUint) -> Self {
        debug_assert!(ptr as usize & 1 == 0 || ptr as usize == EMPTY as usize);
        TaggedHashUintPtr(Unique::new_unchecked(ptr))
    }

    #[inline]
    fn set_tag(&mut self, value: bool) {
        let mut usize_ptr = self.0.as_ptr() as usize;
        unsafe {
            if value {
                usize_ptr |= 1;
            } else {
                usize_ptr &= !1;
            }
            self.0 = Unique::new_unchecked(usize_ptr as *mut HashUint)
        }
    }

    #[inline]
    fn tag(&self) -> bool {
        (self.0.as_ptr() as usize) & 1 == 1
    }

    #[inline]
    fn ptr(&self) -> *mut HashUint {
        (self.0.as_ptr() as usize & !1) as *mut HashUint
    }
}
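
// The following test is not part of the original file; it is a minimal sketch
// illustrating the low-bit tagging scheme documented above. It only assumes
// that a `HashUint` on the stack is at least 2-byte aligned, so the low bit of
// its address is free for the tag and `ptr()` masks it back out.
#[test]
fn tagged_ptr_tag_roundtrip() {
    let mut value: HashUint = 0;
    let raw = &mut value as *mut HashUint;
    let mut tagged = unsafe { TaggedHashUintPtr::new(raw) };
    assert!(!tagged.tag());
    tagged.set_tag(true);
    // Setting the tag does not disturb the pointer returned by `ptr()`.
    assert!(tagged.tag());
    assert_eq!(tagged.ptr(), raw);
    tagged.set_tag(false);
    assert!(!tagged.tag());
}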

/// The raw hashtable, providing safe-ish access to the unzipped and highly
/// optimized arrays of hashes, and key-value pairs.
///
/// This design is a lot faster than the naive
/// `Vec<Option<(u64, K, V)>>`, because we don't pay for the overhead of an
/// option on every element, and we get a generally more cache-aware design.
///
/// Essential invariants of this structure:
///
///   - if `t.hashes[i] == EMPTY_BUCKET`, then `Bucket::at_index(&t, i).raw`
///     points to 'undefined' contents. Don't read from it. This invariant is
///     enforced outside this module with the `EmptyBucket`, `FullBucket`,
///     and `SafeHash` types.
///
///   - An `EmptyBucket` is only constructed at an index with
///     a hash of EMPTY_BUCKET.
///
///   - A `FullBucket` is only constructed at an index with a
///     non-EMPTY_BUCKET hash.
///
///   - A `SafeHash` is only constructed for non-`EMPTY_BUCKET` hash. We get
///     around hashes of zero by changing them to 0x8000_0000_0000_0000,
///     which will likely map to the same bucket, while not being confused
///     with "empty".
///
///   - Both "arrays represented by pointers" are the same length:
///     `capacity`. This is set at creation and never changes. The arrays
///     are unzipped and are more cache aware (scanning through 8 hashes
///     brings in at most 2 cache lines, since they're all right beside each
///     other). This layout may waste space in padding such as in a map from
///     u64 to u8, but is a more cache conscious layout as the key-value pairs
///     are only very shortly probed and the desired value will be in the same
///     or next cache line.
///
/// You can kind of think of this module/data structure as a safe wrapper
/// around just the "table" part of the hashtable. It enforces some
/// invariants at the type level and employs some performance trickery,
/// but in general is just a tricked out `Vec<Option<(u64, K, V)>>`.
///
/// The hashtable also exposes a special boolean tag. The tag defaults to false
/// when the RawTable is created and is accessible with the `tag` and `set_tag`
/// functions.
pub struct RawTable<K, V> {
    capacity_mask: usize,
    size: usize,
    hashes: TaggedHashUintPtr,

    // Because K/V do not appear directly in any of the types in the struct,
    // inform rustc that in fact instances of K and V are reachable from here.
    marker: marker::PhantomData<(K, V)>,
}

// An unsafe view of a RawTable bucket
// Valid indexes are within [0..table_capacity)
pub struct RawBucket<K, V> {
    hash_start: *mut HashUint,
    // We use *const to ensure covariance with respect to K and V
    pair_start: *const (K, V),
    idx: usize,
    _marker: marker::PhantomData<(K, V)>,
}

impl<K, V> Copy for RawBucket<K, V> {}
impl<K, V> Clone for RawBucket<K, V> {
    fn clone(&self) -> RawBucket<K, V> {
        *self
    }
}

pub struct Bucket<K, V, M> {
    raw: RawBucket<K, V>,
    table: M,
}

impl<K, V, M: Copy> Copy for Bucket<K, V, M> {}
impl<K, V, M: Copy> Clone for Bucket<K, V, M> {
    fn clone(&self) -> Bucket<K, V, M> {
        *self
    }
}

pub struct EmptyBucket<K, V, M> {
    raw: RawBucket<K, V>,
    table: M,
}

pub struct FullBucket<K, V, M> {
    raw: RawBucket<K, V>,
    table: M,
}

pub type FullBucketMut<'table, K, V> = FullBucket<K, V, &'table mut RawTable<K, V>>;

pub enum BucketState<K, V, M> {
    Empty(EmptyBucket<K, V, M>),
    Full(FullBucket<K, V, M>),
}

// A GapThenFull encapsulates the state of two consecutive buckets at once.
// The first bucket, called the gap, is known to be empty.
// The second bucket is full.
pub struct GapThenFull<K, V, M> {
    gap: EmptyBucket<K, V, ()>,
    full: FullBucket<K, V, M>,
}

/// A hash that is not zero, since we use a hash of zero to represent empty
/// buckets.
#[derive(PartialEq, Copy, Clone)]
pub struct SafeHash {
    hash: HashUint,
}

impl SafeHash {
    /// Peek at the hash value, which is guaranteed to be non-zero.
    #[inline(always)]
    pub fn inspect(&self) -> HashUint {
        self.hash
    }

    #[inline(always)]
    pub fn new(hash: u64) -> Self {
        // We need to avoid 0 in order to prevent collisions with
        // EMPTY_HASH. We can maintain our precious uniform distribution
        // of initial indexes by unconditionally setting the MSB,
        // effectively reducing the hashes by one bit.
        //
        // Truncate hash to fit in `HashUint`.
        let hash_bits = size_of::<HashUint>() * 8;
        SafeHash { hash: (1 << (hash_bits - 1)) | (hash as HashUint) }
    }
}
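
// Not part of the original file: a small sketch checking the property the
// comment above describes, namely that `SafeHash::new` unconditionally sets
// the most significant bit, so a stored hash can never equal `EMPTY_BUCKET`.
#[test]
fn safehash_is_never_zero() {
    let msb: HashUint = 1 << (size_of::<HashUint>() * 8 - 1);
    assert_eq!(SafeHash::new(0).inspect(), msb);
    assert_ne!(SafeHash::new(0).inspect(), EMPTY_BUCKET);
    assert_ne!(SafeHash::new(u64::max_value()).inspect(), EMPTY_BUCKET);
}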

/// We need to remove hashes of 0. That's reserved for empty buckets.
/// This function wraps up `hash_keyed` to be the only way outside this
/// module to generate a SafeHash.
pub fn make_hash<T: ?Sized, S>(hash_state: &S, t: &T) -> SafeHash
    where T: Hash,
          S: BuildHasher
{
    let mut state = hash_state.build_hasher();
    t.hash(&mut state);
    SafeHash::new(state.finish())
}
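
// Not in the original file: an illustrative sketch of feeding an arbitrary
// `BuildHasher` to `make_hash`. `ZeroBuilder`/`ZeroHasher` are hypothetical
// stand-ins (they always hash to 0), chosen to show that even a zero hash is
// remapped to a non-`EMPTY_BUCKET` value.
#[test]
fn make_hash_is_never_empty_bucket() {
    struct ZeroHasher;
    impl Hasher for ZeroHasher {
        fn finish(&self) -> u64 { 0 }
        fn write(&mut self, _bytes: &[u8]) {}
    }
    struct ZeroBuilder;
    impl BuildHasher for ZeroBuilder {
        type Hasher = ZeroHasher;
        fn build_hasher(&self) -> ZeroHasher { ZeroHasher }
    }
    assert_ne!(make_hash(&ZeroBuilder, &42u32).inspect(), EMPTY_BUCKET);
}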

// `replace` casts a `*HashUint` to a `*SafeHash`. Since we statically
// ensure that a `FullBucket` points to an index with a non-zero hash,
// and a `SafeHash` is just a `HashUint` with a different name, this is
// safe.
//
// This test ensures that a `SafeHash` really IS the same size as a
// `HashUint`. If you need to change the size of `SafeHash` (and
// consequently made this test fail), `replace` needs to be
// modified to no longer assume this.
#[test]
fn can_alias_safehash_as_hash() {
    assert_eq!(size_of::<SafeHash>(), size_of::<HashUint>())
}

// RawBucket methods are unsafe as it's possible to
// make a RawBucket point to invalid memory using safe code.
impl<K, V> RawBucket<K, V> {
    unsafe fn hash(&self) -> *mut HashUint {
        self.hash_start.offset(self.idx as isize)
    }
    unsafe fn pair(&self) -> *mut (K, V) {
        self.pair_start.offset(self.idx as isize) as *mut (K, V)
    }
    unsafe fn hash_pair(&self) -> (*mut HashUint, *mut (K, V)) {
        (self.hash(), self.pair())
    }
}

// Buckets hold references to the table.
impl<K, V, M> FullBucket<K, V, M> {
    /// Borrow a reference to the table.
    pub fn table(&self) -> &M {
        &self.table
    }
    /// Borrow a mutable reference to the table.
    pub fn table_mut(&mut self) -> &mut M {
        &mut self.table
    }
    /// Move out the reference to the table.
    pub fn into_table(self) -> M {
        self.table
    }
    /// Get the raw index.
    pub fn index(&self) -> usize {
        self.raw.idx
    }
    /// Get the raw bucket.
    pub fn raw(&self) -> RawBucket<K, V> {
        self.raw
    }
}

impl<K, V, M> EmptyBucket<K, V, M> {
    /// Borrow a reference to the table.
    pub fn table(&self) -> &M {
        &self.table
    }
    /// Borrow a mutable reference to the table.
    pub fn table_mut(&mut self) -> &mut M {
        &mut self.table
    }
}

impl<K, V, M> Bucket<K, V, M> {
    /// Get the raw index.
    pub fn index(&self) -> usize {
        self.raw.idx
    }
    /// Move out the table.
    pub fn into_table(self) -> M {
        self.table
    }
}

impl<K, V, M> Deref for FullBucket<K, V, M>
    where M: Deref<Target = RawTable<K, V>>
{
    type Target = RawTable<K, V>;
    fn deref(&self) -> &RawTable<K, V> {
        &self.table
    }
}

/// `Put` is implemented for types which provide access to a table and cannot be invalidated
/// by filling a bucket. A similar implementation for `Take` is possible.
pub trait Put<K, V> {
    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V>;
}


impl<'t, K, V> Put<K, V> for &'t mut RawTable<K, V> {
    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
        *self
    }
}

impl<K, V, M> Put<K, V> for Bucket<K, V, M>
    where M: Put<K, V>
{
    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
        self.table.borrow_table_mut()
    }
}

impl<K, V, M> Put<K, V> for FullBucket<K, V, M>
    where M: Put<K, V>
{
    unsafe fn borrow_table_mut(&mut self) -> &mut RawTable<K, V> {
        self.table.borrow_table_mut()
    }
}

impl<K, V, M: Deref<Target = RawTable<K, V>>> Bucket<K, V, M> {
    pub fn new(table: M, hash: SafeHash) -> Bucket<K, V, M> {
        Bucket::at_index(table, hash.inspect() as usize)
    }

    pub fn new_from(r: RawBucket<K, V>, t: M)
        -> Bucket<K, V, M>
    {
        Bucket {
            raw: r,
            table: t,
        }
    }

    pub fn at_index(table: M, ib_index: usize) -> Bucket<K, V, M> {
        // If capacity is 0, then the RawBucket will be populated with bogus pointers.
        // This is an uncommon case though, so we only check for it in debug builds.
        debug_assert!(table.capacity() > 0,
                      "Table should have capacity at this point");
        let ib_index = ib_index & table.capacity_mask;
        Bucket {
            raw: table.raw_bucket_at(ib_index),
            table,
        }
    }

    pub fn first(table: M) -> Bucket<K, V, M> {
        Bucket {
            raw: table.raw_bucket_at(0),
            table,
        }
    }

    // "So a few of the first shall be last: for many be called,
    // but few chosen."
    //
    // We'll most likely encounter a few buckets at the beginning that
    // have their initial buckets near the end of the table. They were
    // placed at the beginning as the probe wrapped around the table
    // during insertion. We must skip forward to a bucket that won't
    // get reinserted too early and won't unfairly steal another's spot.
    // This eliminates the need for robin hood.
    pub fn head_bucket(table: M) -> Bucket<K, V, M> {
        let mut bucket = Bucket::first(table);

        loop {
            bucket = match bucket.peek() {
                Full(full) => {
                    if full.displacement() == 0 {
                        // This bucket occupies its ideal spot.
                        // It indicates the start of another "cluster".
                        bucket = full.into_bucket();
                        break;
                    }
                    // Leaving this bucket in the last cluster for later.
                    full.into_bucket()
                }
                Empty(b) => {
                    // Encountered a hole between clusters.
                    b.into_bucket()
                }
            };
            bucket.next();
        }
        bucket
    }

    /// Reads a bucket at a given index, returning an enum indicating whether
    /// it's initialized or not. You need to match on this enum to get
    /// the appropriate types to call most of the other functions in
    /// this module.
    pub fn peek(self) -> BucketState<K, V, M> {
        match unsafe { *self.raw.hash() } {
            EMPTY_BUCKET => {
                Empty(EmptyBucket {
                    raw: self.raw,
                    table: self.table,
                })
            }
            _ => {
                Full(FullBucket {
                    raw: self.raw,
                    table: self.table,
                })
            }
        }
    }

    /// Modifies the bucket in place to make it point to the next slot.
    pub fn next(&mut self) {
        self.raw.idx = self.raw.idx.wrapping_add(1) & self.table.capacity_mask;
    }

    /// Modifies the bucket in place to make it point to the previous slot.
    pub fn prev(&mut self) {
        self.raw.idx = self.raw.idx.wrapping_sub(1) & self.table.capacity_mask;
    }
}

impl<K, V, M: Deref<Target = RawTable<K, V>>> EmptyBucket<K, V, M> {
    #[inline]
    pub fn next(self) -> Bucket<K, V, M> {
        let mut bucket = self.into_bucket();
        bucket.next();
        bucket
    }

    #[inline]
    pub fn into_bucket(self) -> Bucket<K, V, M> {
        Bucket {
            raw: self.raw,
            table: self.table,
        }
    }

    pub fn gap_peek(self) -> Result<GapThenFull<K, V, M>, Bucket<K, V, M>> {
        let gap = EmptyBucket {
            raw: self.raw,
            table: (),
        };

        match self.next().peek() {
            Full(bucket) => {
                Ok(GapThenFull {
                    gap,
                    full: bucket,
                })
            }
            Empty(e) => Err(e.into_bucket()),
        }
    }
}

impl<K, V, M> EmptyBucket<K, V, M>
    where M: Put<K, V>
{
    /// Puts given key and value pair, along with the key's hash,
    /// into this bucket in the hashtable. Note how `self` is 'moved' into
    /// this function, because this slot will no longer be empty when
    /// we return! A `FullBucket` is returned for later use, pointing to
    /// the newly-filled slot in the hashtable.
    ///
    /// Use `make_hash` to construct a `SafeHash` to pass to this function.
    pub fn put(mut self, hash: SafeHash, key: K, value: V) -> FullBucket<K, V, M> {
        unsafe {
            *self.raw.hash() = hash.inspect();
            ptr::write(self.raw.pair(), (key, value));

            self.table.borrow_table_mut().size += 1;
        }

        FullBucket {
            raw: self.raw,
            table: self.table,
        }
    }
}
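
// Not part of the original file: a sketch of the basic put/peek cycle on a
// raw table. It assumes a power-of-two capacity (as the rest of this module
// does); the key and value used here are arbitrary.
#[test]
fn put_then_peek() {
    let mut table: RawTable<u32, u32> = RawTable::new(8);
    let hash = SafeHash::new(0);
    let full = match Bucket::new(&mut table, hash).peek() {
        Empty(empty) => empty.put(hash, 1, 10),
        Full(_) => unreachable!("a freshly created table has no full buckets"),
    };
    // The entry landed in its ideal slot, so it has not been displaced.
    assert_eq!(full.displacement(), 0);
    assert_eq!(full.read(), (&1, &10));
    assert_eq!(full.table().size(), 1);
}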

impl<K, V, M: Deref<Target = RawTable<K, V>>> FullBucket<K, V, M> {
    #[inline]
    pub fn next(self) -> Bucket<K, V, M> {
        let mut bucket = self.into_bucket();
        bucket.next();
        bucket
    }

    #[inline]
    pub fn into_bucket(self) -> Bucket<K, V, M> {
        Bucket {
            raw: self.raw,
            table: self.table,
        }
    }

    /// Duplicates the current position. This can be useful for operations
    /// on two or more buckets.
    pub fn stash(self) -> FullBucket<K, V, Self> {
        FullBucket {
            raw: self.raw,
            table: self,
        }
    }

    /// Get the distance between this bucket and the 'ideal' location
    /// as determined by the key's hash stored in it.
    ///
    /// In the Robin Hood hashing literature, this is called the "distance to
    /// initial bucket", or DIB. Also known as "probe count".
    pub fn displacement(&self) -> usize {
        // Calculates the distance one has to travel when going from
        // `hash mod capacity` onwards to `idx mod capacity`, wrapping around
        // if the destination is not reached before the end of the table.
        (self.raw.idx.wrapping_sub(self.hash().inspect() as usize)) & self.table.capacity_mask
    }

    #[inline]
    pub fn hash(&self) -> SafeHash {
        unsafe { SafeHash { hash: *self.raw.hash() } }
    }

    /// Gets references to the key and value at a given index.
    pub fn read(&self) -> (&K, &V) {
        unsafe {
            let pair_ptr = self.raw.pair();
            (&(*pair_ptr).0, &(*pair_ptr).1)
        }
    }
}
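
// Not in the original file: a worked example of the wrapping-distance
// arithmetic used by `displacement()` above, for a hypothetical table of
// capacity 8 (mask 0b111). An entry whose ideal slot is index 6 but that
// ended up at index 1 has probed 3 slots, wrapping past the end of the table.
#[test]
fn displacement_arithmetic_wraps_around() {
    let capacity_mask = 8usize - 1;
    let ideal_slot = 6usize;
    let actual_idx = 1usize;
    assert_eq!(actual_idx.wrapping_sub(ideal_slot) & capacity_mask, 3);
}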

// We take a mutable reference to the table instead of accepting anything that
// implements `DerefMut` to prevent fn `take` from being called on `stash`ed
// buckets.
impl<'t, K, V> FullBucket<K, V, &'t mut RawTable<K, V>> {
    /// Removes this bucket's key and value from the hashtable.
    ///
    /// This works similarly to `put`, building an `EmptyBucket` out of the
    /// taken bucket.
    pub fn take(self) -> (EmptyBucket<K, V, &'t mut RawTable<K, V>>, K, V) {
        self.table.size -= 1;

        unsafe {
            *self.raw.hash() = EMPTY_BUCKET;
            let (k, v) = ptr::read(self.raw.pair());
            (EmptyBucket {
                 raw: self.raw,
                 table: self.table,
             },
             k,
             v)
        }
    }
}
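
// Not part of the original file: a sketch pairing `put` with `take`. Taking
// the entry hands back the key and value and leaves the slot empty again,
// decrementing the table size.
#[test]
fn put_then_take() {
    let mut table: RawTable<u32, u32> = RawTable::new(8);
    let hash = SafeHash::new(2);
    let (k, v) = {
        let full = match Bucket::new(&mut table, hash).peek() {
            Empty(empty) => empty.put(hash, 2, 20),
            Full(_) => unreachable!(),
        };
        let (_empty, k, v) = full.take();
        (k, v)
    };
    assert_eq!((k, v), (2, 20));
    assert_eq!(table.size(), 0);
}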

// This use of `Put` is misleading and restrictive, but safe and sufficient for our use cases
// where `M` is a full bucket or table reference type with mutable access to the table.
impl<K, V, M> FullBucket<K, V, M>
    where M: Put<K, V>
{
    pub fn replace(&mut self, h: SafeHash, k: K, v: V) -> (SafeHash, K, V) {
        unsafe {
            let old_hash = ptr::replace(self.raw.hash() as *mut SafeHash, h);
            let (old_key, old_val) = ptr::replace(self.raw.pair(), (k, v));

            (old_hash, old_key, old_val)
        }
    }
}

impl<K, V, M> FullBucket<K, V, M>
    where M: Deref<Target = RawTable<K, V>> + DerefMut
{
    /// Gets mutable references to the key and value at a given index.
    pub fn read_mut(&mut self) -> (&mut K, &mut V) {
        unsafe {
            let pair_ptr = self.raw.pair();
            (&mut (*pair_ptr).0, &mut (*pair_ptr).1)
        }
    }
}

impl<'t, K, V, M> FullBucket<K, V, M>
    where M: Deref<Target = RawTable<K, V>> + 't
{
    /// Exchange a bucket state for immutable references into the table.
    /// Because the underlying reference to the table is also consumed,
    /// no further changes to the structure of the table are possible;
    /// in exchange for this, the returned references have a longer lifetime
    /// than the references returned by `read()`.
    pub fn into_refs(self) -> (&'t K, &'t V) {
        unsafe {
            let pair_ptr = self.raw.pair();
            (&(*pair_ptr).0, &(*pair_ptr).1)
        }
    }
}

impl<'t, K, V, M> FullBucket<K, V, M>
    where M: Deref<Target = RawTable<K, V>> + DerefMut + 't
{
    /// This works similarly to `into_refs`, exchanging a bucket state
    /// for mutable references into the table.
    pub fn into_mut_refs(self) -> (&'t mut K, &'t mut V) {
        unsafe {
            let pair_ptr = self.raw.pair();
            (&mut (*pair_ptr).0, &mut (*pair_ptr).1)
        }
    }
}

impl<K, V, M> GapThenFull<K, V, M>
    where M: Deref<Target = RawTable<K, V>>
{
    #[inline]
    pub fn full(&self) -> &FullBucket<K, V, M> {
        &self.full
    }

    pub fn into_table(self) -> M {
        self.full.into_table()
    }

    pub fn shift(mut self) -> Result<GapThenFull<K, V, M>, Bucket<K, V, M>> {
        unsafe {
            let (gap_hash, gap_pair) = self.gap.raw.hash_pair();
            let (full_hash, full_pair) = self.full.raw.hash_pair();
            *gap_hash = mem::replace(&mut *full_hash, EMPTY_BUCKET);
            ptr::copy_nonoverlapping(full_pair, gap_pair, 1);
        }

        let FullBucket { raw: prev_raw, .. } = self.full;

        match self.full.next().peek() {
            Full(bucket) => {
                self.gap.raw = prev_raw;

                self.full = bucket;

                Ok(self)
            }
            Empty(b) => Err(b.into_bucket()),
        }
    }
}
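
// Not part of the original file: a sketch of backward shifting with
// `gap_peek`/`shift`. With a full bucket at index 1 and an empty one at
// index 0, `shift` moves the entry back into the gap; the shift chain ends
// immediately because bucket 2 is empty. Indexes and values are arbitrary.
#[test]
fn gap_then_full_shift() {
    let mut table: RawTable<u32, u32> = RawTable::new(8);
    let hash = SafeHash::new(1); // masked with the capacity, this maps to bucket 1
    {
        match Bucket::at_index(&mut table, 1).peek() {
            Empty(empty) => { empty.put(hash, 7, 70); }
            Full(_) => unreachable!(),
        }
    }
    {
        let gap = match Bucket::at_index(&mut table, 0).peek() {
            Empty(empty) => empty.gap_peek().ok().expect("bucket 1 should be full"),
            Full(_) => unreachable!(),
        };
        // Bucket 2 is empty, so there is nothing further to shift.
        assert!(gap.shift().is_err());
    }
    assert_eq!(table.size(), 1);
    match Bucket::at_index(&mut table, 0).peek() {
        Full(full) => assert_eq!(full.read(), (&7, &70)),
        Empty(_) => panic!("the entry should have been shifted into bucket 0"),
    }
}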


/// Rounds up to a multiple of a power of two. Returns the closest multiple
/// of `target_alignment` that is higher or equal to `unrounded`.
///
/// # Panics
///
/// Panics if `target_alignment` is not a power of two.
#[inline]
fn round_up_to_next(unrounded: usize, target_alignment: usize) -> usize {
    assert!(target_alignment.is_power_of_two());
    (unrounded + target_alignment - 1) & !(target_alignment - 1)
}

#[test]
fn test_rounding() {
    assert_eq!(round_up_to_next(0, 4), 0);
    assert_eq!(round_up_to_next(1, 4), 4);
    assert_eq!(round_up_to_next(2, 4), 4);
    assert_eq!(round_up_to_next(3, 4), 4);
    assert_eq!(round_up_to_next(4, 4), 4);
    assert_eq!(round_up_to_next(5, 4), 8);
}

// Returns a tuple of (pairs_offset, end_of_pairs, oflo) from the start of a
// mallocated array, where `oflo` indicates whether the offset arithmetic overflowed.
#[inline]
fn calculate_offsets(hashes_size: usize,
                     pairs_size: usize,
                     pairs_align: usize)
                     -> (usize, usize, bool) {
    let pairs_offset = round_up_to_next(hashes_size, pairs_align);
    let (end_of_pairs, oflo) = pairs_offset.overflowing_add(pairs_size);

    (pairs_offset, end_of_pairs, oflo)
}

// Returns a tuple of (minimum required malloc alignment, array_size, oflo)
// for the single mallocated array, where `oflo` indicates whether the size arithmetic overflowed.
fn calculate_allocation(hash_size: usize,
                        hash_align: usize,
                        pairs_size: usize,
                        pairs_align: usize)
                        -> (usize, usize, bool) {
    let (_, end_of_pairs, oflo) = calculate_offsets(hash_size, pairs_size, pairs_align);

    let align = cmp::max(hash_align, pairs_align);

    (align, end_of_pairs, oflo)
}

#[test]
fn test_offset_calculation() {
    assert_eq!(calculate_allocation(128, 8, 16, 8), (8, 144, false));
    assert_eq!(calculate_allocation(3, 1, 2, 1), (1, 5, false));
    assert_eq!(calculate_allocation(6, 2, 12, 4), (4, 20, false));
    assert_eq!(calculate_offsets(128, 15, 4), (128, 143, false));
    assert_eq!(calculate_offsets(3, 2, 4), (4, 6, false));
    assert_eq!(calculate_offsets(6, 12, 4), (8, 20, false));
}
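
// Not in the original file: a concrete illustration of the single-buffer
// layout these helpers describe. For a hypothetical table of 4 buckets
// holding (u64, u64) pairs on a 64-bit target, the hashes occupy bytes
// [0, 32) and the pairs start at byte 32 of the same allocation.
#[test]
fn single_buffer_layout_example() {
    if size_of::<usize>() == 8 {
        let hashes_size = 4 * size_of::<HashUint>();
        let pairs_size = 4 * size_of::<(u64, u64)>();
        let layout = calculate_offsets(hashes_size, pairs_size, align_of::<(u64, u64)>());
        assert_eq!(layout, (32, 32 + 64, false));
    }
}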

impl<K, V> RawTable<K, V> {
    /// Does not initialize the buckets. The caller should ensure they,
    /// at the very least, set every hash to EMPTY_BUCKET.
    /// Returns an error if it cannot allocate or capacity overflows.
    unsafe fn try_new_uninitialized(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
        if capacity == 0 {
            return Ok(RawTable {
                size: 0,
                capacity_mask: capacity.wrapping_sub(1),
                hashes: TaggedHashUintPtr::new(EMPTY as *mut HashUint),
                marker: marker::PhantomData,
            });
        }

        // No need for `checked_mul` before a more restrictive check performed
        // later in this method.
        let hashes_size = capacity.wrapping_mul(size_of::<HashUint>());
        let pairs_size = capacity.wrapping_mul(size_of::<(K, V)>());

        // Allocating hashmaps is a little tricky. We need to allocate two
        // arrays, but since we know their sizes and alignments up front,
        // we just allocate a single array, and then have the subarrays
        // point into it.
        //
        // This is great in theory, but in practice getting the alignment
        // right is a little subtle. Therefore, calculating offsets has been
        // factored out into a different function.
        let (alignment, size, oflo) = calculate_allocation(hashes_size,
                                                           align_of::<HashUint>(),
                                                           pairs_size,
                                                           align_of::<(K, V)>());
        if oflo {
            return Err(CollectionAllocErr::CapacityOverflow);
        }

        // One check for overflow that covers calculation and rounding of size.
        let size_of_bucket = size_of::<HashUint>().checked_add(size_of::<(K, V)>())
            .ok_or(CollectionAllocErr::CapacityOverflow)?;
        let capacity_mul_size_of_bucket = capacity.checked_mul(size_of_bucket);
        if capacity_mul_size_of_bucket.is_none() || size < capacity_mul_size_of_bucket.unwrap() {
            return Err(CollectionAllocErr::CapacityOverflow);
        }

        let buffer = Global.alloc(Layout::from_size_align(size, alignment)
            .map_err(|_| CollectionAllocErr::CapacityOverflow)?)?;

        Ok(RawTable {
            capacity_mask: capacity.wrapping_sub(1),
            size: 0,
            hashes: TaggedHashUintPtr::new(buffer.cast().as_ptr()),
            marker: marker::PhantomData,
        })
    }

    /// Does not initialize the buckets. The caller should ensure they,
    /// at the very least, set every hash to EMPTY_BUCKET.
    unsafe fn new_uninitialized(capacity: usize) -> RawTable<K, V> {
        match Self::try_new_uninitialized(capacity) {
            Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
            Err(CollectionAllocErr::AllocErr) => oom(),
            Ok(table) => { table }
        }
    }

    fn raw_bucket_at(&self, index: usize) -> RawBucket<K, V> {
        let hashes_size = self.capacity() * size_of::<HashUint>();
        let pairs_size = self.capacity() * size_of::<(K, V)>();

        let (pairs_offset, _, oflo) =
            calculate_offsets(hashes_size, pairs_size, align_of::<(K, V)>());
        debug_assert!(!oflo, "capacity overflow");

        let buffer = self.hashes.ptr() as *mut u8;
        unsafe {
            RawBucket {
                hash_start: buffer as *mut HashUint,
                pair_start: buffer.offset(pairs_offset as isize) as *const (K, V),
                idx: index,
                _marker: marker::PhantomData,
            }
        }
    }

    /// Tries to create a new raw table from a given capacity. If it cannot allocate,
    /// it returns with AllocErr.
    pub fn try_new(capacity: usize) -> Result<RawTable<K, V>, CollectionAllocErr> {
        unsafe {
            let ret = RawTable::try_new_uninitialized(capacity)?;
            ptr::write_bytes(ret.hashes.ptr(), 0, capacity);
            Ok(ret)
        }
    }

    /// Creates a new raw table from a given capacity. All buckets are
    /// initially empty.
    pub fn new(capacity: usize) -> RawTable<K, V> {
        match Self::try_new(capacity) {
            Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
            Err(CollectionAllocErr::AllocErr) => oom(),
            Ok(table) => { table }
        }
    }

    /// The hashtable's capacity, similar to a vector's.
    pub fn capacity(&self) -> usize {
        self.capacity_mask.wrapping_add(1)
    }

    /// The number of elements ever `put` in the hashtable, minus the number
    /// of elements ever `take`n.
    pub fn size(&self) -> usize {
        self.size
    }

    fn raw_buckets(&self) -> RawBuckets<K, V> {
        RawBuckets {
            raw: self.raw_bucket_at(0),
            elems_left: self.size,
            marker: marker::PhantomData,
        }
    }

    pub fn iter(&self) -> Iter<K, V> {
        Iter {
            iter: self.raw_buckets(),
        }
    }

    pub fn iter_mut(&mut self) -> IterMut<K, V> {
        IterMut {
            iter: self.raw_buckets(),
            _marker: marker::PhantomData,
        }
    }

    pub fn into_iter(self) -> IntoIter<K, V> {
        let RawBuckets { raw, elems_left, .. } = self.raw_buckets();
        // Replace the marker regardless of lifetime bounds on parameters.
        IntoIter {
            iter: RawBuckets {
                raw,
                elems_left,
                marker: marker::PhantomData,
            },
            table: self,
        }
    }

    pub fn drain(&mut self) -> Drain<K, V> {
        let RawBuckets { raw, elems_left, .. } = self.raw_buckets();
        // Replace the marker regardless of lifetime bounds on parameters.
        Drain {
            iter: RawBuckets {
                raw,
                elems_left,
                marker: marker::PhantomData,
            },
            table: NonNull::from(self),
            marker: marker::PhantomData,
        }
    }

    /// Drops buckets in reverse order. It leaves the table in an inconsistent
    /// state and should only be used for dropping the table's remaining
    /// entries. It's used in the implementation of Drop.
    unsafe fn rev_drop_buckets(&mut self) {
        // initialize the raw bucket past the end of the table
        let mut raw = self.raw_bucket_at(self.capacity());
        let mut elems_left = self.size;

        while elems_left != 0 {
            raw.idx -= 1;

            if *raw.hash() != EMPTY_BUCKET {
                elems_left -= 1;
                ptr::drop_in_place(raw.pair());
            }
        }
    }

    /// Set the table tag
    pub fn set_tag(&mut self, value: bool) {
        self.hashes.set_tag(value)
    }

    /// Get the table tag
    pub fn tag(&self) -> bool {
        self.hashes.tag()
    }
}
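
// Not part of the original file: a minimal usage sketch of the constructors
// and accessors above. A freshly created table reports the requested
// (power-of-two) capacity, a size of zero, and an empty iterator, because
// `new` zeroes every hash to EMPTY_BUCKET.
#[test]
fn new_table_starts_empty() {
    let table: RawTable<u32, u32> = RawTable::new(8);
    assert_eq!(table.capacity(), 8);
    assert_eq!(table.size(), 0);
    assert_eq!(table.iter().count(), 0);
}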

/// A raw iterator. The basis for some other iterators in this module. Although
/// this interface is safe, it's not used outside this module.
struct RawBuckets<'a, K, V> {
    raw: RawBucket<K, V>,
    elems_left: usize,

    // Strictly speaking, this should be &'a (K,V), but that would
    // require that K:'a, and we often use RawBuckets<'static...> for
    // move iterations, so that messes up a lot of other things. So
    // just use `&'a ()` as this is not a publicly exposed type
    // anyway.
    marker: marker::PhantomData<&'a ()>,
}

// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
impl<'a, K, V> Clone for RawBuckets<'a, K, V> {
    fn clone(&self) -> RawBuckets<'a, K, V> {
        RawBuckets {
            raw: self.raw,
            elems_left: self.elems_left,
            marker: marker::PhantomData,
        }
    }
}


impl<'a, K, V> Iterator for RawBuckets<'a, K, V> {
    type Item = RawBucket<K, V>;

    fn next(&mut self) -> Option<RawBucket<K, V>> {
        if self.elems_left == 0 {
            return None;
        }

        loop {
            unsafe {
                let item = self.raw;
                self.raw.idx += 1;
                if *item.hash() != EMPTY_BUCKET {
                    self.elems_left -= 1;
                    return Some(item);
                }
            }
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.elems_left, Some(self.elems_left))
    }
}

impl<'a, K, V> ExactSizeIterator for RawBuckets<'a, K, V> {
    fn len(&self) -> usize {
        self.elems_left
    }
}

/// Iterator over shared references to entries in a table.
pub struct Iter<'a, K: 'a, V: 'a> {
    iter: RawBuckets<'a, K, V>,
}

unsafe impl<'a, K: Sync, V: Sync> Sync for Iter<'a, K, V> {}
unsafe impl<'a, K: Sync, V: Sync> Send for Iter<'a, K, V> {}

// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
impl<'a, K, V> Clone for Iter<'a, K, V> {
    fn clone(&self) -> Iter<'a, K, V> {
        Iter {
            iter: self.iter.clone(),
        }
    }
}

/// Iterator over mutable references to entries in a table.
pub struct IterMut<'a, K: 'a, V: 'a> {
    iter: RawBuckets<'a, K, V>,
    // To ensure invariance with respect to V
    _marker: marker::PhantomData<&'a mut V>,
}

unsafe impl<'a, K: Sync, V: Sync> Sync for IterMut<'a, K, V> {}
// Both K: Sync and K: Send are correct for IterMut's Send impl,
// but Send is the more useful bound
unsafe impl<'a, K: Send, V: Send> Send for IterMut<'a, K, V> {}

impl<'a, K: 'a, V: 'a> IterMut<'a, K, V> {
    pub fn iter(&self) -> Iter<K, V> {
        Iter {
            iter: self.iter.clone(),
        }
    }
}

/// Iterator over the entries in a table, consuming the table.
pub struct IntoIter<K, V> {
    table: RawTable<K, V>,
    iter: RawBuckets<'static, K, V>,
}

unsafe impl<K: Sync, V: Sync> Sync for IntoIter<K, V> {}
unsafe impl<K: Send, V: Send> Send for IntoIter<K, V> {}

impl<K, V> IntoIter<K, V> {
    pub fn iter(&self) -> Iter<K, V> {
        Iter {
            iter: self.iter.clone(),
        }
    }
}

/// Iterator over the entries in a table, clearing the table.
pub struct Drain<'a, K: 'a, V: 'a> {
    table: NonNull<RawTable<K, V>>,
    iter: RawBuckets<'static, K, V>,
    marker: marker::PhantomData<&'a RawTable<K, V>>,
}

unsafe impl<'a, K: Sync, V: Sync> Sync for Drain<'a, K, V> {}
unsafe impl<'a, K: Send, V: Send> Send for Drain<'a, K, V> {}

impl<'a, K, V> Drain<'a, K, V> {
    pub fn iter(&self) -> Iter<K, V> {
        Iter {
            iter: self.iter.clone(),
        }
    }
}

impl<'a, K, V> Iterator for Iter<'a, K, V> {
    type Item = (&'a K, &'a V);

    fn next(&mut self) -> Option<(&'a K, &'a V)> {
        self.iter.next().map(|raw| unsafe {
            let pair_ptr = raw.pair();
            (&(*pair_ptr).0, &(*pair_ptr).1)
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<'a, K, V> ExactSizeIterator for Iter<'a, K, V> {
    fn len(&self) -> usize {
        self.iter.len()
    }
}

impl<'a, K, V> Iterator for IterMut<'a, K, V> {
    type Item = (&'a K, &'a mut V);

    fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
        self.iter.next().map(|raw| unsafe {
            let pair_ptr = raw.pair();
            (&(*pair_ptr).0, &mut (*pair_ptr).1)
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<'a, K, V> ExactSizeIterator for IterMut<'a, K, V> {
    fn len(&self) -> usize {
        self.iter.len()
    }
}

impl<K, V> Iterator for IntoIter<K, V> {
    type Item = (SafeHash, K, V);

    fn next(&mut self) -> Option<(SafeHash, K, V)> {
        self.iter.next().map(|raw| {
            self.table.size -= 1;
            unsafe {
                let (k, v) = ptr::read(raw.pair());
                (SafeHash { hash: *raw.hash() }, k, v)
            }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<K, V> ExactSizeIterator for IntoIter<K, V> {
    fn len(&self) -> usize {
        self.iter().len()
    }
}

impl<'a, K, V> Iterator for Drain<'a, K, V> {
    type Item = (SafeHash, K, V);

    #[inline]
    fn next(&mut self) -> Option<(SafeHash, K, V)> {
        self.iter.next().map(|raw| {
            unsafe {
                self.table.as_mut().size -= 1;
                let (k, v) = ptr::read(raw.pair());
                (SafeHash { hash: ptr::replace(&mut *raw.hash(), EMPTY_BUCKET) }, k, v)
            }
        })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}

impl<'a, K, V> ExactSizeIterator for Drain<'a, K, V> {
    fn len(&self) -> usize {
        self.iter.len()
    }
}

impl<'a, K: 'a, V: 'a> Drop for Drain<'a, K, V> {
    fn drop(&mut self) {
        self.for_each(drop);
    }
}
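
// Not part of the original file: a sketch of `drain`, which yields each
// stored entry together with its SafeHash and leaves the table empty (every
// drained hash slot is reset to EMPTY_BUCKET).
#[test]
fn drain_empties_the_table() {
    let mut table: RawTable<u32, u32> = RawTable::new(8);
    let hash = SafeHash::new(3);
    {
        match Bucket::new(&mut table, hash).peek() {
            Empty(empty) => { empty.put(hash, 3, 30); }
            Full(_) => unreachable!(),
        }
    }
    let drained: Vec<(SafeHash, u32, u32)> = table.drain().collect();
    assert_eq!(drained.len(), 1);
    assert_eq!((drained[0].1, drained[0].2), (3, 30));
    assert_eq!(table.size(), 0);
}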

impl<K: Clone, V: Clone> Clone for RawTable<K, V> {
    fn clone(&self) -> RawTable<K, V> {
        unsafe {
            let cap = self.capacity();
            let mut new_ht = RawTable::new_uninitialized(cap);

            let mut new_buckets = new_ht.raw_bucket_at(0);
            let mut buckets = self.raw_bucket_at(0);
            while buckets.idx < cap {
                *new_buckets.hash() = *buckets.hash();
                if *new_buckets.hash() != EMPTY_BUCKET {
                    let pair_ptr = buckets.pair();
                    let kv = ((*pair_ptr).0.clone(), (*pair_ptr).1.clone());
                    ptr::write(new_buckets.pair(), kv);
                }
                buckets.idx += 1;
                new_buckets.idx += 1;
            }

            new_ht.size = self.size();
            new_ht.set_tag(self.tag());

            new_ht
        }
    }
}
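
// Not part of the original file: a sketch showing that `Clone` performs a
// deep copy of the occupied buckets, so the clone and the original report
// the same contents independently.
#[test]
fn clone_copies_entries() {
    let mut table: RawTable<u32, u32> = RawTable::new(8);
    let hash = SafeHash::new(5);
    {
        match Bucket::new(&mut table, hash).peek() {
            Empty(empty) => { empty.put(hash, 5, 50); }
            Full(_) => unreachable!(),
        }
    }
    let cloned = table.clone();
    assert_eq!(cloned.size(), 1);
    assert_eq!(cloned.iter().next(), Some((&5, &50)));
    assert_eq!(table.size(), 1);
}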

unsafe impl<#[may_dangle] K, #[may_dangle] V> Drop for RawTable<K, V> {
    fn drop(&mut self) {
        if self.capacity() == 0 {
            return;
        }

        // This is done in reverse because we've likely partially taken
        // some elements out with `.into_iter()` from the front.
        // Check if the size is 0, so we don't do a useless scan when
        // dropping empty tables such as on resize.
        // Also avoid double drop of elements that have been already moved out.
        unsafe {
            if needs_drop::<(K, V)>() {
                // avoid linear runtime for types that don't need drop
                self.rev_drop_buckets();
            }
        }

        let hashes_size = self.capacity() * size_of::<HashUint>();
        let pairs_size = self.capacity() * size_of::<(K, V)>();
        let (align, size, oflo) = calculate_allocation(hashes_size,
                                                       align_of::<HashUint>(),
                                                       pairs_size,
                                                       align_of::<(K, V)>());

        debug_assert!(!oflo, "should be impossible");

        unsafe {
            Global.dealloc(NonNull::new_unchecked(self.hashes.ptr()).as_opaque(),
                           Layout::from_size_align(size, align).unwrap());
            // Remember how everything was allocated out of one buffer
            // during initialization? We only need one call to free here.
        }
    }
}