1 //! This is the core implementation that doesn't depend on the hasher at all.
3 //! The methods of `IndexMapCore` don't use any Hash properties of K.
5 //! It's cleaner to separate them out, then the compiler checks that we are not
6 //! using Hash at all in these methods.
8 //! However, we should probably not let this show in the public API or docs.
12 use hashbrown
::raw
::RawTable
;
14 use crate::vec
::{Drain, Vec}
;
17 use core
::mem
::replace
;
18 use core
::ops
::RangeBounds
;
20 use crate::equivalent
::Equivalent
;
21 use crate::util
::simplify_range
;
22 use crate::{Bucket, Entries, HashValue}
;
24 /// Core of the map that does not depend on S
25 pub(crate) struct IndexMapCore
<K
, V
> {
26 /// indices mapping from the entry hash to its index.
27 indices
: RawTable
<usize>,
28 /// entries is a dense vec of entries in their order.
29 entries
: Vec
<Bucket
<K
, V
>>,
33 fn get_hash
<K
, V
>(entries
: &[Bucket
<K
, V
>]) -> impl Fn(&usize) -> u64 + '_
{
34 move |&i
| entries
[i
].hash
.get()
38 fn equivalent
<'a
, K
, V
, Q
: ?Sized
+ Equivalent
<K
>>(
40 entries
: &'a
[Bucket
<K
, V
>],
41 ) -> impl Fn(&usize) -> bool
+ 'a
{
42 move |&i
| Q
::equivalent(key
, &entries
[i
].key
)
46 fn erase_index(table
: &mut RawTable
<usize>, hash
: HashValue
, index
: usize) {
47 let erased
= table
.erase_entry(hash
.get(), move |&i
| i
== index
);
48 debug_assert
!(erased
);
52 fn update_index(table
: &mut RawTable
<usize>, hash
: HashValue
, old
: usize, new
: usize) {
54 .get_mut(hash
.get(), move |&i
| i
== old
)
55 .expect("index not found");
59 impl<K
, V
> Clone
for IndexMapCore
<K
, V
>
64 fn clone(&self) -> Self {
65 let indices
= self.indices
.clone();
66 let mut entries
= Vec
::with_capacity(indices
.capacity());
67 entries
.clone_from(&self.entries
);
68 IndexMapCore { indices, entries }
71 fn clone_from(&mut self, other
: &Self) {
72 let hasher
= get_hash(&other
.entries
);
73 self.indices
.clone_from_with_hasher(&other
.indices
, hasher
);
74 if self.entries
.capacity() < other
.entries
.len() {
75 // If we must resize, match the indices capacity
76 self.reserve_entries();
78 self.entries
.clone_from(&other
.entries
);
82 impl<K
, V
> fmt
::Debug
for IndexMapCore
<K
, V
>
87 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
88 f
.debug_struct("IndexMapCore")
89 .field("indices", &raw
::DebugIndices(&self.indices
))
90 .field("entries", &self.entries
)
95 impl<K
, V
> Entries
for IndexMapCore
<K
, V
> {
96 type Entry
= Bucket
<K
, V
>;
99 fn into_entries(self) -> Vec
<Self::Entry
> {
104 fn as_entries(&self) -> &[Self::Entry
] {
109 fn as_entries_mut(&mut self) -> &mut [Self::Entry
] {
113 fn with_entries
<F
>(&mut self, f
: F
)
115 F
: FnOnce(&mut [Self::Entry
]),
117 f(&mut self.entries
);
118 self.rebuild_hash_table();
122 impl<K
, V
> IndexMapCore
<K
, V
> {
124 pub(crate) const fn new() -> Self {
126 indices
: RawTable
::new(),
132 pub(crate) fn with_capacity(n
: usize) -> Self {
134 indices
: RawTable
::with_capacity(n
),
135 entries
: Vec
::with_capacity(n
),
140 pub(crate) fn len(&self) -> usize {
145 pub(crate) fn capacity(&self) -> usize {
146 cmp
::min(self.indices
.capacity(), self.entries
.capacity())
149 pub(crate) fn clear(&mut self) {
150 self.indices
.clear();
151 self.entries
.clear();
154 pub(crate) fn truncate(&mut self, len
: usize) {
155 if len
< self.len() {
156 self.erase_indices(len
, self.entries
.len());
157 self.entries
.truncate(len
);
161 pub(crate) fn drain
<R
>(&mut self, range
: R
) -> Drain
<'_
, Bucket
<K
, V
>>
163 R
: RangeBounds
<usize>,
165 let range
= simplify_range(range
, self.entries
.len());
166 self.erase_indices(range
.start
, range
.end
);
167 self.entries
.drain(range
)
/// Parallel version of `drain`, only available with the `rayon` feature.
#[cfg(feature = "rayon")]
pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
where
    R: RangeBounds<usize>,
{
    use rayon::iter::ParallelDrainRange;
    let range = simplify_range(range, self.entries.len());
    self.erase_indices(range.start, range.end);
    self.entries.par_drain(range)
}
183 pub(crate) fn split_off(&mut self, at
: usize) -> Self {
184 assert
!(at
<= self.entries
.len());
185 self.erase_indices(at
, self.entries
.len());
186 let entries
= self.entries
.split_off(at
);
188 let mut indices
= RawTable
::with_capacity(entries
.len());
189 raw
::insert_bulk_no_grow(&mut indices
, &entries
);
190 Self { indices, entries }
193 /// Reserve capacity for `additional` more key-value pairs.
194 pub(crate) fn reserve(&mut self, additional
: usize) {
195 self.indices
.reserve(additional
, get_hash(&self.entries
));
196 self.reserve_entries();
199 /// Reserve entries capacity to match the indices
200 fn reserve_entries(&mut self) {
201 let additional
= self.indices
.capacity() - self.entries
.len();
202 self.entries
.reserve_exact(additional
);
205 /// Shrink the capacity of the map with a lower bound
206 pub(crate) fn shrink_to(&mut self, min_capacity
: usize) {
208 .shrink_to(min_capacity
, get_hash(&self.entries
));
209 self.entries
.shrink_to(min_capacity
);
212 /// Remove the last key-value pair
213 pub(crate) fn pop(&mut self) -> Option
<(K
, V
)> {
214 if let Some(entry
) = self.entries
.pop() {
215 let last
= self.entries
.len();
216 erase_index(&mut self.indices
, entry
.hash
, last
);
217 Some((entry
.key
, entry
.value
))
223 /// Append a key-value pair, *without* checking whether it already exists,
224 /// and return the pair's new index.
225 fn push(&mut self, hash
: HashValue
, key
: K
, value
: V
) -> usize {
226 let i
= self.entries
.len();
227 self.indices
.insert(hash
.get(), i
, get_hash(&self.entries
));
228 if i
== self.entries
.capacity() {
229 // Reserve our own capacity synced to the indices,
230 // rather than letting `Vec::push` just double it.
231 self.reserve_entries();
233 self.entries
.push(Bucket { hash, key, value }
);
237 /// Return the index in `entries` where an equivalent key can be found
238 pub(crate) fn get_index_of
<Q
>(&self, hash
: HashValue
, key
: &Q
) -> Option
<usize>
240 Q
: ?Sized
+ Equivalent
<K
>,
242 let eq
= equivalent(key
, &self.entries
);
243 self.indices
.get(hash
.get(), eq
).copied()
246 pub(crate) fn insert_full(&mut self, hash
: HashValue
, key
: K
, value
: V
) -> (usize, Option
<V
>)
250 match self.get_index_of(hash
, &key
) {
251 Some(i
) => (i
, Some(replace(&mut self.entries
[i
].value
, value
))),
252 None
=> (self.push(hash
, key
, value
), None
),
256 /// Remove an entry by shifting all entries that follow it
257 pub(crate) fn shift_remove_full
<Q
>(&mut self, hash
: HashValue
, key
: &Q
) -> Option
<(usize, K
, V
)>
259 Q
: ?Sized
+ Equivalent
<K
>,
261 let eq
= equivalent(key
, &self.entries
);
262 match self.indices
.remove_entry(hash
.get(), eq
) {
264 let (key
, value
) = self.shift_remove_finish(index
);
265 Some((index
, key
, value
))
271 /// Remove an entry by shifting all entries that follow it
272 pub(crate) fn shift_remove_index(&mut self, index
: usize) -> Option
<(K
, V
)> {
273 match self.entries
.get(index
) {
275 erase_index(&mut self.indices
, entry
.hash
, index
);
276 Some(self.shift_remove_finish(index
))
282 /// Remove an entry by shifting all entries that follow it
284 /// The index should already be removed from `self.indices`.
285 fn shift_remove_finish(&mut self, index
: usize) -> (K
, V
) {
286 // Correct indices that point to the entries that followed the removed entry.
287 self.decrement_indices(index
+ 1, self.entries
.len());
289 // Use Vec::remove to actually remove the entry.
290 let entry
= self.entries
.remove(index
);
291 (entry
.key
, entry
.value
)
294 /// Decrement all indices in the range `start..end`.
296 /// The index `start - 1` should not exist in `self.indices`.
297 /// All entries should still be in their original positions.
298 fn decrement_indices(&mut self, start
: usize, end
: usize) {
299 // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
300 let shifted_entries
= &self.entries
[start
..end
];
301 if shifted_entries
.len() > self.indices
.buckets() / 2 {
302 // Shift all indices in range.
303 for i
in self.indices_mut() {
304 if start
<= *i
&& *i
< end
{
309 // Find each entry in range to shift its index.
310 for (i
, entry
) in (start
..end
).zip(shifted_entries
) {
311 update_index(&mut self.indices
, entry
.hash
, i
, i
- 1);
316 /// Increment all indices in the range `start..end`.
318 /// The index `end` should not exist in `self.indices`.
319 /// All entries should still be in their original positions.
320 fn increment_indices(&mut self, start
: usize, end
: usize) {
321 // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
322 let shifted_entries
= &self.entries
[start
..end
];
323 if shifted_entries
.len() > self.indices
.buckets() / 2 {
324 // Shift all indices in range.
325 for i
in self.indices_mut() {
326 if start
<= *i
&& *i
< end
{
331 // Find each entry in range to shift its index, updated in reverse so
332 // we never have duplicated indices that might have a hash collision.
333 for (i
, entry
) in (start
..end
).zip(shifted_entries
).rev() {
334 update_index(&mut self.indices
, entry
.hash
, i
, i
+ 1);
339 pub(super) fn move_index(&mut self, from
: usize, to
: usize) {
340 let from_hash
= self.entries
[from
].hash
;
342 // Use a sentinal index so other indices don't collide.
343 update_index(&mut self.indices
, from_hash
, from
, usize::MAX
);
345 // Update all other indices and rotate the entry positions.
347 self.decrement_indices(from
+ 1, to
+ 1);
348 self.entries
[from
..=to
].rotate_left(1);
349 } else if to
< from
{
350 self.increment_indices(to
, from
);
351 self.entries
[to
..=from
].rotate_right(1);
354 // Change the sentinal index to its final position.
355 update_index(&mut self.indices
, from_hash
, usize::MAX
, to
);
359 /// Remove an entry by swapping it with the last
360 pub(crate) fn swap_remove_full
<Q
>(&mut self, hash
: HashValue
, key
: &Q
) -> Option
<(usize, K
, V
)>
362 Q
: ?Sized
+ Equivalent
<K
>,
364 let eq
= equivalent(key
, &self.entries
);
365 match self.indices
.remove_entry(hash
.get(), eq
) {
367 let (key
, value
) = self.swap_remove_finish(index
);
368 Some((index
, key
, value
))
374 /// Remove an entry by swapping it with the last
375 pub(crate) fn swap_remove_index(&mut self, index
: usize) -> Option
<(K
, V
)> {
376 match self.entries
.get(index
) {
378 erase_index(&mut self.indices
, entry
.hash
, index
);
379 Some(self.swap_remove_finish(index
))
385 /// Finish removing an entry by swapping it with the last
387 /// The index should already be removed from `self.indices`.
388 fn swap_remove_finish(&mut self, index
: usize) -> (K
, V
) {
389 // use swap_remove, but then we need to update the index that points
390 // to the other entry that has to move
391 let entry
= self.entries
.swap_remove(index
);
393 // correct index that points to the entry that had to swap places
394 if let Some(entry
) = self.entries
.get(index
) {
395 // was not last element
396 // examine new element in `index` and find it in indices
397 let last
= self.entries
.len();
398 update_index(&mut self.indices
, entry
.hash
, last
, index
);
401 (entry
.key
, entry
.value
)
404 /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
406 /// All of these items should still be at their original location in `entries`.
407 /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
408 fn erase_indices(&mut self, start
: usize, end
: usize) {
409 let (init
, shifted_entries
) = self.entries
.split_at(end
);
410 let (start_entries
, erased_entries
) = init
.split_at(start
);
412 let erased
= erased_entries
.len();
413 let shifted
= shifted_entries
.len();
414 let half_capacity
= self.indices
.buckets() / 2;
416 // Use a heuristic between different strategies
418 // Degenerate case, nothing to do
419 } else if start
+ shifted
< half_capacity
&& start
< erased
{
420 // Reinsert everything, as there are few kept indices
421 self.indices
.clear();
423 // Reinsert stable indices, then shifted indices
424 raw
::insert_bulk_no_grow(&mut self.indices
, start_entries
);
425 raw
::insert_bulk_no_grow(&mut self.indices
, shifted_entries
);
426 } else if erased
+ shifted
< half_capacity
{
427 // Find each affected index, as there are few to adjust
429 // Find erased indices
430 for (i
, entry
) in (start
..).zip(erased_entries
) {
431 erase_index(&mut self.indices
, entry
.hash
, i
);
434 // Find shifted indices
435 for ((new
, old
), entry
) in (start
..).zip(end
..).zip(shifted_entries
) {
436 update_index(&mut self.indices
, entry
.hash
, old
, new
);
439 // Sweep the whole table for adjustments
440 self.erase_indices_sweep(start
, end
);
443 debug_assert_eq
!(self.indices
.len(), start
+ shifted
);
446 pub(crate) fn retain_in_order
<F
>(&mut self, mut keep
: F
)
448 F
: FnMut(&mut K
, &mut V
) -> bool
,
450 // FIXME: This could use Vec::retain_mut with MSRV 1.61.
451 // Like Vec::retain in self.entries, but with mutable K and V.
452 // We swap-shift all the items we want to keep, truncate the rest,
453 // then rebuild the raw hash table with the new indexes.
454 let len
= self.entries
.len();
455 let mut n_deleted
= 0;
458 let entry
= &mut self.entries
[i
];
459 keep(&mut entry
.key
, &mut entry
.value
)
463 } else if n_deleted
> 0 {
464 self.entries
.swap(i
- n_deleted
, i
);
468 self.entries
.truncate(len
- n_deleted
);
469 self.rebuild_hash_table();
473 fn rebuild_hash_table(&mut self) {
474 self.indices
.clear();
475 raw
::insert_bulk_no_grow(&mut self.indices
, &self.entries
);
478 pub(crate) fn reverse(&mut self) {
479 self.entries
.reverse();
481 // No need to save hash indices, can easily calculate what they should
482 // be, given that this is an in-place reversal.
483 let len
= self.entries
.len();
484 for i
in self.indices_mut() {
490 /// Entry for an existing key-value pair or a vacant location to
492 pub enum Entry
<'a
, K
, V
> {
493 /// Existing slot with equivalent key.
494 Occupied(OccupiedEntry
<'a
, K
, V
>),
495 /// Vacant slot (no equivalent key in the map).
496 Vacant(VacantEntry
<'a
, K
, V
>),
499 impl<'a
, K
, V
> Entry
<'a
, K
, V
> {
500 /// Inserts the given default value in the entry if it is vacant and returns a mutable
501 /// reference to it. Otherwise a mutable reference to an already existent value is returned.
503 /// Computes in **O(1)** time (amortized average).
504 pub fn or_insert(self, default: V
) -> &'a
mut V
{
506 Entry
::Occupied(entry
) => entry
.into_mut(),
507 Entry
::Vacant(entry
) => entry
.insert(default),
511 /// Inserts the result of the `call` function in the entry if it is vacant and returns a mutable
512 /// reference to it. Otherwise a mutable reference to an already existent value is returned.
514 /// Computes in **O(1)** time (amortized average).
515 pub fn or_insert_with
<F
>(self, call
: F
) -> &'a
mut V
520 Entry
::Occupied(entry
) => entry
.into_mut(),
521 Entry
::Vacant(entry
) => entry
.insert(call()),
525 /// Inserts the result of the `call` function with a reference to the entry's key if it is
526 /// vacant, and returns a mutable reference to the new value. Otherwise a mutable reference to
527 /// an already existent value is returned.
529 /// Computes in **O(1)** time (amortized average).
530 pub fn or_insert_with_key
<F
>(self, call
: F
) -> &'a
mut V
535 Entry
::Occupied(entry
) => entry
.into_mut(),
536 Entry
::Vacant(entry
) => {
537 let value
= call(&entry
.key
);
543 /// Gets a reference to the entry's key, either within the map if occupied,
544 /// or else the new key that was used to find the entry.
545 pub fn key(&self) -> &K
{
547 Entry
::Occupied(ref entry
) => entry
.key(),
548 Entry
::Vacant(ref entry
) => entry
.key(),
552 /// Return the index where the key-value pair exists or will be inserted.
553 pub fn index(&self) -> usize {
555 Entry
::Occupied(ref entry
) => entry
.index(),
556 Entry
::Vacant(ref entry
) => entry
.index(),
560 /// Modifies the entry if it is occupied.
561 pub fn and_modify
<F
>(self, f
: F
) -> Self
566 Entry
::Occupied(mut o
) => {
574 /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable
575 /// reference to it. Otherwise a mutable reference to an already existent value is returned.
577 /// Computes in **O(1)** time (amortized average).
578 pub fn or_default(self) -> &'a
mut V
583 Entry
::Occupied(entry
) => entry
.into_mut(),
584 Entry
::Vacant(entry
) => entry
.insert(V
::default()),
589 impl<K
: fmt
::Debug
, V
: fmt
::Debug
> fmt
::Debug
for Entry
<'_
, K
, V
> {
590 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
592 Entry
::Vacant(ref v
) => f
.debug_tuple(stringify
!(Entry
)).field(v
).finish(),
593 Entry
::Occupied(ref o
) => f
.debug_tuple(stringify
!(Entry
)).field(o
).finish(),
598 pub use self::raw
::OccupiedEntry
;
600 // Extra methods that don't threaten the unsafe encapsulation.
601 impl<K
, V
> OccupiedEntry
<'_
, K
, V
> {
602 /// Sets the value of the entry to `value`, and returns the entry's old value.
603 pub fn insert(&mut self, value
: V
) -> V
{
604 replace(self.get_mut(), value
)
607 /// Remove the key, value pair stored in the map for this entry, and return the value.
609 /// **NOTE:** This is equivalent to `.swap_remove()`.
610 pub fn remove(self) -> V
{
614 /// Remove the key, value pair stored in the map for this entry, and return the value.
616 /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
617 /// last element of the map and popping it off. **This perturbs
618 /// the position of what used to be the last element!**
620 /// Computes in **O(1)** time (average).
621 pub fn swap_remove(self) -> V
{
622 self.swap_remove_entry().1
625 /// Remove the key, value pair stored in the map for this entry, and return the value.
627 /// Like `Vec::remove`, the pair is removed by shifting all of the
628 /// elements that follow it, preserving their relative order.
629 /// **This perturbs the index of all of those elements!**
631 /// Computes in **O(n)** time (average).
632 pub fn shift_remove(self) -> V
{
633 self.shift_remove_entry().1
636 /// Remove and return the key, value pair stored in the map for this entry
638 /// **NOTE:** This is equivalent to `.swap_remove_entry()`.
639 pub fn remove_entry(self) -> (K
, V
) {
640 self.swap_remove_entry()
644 impl<K
: fmt
::Debug
, V
: fmt
::Debug
> fmt
::Debug
for OccupiedEntry
<'_
, K
, V
> {
645 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
646 f
.debug_struct(stringify
!(OccupiedEntry
))
647 .field("key", self.key())
648 .field("value", self.get())
653 /// A view into a vacant entry in a `IndexMap`.
654 /// It is part of the [`Entry`] enum.
656 /// [`Entry`]: enum.Entry.html
657 pub struct VacantEntry
<'a
, K
, V
> {
658 map
: &'a
mut IndexMapCore
<K
, V
>,
663 impl<'a
, K
, V
> VacantEntry
<'a
, K
, V
> {
664 /// Gets a reference to the key that was used to find the entry.
665 pub fn key(&self) -> &K
{
669 /// Takes ownership of the key, leaving the entry vacant.
670 pub fn into_key(self) -> K
{
674 /// Return the index where the key-value pair will be inserted.
675 pub fn index(&self) -> usize {
679 /// Inserts the entry's key and the given value into the map, and returns a mutable reference
681 pub fn insert(self, value
: V
) -> &'a
mut V
{
682 let i
= self.map
.push(self.hash
, self.key
, value
);
683 &mut self.map
.entries
[i
].value
687 impl<K
: fmt
::Debug
, V
> fmt
::Debug
for VacantEntry
<'_
, K
, V
> {
688 fn fmt(&self, f
: &mut fmt
::Formatter
<'_
>) -> fmt
::Result
{
689 f
.debug_tuple(stringify
!(VacantEntry
))
/// Compile-time check that the core types are `Send + Sync` for
/// thread-safe key/value types.
#[test]
fn assert_send_sync() {
    fn assert_send_sync<T: Send + Sync>() {}
    assert_send_sync::<IndexMapCore<i32, i32>>();
    assert_send_sync::<Entry<'_, i32, i32>>();
}