//===- llvm/ADT/DenseMap.h - Dense probed hash table ------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the DenseMap class.
//
//===----------------------------------------------------------------------===//
14 #ifndef LLVM_ADT_DENSEMAP_H
15 #define LLVM_ADT_DENSEMAP_H
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/type_traits.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstddef>
#include <cstring>
#include <iterator>
#include <new>
#include <type_traits>
#include <utility>
namespace detail {
// We extend a pair to allow users to override the bucket type with their own
// implementation without requiring two members.
template <typename KeyT, typename ValueT>
struct DenseMapPair : public std::pair<KeyT, ValueT> {
  KeyT &getFirst() { return std::pair<KeyT, ValueT>::first; }
  const KeyT &getFirst() const { return std::pair<KeyT, ValueT>::first; }
  ValueT &getSecond() { return std::pair<KeyT, ValueT>::second; }
  const ValueT &getSecond() const { return std::pair<KeyT, ValueT>::second; }
};
} // end namespace detail
47 typename KeyT
, typename ValueT
, typename KeyInfoT
= DenseMapInfo
<KeyT
>,
48 typename Bucket
= detail::DenseMapPair
<KeyT
, ValueT
>, bool IsConst
= false>
49 class DenseMapIterator
;
51 template <typename DerivedT
, typename KeyT
, typename ValueT
, typename KeyInfoT
,
55 typedef unsigned size_type
;
56 typedef KeyT key_type
;
57 typedef ValueT mapped_type
;
58 typedef BucketT value_type
;
60 typedef DenseMapIterator
<KeyT
, ValueT
, KeyInfoT
, BucketT
> iterator
;
61 typedef DenseMapIterator
<KeyT
, ValueT
, KeyInfoT
, BucketT
, true>
63 inline iterator
begin() {
64 // When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
65 return empty() ? end() : iterator(getBuckets(), getBucketsEnd());
67 inline iterator
end() {
68 return iterator(getBucketsEnd(), getBucketsEnd(), true);
70 inline const_iterator
begin() const {
71 return empty() ? end() : const_iterator(getBuckets(), getBucketsEnd());
73 inline const_iterator
end() const {
74 return const_iterator(getBucketsEnd(), getBucketsEnd(), true);
77 bool LLVM_ATTRIBUTE_UNUSED_RESULT
empty() const {
78 return getNumEntries() == 0;
80 unsigned size() const { return getNumEntries(); }
82 /// Grow the densemap so that it has at least Size buckets. Does not shrink
83 void resize(size_type Size
) {
84 if (Size
> getNumBuckets())
89 if (getNumEntries() == 0 && getNumTombstones() == 0) return;
91 // If the capacity of the array is huge, and the # elements used is small,
93 if (getNumEntries() * 4 < getNumBuckets() && getNumBuckets() > 64) {
98 const KeyT EmptyKey
= getEmptyKey(), TombstoneKey
= getTombstoneKey();
99 for (BucketT
*P
= getBuckets(), *E
= getBucketsEnd(); P
!= E
; ++P
) {
100 if (!KeyInfoT::isEqual(P
->getFirst(), EmptyKey
)) {
101 if (!KeyInfoT::isEqual(P
->getFirst(), TombstoneKey
)) {
102 P
->getSecond().~ValueT();
103 decrementNumEntries();
105 P
->getFirst() = EmptyKey
;
108 assert(getNumEntries() == 0 && "Node count imbalance!");
112 /// Return 1 if the specified key is in the map, 0 otherwise.
113 size_type
count(const KeyT
&Val
) const {
114 const BucketT
*TheBucket
;
115 return LookupBucketFor(Val
, TheBucket
) ? 1 : 0;
118 iterator
find(const KeyT
&Val
) {
120 if (LookupBucketFor(Val
, TheBucket
))
121 return iterator(TheBucket
, getBucketsEnd(), true);
124 const_iterator
find(const KeyT
&Val
) const {
125 const BucketT
*TheBucket
;
126 if (LookupBucketFor(Val
, TheBucket
))
127 return const_iterator(TheBucket
, getBucketsEnd(), true);
131 /// Alternate version of find() which allows a different, and possibly
132 /// less expensive, key type.
133 /// The DenseMapInfo is responsible for supplying methods
134 /// getHashValue(LookupKeyT) and isEqual(LookupKeyT, KeyT) for each key
136 template<class LookupKeyT
>
137 iterator
find_as(const LookupKeyT
&Val
) {
139 if (LookupBucketFor(Val
, TheBucket
))
140 return iterator(TheBucket
, getBucketsEnd(), true);
143 template<class LookupKeyT
>
144 const_iterator
find_as(const LookupKeyT
&Val
) const {
145 const BucketT
*TheBucket
;
146 if (LookupBucketFor(Val
, TheBucket
))
147 return const_iterator(TheBucket
, getBucketsEnd(), true);
151 /// lookup - Return the entry for the specified key, or a default
152 /// constructed value if no such entry exists.
153 ValueT
lookup(const KeyT
&Val
) const {
154 const BucketT
*TheBucket
;
155 if (LookupBucketFor(Val
, TheBucket
))
156 return TheBucket
->getSecond();
160 // Inserts key,value pair into the map if the key isn't already in the map.
161 // If the key is already in the map, it returns false and doesn't update the
163 std::pair
<iterator
, bool> insert(const std::pair
<KeyT
, ValueT
> &KV
) {
165 if (LookupBucketFor(KV
.first
, TheBucket
))
166 return std::make_pair(iterator(TheBucket
, getBucketsEnd(), true),
167 false); // Already in map.
169 // Otherwise, insert the new element.
170 TheBucket
= InsertIntoBucket(KV
.first
, KV
.second
, TheBucket
);
171 return std::make_pair(iterator(TheBucket
, getBucketsEnd(), true), true);
174 // Inserts key,value pair into the map if the key isn't already in the map.
175 // If the key is already in the map, it returns false and doesn't update the
177 std::pair
<iterator
, bool> insert(std::pair
<KeyT
, ValueT
> &&KV
) {
179 if (LookupBucketFor(KV
.first
, TheBucket
))
180 return std::make_pair(iterator(TheBucket
, getBucketsEnd(), true),
181 false); // Already in map.
183 // Otherwise, insert the new element.
184 TheBucket
= InsertIntoBucket(std::move(KV
.first
),
185 std::move(KV
.second
),
187 return std::make_pair(iterator(TheBucket
, getBucketsEnd(), true), true);
190 /// insert - Range insertion of pairs.
191 template<typename InputIt
>
192 void insert(InputIt I
, InputIt E
) {
198 bool erase(const KeyT
&Val
) {
200 if (!LookupBucketFor(Val
, TheBucket
))
201 return false; // not in map.
203 TheBucket
->getSecond().~ValueT();
204 TheBucket
->getFirst() = getTombstoneKey();
205 decrementNumEntries();
206 incrementNumTombstones();
209 void erase(iterator I
) {
210 BucketT
*TheBucket
= &*I
;
211 TheBucket
->getSecond().~ValueT();
212 TheBucket
->getFirst() = getTombstoneKey();
213 decrementNumEntries();
214 incrementNumTombstones();
217 value_type
& FindAndConstruct(const KeyT
&Key
) {
219 if (LookupBucketFor(Key
, TheBucket
))
222 return *InsertIntoBucket(Key
, ValueT(), TheBucket
);
225 ValueT
&operator[](const KeyT
&Key
) {
226 return FindAndConstruct(Key
).second
;
229 value_type
& FindAndConstruct(KeyT
&&Key
) {
231 if (LookupBucketFor(Key
, TheBucket
))
234 return *InsertIntoBucket(std::move(Key
), ValueT(), TheBucket
);
237 ValueT
&operator[](KeyT
&&Key
) {
238 return FindAndConstruct(std::move(Key
)).second
;
241 /// isPointerIntoBucketsArray - Return true if the specified pointer points
242 /// somewhere into the DenseMap's array of buckets (i.e. either to a key or
243 /// value in the DenseMap).
244 bool isPointerIntoBucketsArray(const void *Ptr
) const {
245 return Ptr
>= getBuckets() && Ptr
< getBucketsEnd();
248 /// getPointerIntoBucketsArray() - Return an opaque pointer into the buckets
249 /// array. In conjunction with the previous method, this can be used to
250 /// determine whether an insertion caused the DenseMap to reallocate.
251 const void *getPointerIntoBucketsArray() const { return getBuckets(); }
257 if (getNumBuckets() == 0) // Nothing to do.
260 const KeyT EmptyKey
= getEmptyKey(), TombstoneKey
= getTombstoneKey();
261 for (BucketT
*P
= getBuckets(), *E
= getBucketsEnd(); P
!= E
; ++P
) {
262 if (!KeyInfoT::isEqual(P
->getFirst(), EmptyKey
) &&
263 !KeyInfoT::isEqual(P
->getFirst(), TombstoneKey
))
264 P
->getSecond().~ValueT();
265 P
->getFirst().~KeyT();
269 memset((void*)getBuckets(), 0x5a, sizeof(BucketT
)*getNumBuckets());
277 assert((getNumBuckets() & (getNumBuckets()-1)) == 0 &&
278 "# initial buckets must be a power of two!");
279 const KeyT EmptyKey
= getEmptyKey();
280 for (BucketT
*B
= getBuckets(), *E
= getBucketsEnd(); B
!= E
; ++B
)
281 new (&B
->getFirst()) KeyT(EmptyKey
);
284 void moveFromOldBuckets(BucketT
*OldBucketsBegin
, BucketT
*OldBucketsEnd
) {
287 // Insert all the old elements.
288 const KeyT EmptyKey
= getEmptyKey();
289 const KeyT TombstoneKey
= getTombstoneKey();
290 for (BucketT
*B
= OldBucketsBegin
, *E
= OldBucketsEnd
; B
!= E
; ++B
) {
291 if (!KeyInfoT::isEqual(B
->getFirst(), EmptyKey
) &&
292 !KeyInfoT::isEqual(B
->getFirst(), TombstoneKey
)) {
293 // Insert the key/value into the new table.
295 bool FoundVal
= LookupBucketFor(B
->getFirst(), DestBucket
);
296 (void)FoundVal
; // silence warning.
297 assert(!FoundVal
&& "Key already in new map?");
298 DestBucket
->getFirst() = std::move(B
->getFirst());
299 new (&DestBucket
->getSecond()) ValueT(std::move(B
->getSecond()));
300 incrementNumEntries();
303 B
->getSecond().~ValueT();
305 B
->getFirst().~KeyT();
309 if (OldBucketsBegin
!= OldBucketsEnd
)
310 memset((void*)OldBucketsBegin
, 0x5a,
311 sizeof(BucketT
) * (OldBucketsEnd
- OldBucketsBegin
));
315 template <typename OtherBaseT
>
317 const DenseMapBase
<OtherBaseT
, KeyT
, ValueT
, KeyInfoT
, BucketT
> &other
) {
318 assert(&other
!= this);
319 assert(getNumBuckets() == other
.getNumBuckets());
321 setNumEntries(other
.getNumEntries());
322 setNumTombstones(other
.getNumTombstones());
324 if (isPodLike
<KeyT
>::value
&& isPodLike
<ValueT
>::value
)
325 memcpy(getBuckets(), other
.getBuckets(),
326 getNumBuckets() * sizeof(BucketT
));
328 for (size_t i
= 0; i
< getNumBuckets(); ++i
) {
329 new (&getBuckets()[i
].getFirst())
330 KeyT(other
.getBuckets()[i
].getFirst());
331 if (!KeyInfoT::isEqual(getBuckets()[i
].getFirst(), getEmptyKey()) &&
332 !KeyInfoT::isEqual(getBuckets()[i
].getFirst(), getTombstoneKey()))
333 new (&getBuckets()[i
].getSecond())
334 ValueT(other
.getBuckets()[i
].getSecond());
338 void swap(DenseMapBase
& RHS
) {
339 std::swap(getNumEntries(), RHS
.getNumEntries());
340 std::swap(getNumTombstones(), RHS
.getNumTombstones());
343 static unsigned getHashValue(const KeyT
&Val
) {
344 return KeyInfoT::getHashValue(Val
);
346 template<typename LookupKeyT
>
347 static unsigned getHashValue(const LookupKeyT
&Val
) {
348 return KeyInfoT::getHashValue(Val
);
350 static const KeyT
getEmptyKey() {
351 return KeyInfoT::getEmptyKey();
353 static const KeyT
getTombstoneKey() {
354 return KeyInfoT::getTombstoneKey();
358 unsigned getNumEntries() const {
359 return static_cast<const DerivedT
*>(this)->getNumEntries();
361 void setNumEntries(unsigned Num
) {
362 static_cast<DerivedT
*>(this)->setNumEntries(Num
);
364 void incrementNumEntries() {
365 setNumEntries(getNumEntries() + 1);
367 void decrementNumEntries() {
368 setNumEntries(getNumEntries() - 1);
370 unsigned getNumTombstones() const {
371 return static_cast<const DerivedT
*>(this)->getNumTombstones();
373 void setNumTombstones(unsigned Num
) {
374 static_cast<DerivedT
*>(this)->setNumTombstones(Num
);
376 void incrementNumTombstones() {
377 setNumTombstones(getNumTombstones() + 1);
379 void decrementNumTombstones() {
380 setNumTombstones(getNumTombstones() - 1);
382 const BucketT
*getBuckets() const {
383 return static_cast<const DerivedT
*>(this)->getBuckets();
385 BucketT
*getBuckets() {
386 return static_cast<DerivedT
*>(this)->getBuckets();
388 unsigned getNumBuckets() const {
389 return static_cast<const DerivedT
*>(this)->getNumBuckets();
391 BucketT
*getBucketsEnd() {
392 return getBuckets() + getNumBuckets();
394 const BucketT
*getBucketsEnd() const {
395 return getBuckets() + getNumBuckets();
398 void grow(unsigned AtLeast
) {
399 static_cast<DerivedT
*>(this)->grow(AtLeast
);
402 void shrink_and_clear() {
403 static_cast<DerivedT
*>(this)->shrink_and_clear();
407 BucketT
*InsertIntoBucket(const KeyT
&Key
, const ValueT
&Value
,
408 BucketT
*TheBucket
) {
409 TheBucket
= InsertIntoBucketImpl(Key
, TheBucket
);
411 TheBucket
->getFirst() = Key
;
412 new (&TheBucket
->getSecond()) ValueT(Value
);
416 BucketT
*InsertIntoBucket(const KeyT
&Key
, ValueT
&&Value
,
417 BucketT
*TheBucket
) {
418 TheBucket
= InsertIntoBucketImpl(Key
, TheBucket
);
420 TheBucket
->getFirst() = Key
;
421 new (&TheBucket
->getSecond()) ValueT(std::move(Value
));
425 BucketT
*InsertIntoBucket(KeyT
&&Key
, ValueT
&&Value
, BucketT
*TheBucket
) {
426 TheBucket
= InsertIntoBucketImpl(Key
, TheBucket
);
428 TheBucket
->getFirst() = std::move(Key
);
429 new (&TheBucket
->getSecond()) ValueT(std::move(Value
));
433 BucketT
*InsertIntoBucketImpl(const KeyT
&Key
, BucketT
*TheBucket
) {
434 // If the load of the hash table is more than 3/4, or if fewer than 1/8 of
435 // the buckets are empty (meaning that many are filled with tombstones),
438 // The later case is tricky. For example, if we had one empty bucket with
439 // tons of tombstones, failing lookups (e.g. for insertion) would have to
440 // probe almost the entire table until it found the empty bucket. If the
441 // table completely filled with tombstones, no lookup would ever succeed,
442 // causing infinite loops in lookup.
443 unsigned NewNumEntries
= getNumEntries() + 1;
444 unsigned NumBuckets
= getNumBuckets();
445 if (NewNumEntries
*4 >= NumBuckets
*3) {
446 this->grow(NumBuckets
* 2);
447 LookupBucketFor(Key
, TheBucket
);
448 NumBuckets
= getNumBuckets();
449 } else if (NumBuckets
-(NewNumEntries
+getNumTombstones()) <= NumBuckets
/8) {
450 this->grow(NumBuckets
);
451 LookupBucketFor(Key
, TheBucket
);
455 // Only update the state after we've grown our bucket space appropriately
456 // so that when growing buckets we have self-consistent entry count.
457 incrementNumEntries();
459 // If we are writing over a tombstone, remember this.
460 const KeyT EmptyKey
= getEmptyKey();
461 if (!KeyInfoT::isEqual(TheBucket
->getFirst(), EmptyKey
))
462 decrementNumTombstones();
467 /// LookupBucketFor - Lookup the appropriate bucket for Val, returning it in
468 /// FoundBucket. If the bucket contains the key and a value, this returns
469 /// true, otherwise it returns a bucket with an empty marker or tombstone and
471 template<typename LookupKeyT
>
472 bool LookupBucketFor(const LookupKeyT
&Val
,
473 const BucketT
*&FoundBucket
) const {
474 const BucketT
*BucketsPtr
= getBuckets();
475 const unsigned NumBuckets
= getNumBuckets();
477 if (NumBuckets
== 0) {
478 FoundBucket
= nullptr;
482 // FoundTombstone - Keep track of whether we find a tombstone while probing.
483 const BucketT
*FoundTombstone
= nullptr;
484 const KeyT EmptyKey
= getEmptyKey();
485 const KeyT TombstoneKey
= getTombstoneKey();
486 assert(!KeyInfoT::isEqual(Val
, EmptyKey
) &&
487 !KeyInfoT::isEqual(Val
, TombstoneKey
) &&
488 "Empty/Tombstone value shouldn't be inserted into map!");
490 unsigned BucketNo
= getHashValue(Val
) & (NumBuckets
-1);
491 unsigned ProbeAmt
= 1;
493 const BucketT
*ThisBucket
= BucketsPtr
+ BucketNo
;
494 // Found Val's bucket? If so, return it.
495 if (KeyInfoT::isEqual(Val
, ThisBucket
->getFirst())) {
496 FoundBucket
= ThisBucket
;
500 // If we found an empty bucket, the key doesn't exist in the set.
501 // Insert it and return the default value.
502 if (KeyInfoT::isEqual(ThisBucket
->getFirst(), EmptyKey
)) {
503 // If we've already seen a tombstone while probing, fill it in instead
504 // of the empty bucket we eventually probed to.
505 FoundBucket
= FoundTombstone
? FoundTombstone
: ThisBucket
;
509 // If this is a tombstone, remember it. If Val ends up not in the map, we
510 // prefer to return it than something that would require more probing.
511 if (KeyInfoT::isEqual(ThisBucket
->getFirst(), TombstoneKey
) &&
513 FoundTombstone
= ThisBucket
; // Remember the first tombstone found.
515 // Otherwise, it's a hash collision or a tombstone, continue quadratic
517 BucketNo
+= ProbeAmt
++;
518 BucketNo
&= (NumBuckets
-1);
522 template <typename LookupKeyT
>
523 bool LookupBucketFor(const LookupKeyT
&Val
, BucketT
*&FoundBucket
) {
524 const BucketT
*ConstFoundBucket
;
525 bool Result
= const_cast<const DenseMapBase
*>(this)
526 ->LookupBucketFor(Val
, ConstFoundBucket
);
527 FoundBucket
= const_cast<BucketT
*>(ConstFoundBucket
);
532 /// Return the approximate size (in bytes) of the actual map.
533 /// This is just the raw memory used by DenseMap.
534 /// If entries are pointers to objects, the size of the referenced objects
535 /// are not included.
536 size_t getMemorySize() const {
537 return getNumBuckets() * sizeof(BucketT
);
541 template <typename KeyT
, typename ValueT
,
542 typename KeyInfoT
= DenseMapInfo
<KeyT
>,
543 typename BucketT
= detail::DenseMapPair
<KeyT
, ValueT
>>
544 class DenseMap
: public DenseMapBase
<DenseMap
<KeyT
, ValueT
, KeyInfoT
, BucketT
>,
545 KeyT
, ValueT
, KeyInfoT
, BucketT
> {
546 // Lift some types from the dependent base class into this class for
547 // simplicity of referring to them.
548 typedef DenseMapBase
<DenseMap
, KeyT
, ValueT
, KeyInfoT
, BucketT
> BaseT
;
549 friend class DenseMapBase
<DenseMap
, KeyT
, ValueT
, KeyInfoT
, BucketT
>;
553 unsigned NumTombstones
;
557 explicit DenseMap(unsigned NumInitBuckets
= 0) {
558 init(NumInitBuckets
);
561 DenseMap(const DenseMap
&other
) : BaseT() {
566 DenseMap(DenseMap
&&other
) : BaseT() {
571 template<typename InputIt
>
572 DenseMap(const InputIt
&I
, const InputIt
&E
) {
573 init(NextPowerOf2(std::distance(I
, E
)));
579 operator delete(Buckets
);
582 void swap(DenseMap
& RHS
) {
583 std::swap(Buckets
, RHS
.Buckets
);
584 std::swap(NumEntries
, RHS
.NumEntries
);
585 std::swap(NumTombstones
, RHS
.NumTombstones
);
586 std::swap(NumBuckets
, RHS
.NumBuckets
);
589 DenseMap
& operator=(const DenseMap
& other
) {
595 DenseMap
& operator=(DenseMap
&&other
) {
597 operator delete(Buckets
);
603 void copyFrom(const DenseMap
& other
) {
605 operator delete(Buckets
);
606 if (allocateBuckets(other
.NumBuckets
)) {
607 this->BaseT::copyFrom(other
);
614 void init(unsigned InitBuckets
) {
615 if (allocateBuckets(InitBuckets
)) {
616 this->BaseT::initEmpty();
623 void grow(unsigned AtLeast
) {
624 unsigned OldNumBuckets
= NumBuckets
;
625 BucketT
*OldBuckets
= Buckets
;
627 allocateBuckets(std::max
<unsigned>(64, static_cast<unsigned>(NextPowerOf2(AtLeast
-1))));
630 this->BaseT::initEmpty();
634 this->moveFromOldBuckets(OldBuckets
, OldBuckets
+OldNumBuckets
);
636 // Free the old table.
637 operator delete(OldBuckets
);
640 void shrink_and_clear() {
641 unsigned OldNumEntries
= NumEntries
;
644 // Reduce the number of buckets.
645 unsigned NewNumBuckets
= 0;
647 NewNumBuckets
= std::max(64, 1 << (Log2_32_Ceil(OldNumEntries
) + 1));
648 if (NewNumBuckets
== NumBuckets
) {
649 this->BaseT::initEmpty();
653 operator delete(Buckets
);
658 unsigned getNumEntries() const {
661 void setNumEntries(unsigned Num
) {
665 unsigned getNumTombstones() const {
666 return NumTombstones
;
668 void setNumTombstones(unsigned Num
) {
672 BucketT
*getBuckets() const {
676 unsigned getNumBuckets() const {
680 bool allocateBuckets(unsigned Num
) {
682 if (NumBuckets
== 0) {
687 Buckets
= static_cast<BucketT
*>(operator new(sizeof(BucketT
) * NumBuckets
));
692 template <typename KeyT
, typename ValueT
, unsigned InlineBuckets
= 4,
693 typename KeyInfoT
= DenseMapInfo
<KeyT
>,
694 typename BucketT
= detail::DenseMapPair
<KeyT
, ValueT
>>
696 : public DenseMapBase
<
697 SmallDenseMap
<KeyT
, ValueT
, InlineBuckets
, KeyInfoT
, BucketT
>, KeyT
,
698 ValueT
, KeyInfoT
, BucketT
> {
699 // Lift some types from the dependent base class into this class for
700 // simplicity of referring to them.
701 typedef DenseMapBase
<SmallDenseMap
, KeyT
, ValueT
, KeyInfoT
, BucketT
> BaseT
;
702 friend class DenseMapBase
<SmallDenseMap
, KeyT
, ValueT
, KeyInfoT
, BucketT
>;
705 unsigned NumEntries
: 31;
706 unsigned NumTombstones
;
713 /// A "union" of an inline bucket array and the struct representing
714 /// a large bucket. This union will be discriminated by the 'Small' bit.
715 AlignedCharArrayUnion
<BucketT
[InlineBuckets
], LargeRep
> storage
;
718 explicit SmallDenseMap(unsigned NumInitBuckets
= 0) {
719 init(NumInitBuckets
);
722 SmallDenseMap(const SmallDenseMap
&other
) : BaseT() {
727 SmallDenseMap(SmallDenseMap
&&other
) : BaseT() {
732 template<typename InputIt
>
733 SmallDenseMap(const InputIt
&I
, const InputIt
&E
) {
734 init(NextPowerOf2(std::distance(I
, E
)));
743 void swap(SmallDenseMap
& RHS
) {
744 unsigned TmpNumEntries
= RHS
.NumEntries
;
745 RHS
.NumEntries
= NumEntries
;
746 NumEntries
= TmpNumEntries
;
747 std::swap(NumTombstones
, RHS
.NumTombstones
);
749 const KeyT EmptyKey
= this->getEmptyKey();
750 const KeyT TombstoneKey
= this->getTombstoneKey();
751 if (Small
&& RHS
.Small
) {
752 // If we're swapping inline bucket arrays, we have to cope with some of
753 // the tricky bits of DenseMap's storage system: the buckets are not
754 // fully initialized. Thus we swap every key, but we may have
755 // a one-directional move of the value.
756 for (unsigned i
= 0, e
= InlineBuckets
; i
!= e
; ++i
) {
757 BucketT
*LHSB
= &getInlineBuckets()[i
],
758 *RHSB
= &RHS
.getInlineBuckets()[i
];
759 bool hasLHSValue
= (!KeyInfoT::isEqual(LHSB
->getFirst(), EmptyKey
) &&
760 !KeyInfoT::isEqual(LHSB
->getFirst(), TombstoneKey
));
761 bool hasRHSValue
= (!KeyInfoT::isEqual(RHSB
->getFirst(), EmptyKey
) &&
762 !KeyInfoT::isEqual(RHSB
->getFirst(), TombstoneKey
));
763 if (hasLHSValue
&& hasRHSValue
) {
764 // Swap together if we can...
765 std::swap(*LHSB
, *RHSB
);
768 // Swap separately and handle any assymetry.
769 std::swap(LHSB
->getFirst(), RHSB
->getFirst());
771 new (&RHSB
->getSecond()) ValueT(std::move(LHSB
->getSecond()));
772 LHSB
->getSecond().~ValueT();
773 } else if (hasRHSValue
) {
774 new (&LHSB
->getSecond()) ValueT(std::move(RHSB
->getSecond()));
775 RHSB
->getSecond().~ValueT();
780 if (!Small
&& !RHS
.Small
) {
781 std::swap(getLargeRep()->Buckets
, RHS
.getLargeRep()->Buckets
);
782 std::swap(getLargeRep()->NumBuckets
, RHS
.getLargeRep()->NumBuckets
);
786 SmallDenseMap
&SmallSide
= Small
? *this : RHS
;
787 SmallDenseMap
&LargeSide
= Small
? RHS
: *this;
789 // First stash the large side's rep and move the small side across.
790 LargeRep TmpRep
= std::move(*LargeSide
.getLargeRep());
791 LargeSide
.getLargeRep()->~LargeRep();
792 LargeSide
.Small
= true;
793 // This is similar to the standard move-from-old-buckets, but the bucket
794 // count hasn't actually rotated in this case. So we have to carefully
795 // move construct the keys and values into their new locations, but there
796 // is no need to re-hash things.
797 for (unsigned i
= 0, e
= InlineBuckets
; i
!= e
; ++i
) {
798 BucketT
*NewB
= &LargeSide
.getInlineBuckets()[i
],
799 *OldB
= &SmallSide
.getInlineBuckets()[i
];
800 new (&NewB
->getFirst()) KeyT(std::move(OldB
->getFirst()));
801 OldB
->getFirst().~KeyT();
802 if (!KeyInfoT::isEqual(NewB
->getFirst(), EmptyKey
) &&
803 !KeyInfoT::isEqual(NewB
->getFirst(), TombstoneKey
)) {
804 new (&NewB
->getSecond()) ValueT(std::move(OldB
->getSecond()));
805 OldB
->getSecond().~ValueT();
809 // The hard part of moving the small buckets across is done, just move
810 // the TmpRep into its new home.
811 SmallSide
.Small
= false;
812 new (SmallSide
.getLargeRep()) LargeRep(std::move(TmpRep
));
815 SmallDenseMap
& operator=(const SmallDenseMap
& other
) {
821 SmallDenseMap
& operator=(SmallDenseMap
&&other
) {
829 void copyFrom(const SmallDenseMap
& other
) {
833 if (other
.getNumBuckets() > InlineBuckets
) {
835 new (getLargeRep()) LargeRep(allocateBuckets(other
.getNumBuckets()));
837 this->BaseT::copyFrom(other
);
840 void init(unsigned InitBuckets
) {
842 if (InitBuckets
> InlineBuckets
) {
844 new (getLargeRep()) LargeRep(allocateBuckets(InitBuckets
));
846 this->BaseT::initEmpty();
849 void grow(unsigned AtLeast
) {
850 if (AtLeast
>= InlineBuckets
)
851 AtLeast
= std::max
<unsigned>(64, NextPowerOf2(AtLeast
-1));
854 if (AtLeast
< InlineBuckets
)
855 return; // Nothing to do.
857 // First move the inline buckets into a temporary storage.
858 AlignedCharArrayUnion
<BucketT
[InlineBuckets
]> TmpStorage
;
859 BucketT
*TmpBegin
= reinterpret_cast<BucketT
*>(TmpStorage
.buffer
);
860 BucketT
*TmpEnd
= TmpBegin
;
862 // Loop over the buckets, moving non-empty, non-tombstones into the
863 // temporary storage. Have the loop move the TmpEnd forward as it goes.
864 const KeyT EmptyKey
= this->getEmptyKey();
865 const KeyT TombstoneKey
= this->getTombstoneKey();
866 for (BucketT
*P
= getBuckets(), *E
= P
+ InlineBuckets
; P
!= E
; ++P
) {
867 if (!KeyInfoT::isEqual(P
->getFirst(), EmptyKey
) &&
868 !KeyInfoT::isEqual(P
->getFirst(), TombstoneKey
)) {
869 assert(size_t(TmpEnd
- TmpBegin
) < InlineBuckets
&&
870 "Too many inline buckets!");
871 new (&TmpEnd
->getFirst()) KeyT(std::move(P
->getFirst()));
872 new (&TmpEnd
->getSecond()) ValueT(std::move(P
->getSecond()));
874 P
->getSecond().~ValueT();
876 P
->getFirst().~KeyT();
879 // Now make this map use the large rep, and move all the entries back
882 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast
));
883 this->moveFromOldBuckets(TmpBegin
, TmpEnd
);
887 LargeRep OldRep
= std::move(*getLargeRep());
888 getLargeRep()->~LargeRep();
889 if (AtLeast
<= InlineBuckets
) {
892 new (getLargeRep()) LargeRep(allocateBuckets(AtLeast
));
895 this->moveFromOldBuckets(OldRep
.Buckets
, OldRep
.Buckets
+OldRep
.NumBuckets
);
897 // Free the old table.
898 operator delete(OldRep
.Buckets
);
901 void shrink_and_clear() {
902 unsigned OldSize
= this->size();
905 // Reduce the number of buckets.
906 unsigned NewNumBuckets
= 0;
908 NewNumBuckets
= 1 << (Log2_32_Ceil(OldSize
) + 1);
909 if (NewNumBuckets
> InlineBuckets
&& NewNumBuckets
< 64u)
912 if ((Small
&& NewNumBuckets
<= InlineBuckets
) ||
913 (!Small
&& NewNumBuckets
== getLargeRep()->NumBuckets
)) {
914 this->BaseT::initEmpty();
923 unsigned getNumEntries() const {
926 void setNumEntries(unsigned Num
) {
927 assert(Num
< INT_MAX
&& "Cannot support more than INT_MAX entries");
931 unsigned getNumTombstones() const {
932 return NumTombstones
;
934 void setNumTombstones(unsigned Num
) {
938 const BucketT
*getInlineBuckets() const {
940 // Note that this cast does not violate aliasing rules as we assert that
941 // the memory's dynamic type is the small, inline bucket buffer, and the
942 // 'storage.buffer' static type is 'char *'.
943 return reinterpret_cast<const BucketT
*>(storage
.buffer
);
945 BucketT
*getInlineBuckets() {
946 return const_cast<BucketT
*>(
947 const_cast<const SmallDenseMap
*>(this)->getInlineBuckets());
949 const LargeRep
*getLargeRep() const {
951 // Note, same rule about aliasing as with getInlineBuckets.
952 return reinterpret_cast<const LargeRep
*>(storage
.buffer
);
954 LargeRep
*getLargeRep() {
955 return const_cast<LargeRep
*>(
956 const_cast<const SmallDenseMap
*>(this)->getLargeRep());
959 const BucketT
*getBuckets() const {
960 return Small
? getInlineBuckets() : getLargeRep()->Buckets
;
962 BucketT
*getBuckets() {
963 return const_cast<BucketT
*>(
964 const_cast<const SmallDenseMap
*>(this)->getBuckets());
966 unsigned getNumBuckets() const {
967 return Small
? InlineBuckets
: getLargeRep()->NumBuckets
;
970 void deallocateBuckets() {
974 operator delete(getLargeRep()->Buckets
);
975 getLargeRep()->~LargeRep();
978 LargeRep
allocateBuckets(unsigned Num
) {
979 assert(Num
> InlineBuckets
&& "Must allocate more buckets than are inline");
981 static_cast<BucketT
*>(operator new(sizeof(BucketT
) * Num
)), Num
/// Forward iterator over a DenseMap bucket array that skips empty and
/// tombstone buckets.  IsConst selects between iterator and const_iterator.
template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
          bool IsConst>
class DenseMapIterator {
  typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true> ConstIterator;
  friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;

public:
  typedef ptrdiff_t difference_type;
  typedef typename std::conditional<IsConst, const Bucket, Bucket>::type
      value_type;
  typedef value_type *pointer;
  typedef value_type &reference;
  typedef std::forward_iterator_tag iterator_category;

private:
  pointer Ptr, End;

public:
  DenseMapIterator() : Ptr(nullptr), End(nullptr) {}

  DenseMapIterator(pointer Pos, pointer E, bool NoAdvance = false)
    : Ptr(Pos), End(E) {
    if (!NoAdvance) AdvancePastEmptyBuckets();
  }

  // If IsConst is true this is a converting constructor from iterator to
  // const_iterator and the default copy constructor is used.
  // Otherwise this is a copy constructor for iterator.
  DenseMapIterator(
      const DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false> &I)
    : Ptr(I.Ptr), End(I.End) {}

  reference operator*() const {
    return *Ptr;
  }
  pointer operator->() const {
    return Ptr;
  }

  bool operator==(const ConstIterator &RHS) const {
    return Ptr == RHS.operator->();
  }
  bool operator!=(const ConstIterator &RHS) const {
    return Ptr != RHS.operator->();
  }

  inline DenseMapIterator& operator++() {  // Preincrement
    ++Ptr;
    AdvancePastEmptyBuckets();
    return *this;
  }
  DenseMapIterator operator++(int) {  // Postincrement
    DenseMapIterator tmp = *this; ++*this; return tmp;
  }

private:
  /// Advance Ptr over buckets holding the empty or tombstone key so the
  /// iterator always rests on a live entry (or End).
  void AdvancePastEmptyBuckets() {
    const KeyT Empty = KeyInfoT::getEmptyKey();
    const KeyT Tombstone = KeyInfoT::getTombstoneKey();

    while (Ptr != End && (KeyInfoT::isEqual(Ptr->getFirst(), Empty) ||
                          KeyInfoT::isEqual(Ptr->getFirst(), Tombstone)))
      ++Ptr;
  }
};
1051 template<typename KeyT
, typename ValueT
, typename KeyInfoT
>
1052 static inline size_t
1053 capacity_in_bytes(const DenseMap
<KeyT
, ValueT
, KeyInfoT
> &X
) {
1054 return X
.getMemorySize();
1057 } // end namespace llvm