/*
 * Copyright (c) 2014 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "cmap.h"
#include "coverage.h"
#include "bitmap.h"
#include "hash.h"
#include "ovs-rcu.h"
#include "random.h"
#include "util.h"
COVERAGE_DEFINE(cmap_expand);
COVERAGE_DEFINE(cmap_shrink);
/* Optimistic Concurrent Cuckoo Hash
 * =================================
 *
 * A "cuckoo hash" is an open addressing hash table schema, designed such that
 * a given element can be in one of only a small number of buckets 'd', each of
 * which holds up to a small number 'k' elements.  Thus, the expected and
 * worst-case lookup times are O(1) because they require comparing no more than
 * a fixed number of elements (k * d).  Inserting a new element can require
 * moving around existing elements, but it is also O(1) amortized expected
 * time.
 *
 * An optimistic concurrent hash table goes one step further, making it
 * possible for a single writer to execute concurrently with any number of
 * readers without requiring the readers to take any locks.
 *
 * This cuckoo hash implementation uses:
 *
 *    - Two hash functions (d=2).  More hash functions allow for a higher load
 *      factor, but increasing 'k' is easier and the benefits of increasing 'd'
 *      quickly fall off with the 'k' values used here.  Also, the method of
 *      generating hashes used in this implementation is hard to reasonably
 *      extend beyond d=2.  Finally, each additional hash function means that a
 *      lookup has to look at least one extra cache line.
 *
 *    - 5 or 7 elements per bucket (k=5 or k=7), chosen to make buckets
 *      exactly one cache line in size.
 *
 * According to Erlingsson [4], these parameters suggest a maximum load factor
 * of about 93%.  The current implementation is conservative, expanding the
 * hash table when it is over 85% full.
 *
 * When the load factor drops below 20%, the hash table is shrunk by half.
 * This reduces the table's memory utilization and keeps it from occupying
 * the chunk at the top of the heap, which would otherwise prevent the heap
 * from being trimmed.
 *
 *
 * Hash Functions
 * ==============
 *
 * A cuckoo hash requires multiple hash functions.  When reorganizing the hash
 * becomes too difficult, it also requires the ability to change the hash
 * functions.  Requiring the client to provide multiple hashes and to be able
 * to change them to new hashes upon insertion is inconvenient.
 *
 * This implementation takes another approach.  The client provides a single,
 * fixed hash.  The cuckoo hash internally "rehashes" this hash against a
 * randomly selected basis value (see rehash()).  This rehashed value is one of
 * the two hashes.  The other hash is computed by 16-bit circular rotation of
 * the rehashed value.  Updating the basis changes the hash functions.
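 *
 * For example, if the rehashed value is 0x1234abcd, the other hash is
 * 0xabcd1234, and rotating that by 16 bits again restores 0x1234abcd.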
 *
 * To work properly, the hash functions used by a cuckoo hash must be
 * independent.  If one hash function is a function of the other (e.g. h2(x) =
 * h1(x) + 1, or h2(x) = hash(h1(x))), then insertion will eventually fail
 * catastrophically (loop forever) because of collisions.  With this rehashing
 * technique, the two hashes are completely independent for masks up to 16 bits
 * wide.  For masks wider than 16 bits, only 32-n bits are independent between
 * the two hashes.  Thus, it becomes risky to grow a cuckoo hash table beyond
 * about 2**24 buckets (about 71 million elements with k=5 and maximum load
 * 85%).  Fortunately, Open vSwitch does not normally deal with hash tables
 * this large.
 *
 *
 * Handling Duplicates
 * ===================
 *
 * This cuckoo hash table implementation deals with duplicate client-provided
 * hash values by chaining: the second and subsequent cmap_nodes with a given
 * hash are chained off the initially inserted node's 'next' member.  The hash
 * table maintains the invariant that a single client-provided hash value
 * exists in only a single chain in a single bucket (even though that hash
 * could be stored in two buckets).
 *
 *
 * References
 * ==========
 *
 * [1] D. Zhou, B. Fan, H. Lim, M. Kaminsky, D. G. Andersen, "Scalable, High
 *     Performance Ethernet Forwarding with CuckooSwitch".  In Proc. 9th
 *     CoNEXT, Dec. 2013.
 *
 * [2] B. Fan, D. G. Andersen, and M. Kaminsky, "MemC3: Compact and concurrent
 *     memcache with dumber caching and smarter hashing".  In Proc. 10th USENIX
 *     NSDI, Apr. 2013.
 *
 * [3] R. Pagh and F. Rodler, "Cuckoo hashing".  Journal of Algorithms, 51(2):
 *     122-144, May 2004.
 *
 * [4] U. Erlingsson, M. Manasse, F. McSherry, "A Cool and Practical
 *     Alternative to Traditional Hash Tables".  In Proc. 7th Workshop on
 *     Distributed Data and Structures (WDAS'06), 2006.
 */
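
/* Illustrative usage sketch.  A client embeds a 'struct cmap_node' in its own
 * structure and supplies one hash per element.  'struct my_entry' and its
 * 'key' member are hypothetical names; cmap_init(), cmap_insert(),
 * CMAP_FOR_EACH_WITH_HASH, and hash_int() are the real OVS interfaces:
 *
 *     struct my_entry {
 *         struct cmap_node node;      (embedded; the cmap allocates nothing)
 *         uint32_t key;
 *     };
 *
 *     struct cmap map;
 *     struct my_entry *e = xmalloc(sizeof *e);
 *     struct my_entry *found;
 *
 *     cmap_init(&map);
 *     e->key = 42;
 *     cmap_insert(&map, &e->node, hash_int(e->key, 0));
 *
 *     CMAP_FOR_EACH_WITH_HASH (found, node, hash_int(42, 0), &map) {
 *         if (found->key == 42) {
 *             ...'found' is the element, modulo hash collisions...
 *         }
 *     }
 */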
/* An entry is an int and a pointer: 8 bytes on 32-bit, 12 bytes on 64-bit. */
#define CMAP_ENTRY_SIZE (4 + (UINTPTR_MAX == UINT32_MAX ? 4 : 8))

/* Number of entries per bucket: 7 on 32-bit, 5 on 64-bit. */
#define CMAP_K ((CACHE_LINE_SIZE - 4) / CMAP_ENTRY_SIZE)

/* Pad to make a bucket a full cache line in size: 4 on 32-bit, 0 on 64-bit. */
#define CMAP_PADDING ((CACHE_LINE_SIZE - 4) - (CMAP_K * CMAP_ENTRY_SIZE))
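
/* For example, with 64-byte cache lines: on a 64-bit build CMAP_ENTRY_SIZE is
 * 4 + 8 = 12, so CMAP_K is (64 - 4) / 12 = 5 and CMAP_PADDING is 60 - 60 = 0;
 * on a 32-bit build CMAP_ENTRY_SIZE is 8, so CMAP_K is 60 / 8 = 7 and
 * CMAP_PADDING is 60 - 56 = 4. */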
/* A cuckoo hash bucket.  Designed to be cache-aligned and exactly one cache
 * line long. */
struct cmap_bucket {
    /* Allows readers to track in-progress changes.  Initially zero, each
     * writer increments this value just before and just after each change (see
     * cmap_set_bucket()).  Thus, a reader can ensure that it gets a consistent
     * snapshot by waiting for the counter to become even (see
     * read_even_counter()), then checking that its value does not change while
     * examining the bucket (see cmap_find()). */
    atomic_uint32_t counter;

    /* (hash, node) slots.  They are parallel arrays instead of an array of
     * structs to reduce the amount of space lost to padding.
     *
     * The slots are in no particular order.  A null pointer indicates that a
     * pair is unused.  In-use slots are not necessarily in the earliest
     * slots. */
    uint32_t hashes[CMAP_K];
    struct cmap_node nodes[CMAP_K];

    /* Padding to make cmap_bucket exactly one cache line long. */
#if CMAP_PADDING > 0
    uint8_t pad[CMAP_PADDING];
#endif
};
BUILD_ASSERT_DECL(sizeof(struct cmap_bucket) == CACHE_LINE_SIZE);
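
/* A minimal sketch of the optimistic read protocol that 'counter' enables
 * (this is the pattern that read_even_counter(), cmap_find_in_bucket(), and
 * counter_changed(), defined below, implement):
 *
 *     uint32_t c;
 *     do {
 *         c = read_even_counter(b);             (waits out in-progress writes)
 *         node = cmap_find_in_bucket(b, hash);
 *     } while (counter_changed(b, c));          (retries if a write raced us)
 */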
/* Default maximum load factor (as a fraction of UINT32_MAX + 1) before
 * enlarging a cmap.  Reasonable values lie between about 75% and 93%.  Smaller
 * values waste memory; larger values increase the average insertion time. */
#define CMAP_MAX_LOAD ((uint32_t) (UINT32_MAX * .85))

/* Default minimum load factor (as a fraction of UINT32_MAX + 1) before
 * shrinking a cmap.  Currently the value is chosen to be 20%; this means
 * the cmap will have a 40% load factor after shrinking. */
#define CMAP_MIN_LOAD ((uint32_t) (UINT32_MAX * .20))
/* The implementation of a concurrent hash map. */
struct cmap_impl {
    unsigned int n;             /* Number of in-use elements. */
    unsigned int max_n;         /* Max elements before enlarging. */
    unsigned int min_n;         /* Min elements before shrinking. */
    uint32_t mask;              /* Number of 'buckets', minus one. */
    uint32_t basis;             /* Basis for rehashing client's hash values. */

    /* Padding to make cmap_impl exactly one cache line long. */
    uint8_t pad[CACHE_LINE_SIZE - sizeof(unsigned int) * 5];

    struct cmap_bucket buckets[];
};
BUILD_ASSERT_DECL(sizeof(struct cmap_impl) == CACHE_LINE_SIZE);
static struct cmap_impl *cmap_rehash(struct cmap *, uint32_t mask);

/* Explicit inline keywords in utility functions seem to be necessary
 * to prevent performance regression on cmap_find(). */
/* Given a rehashed value 'hash', returns the other hash for that rehashed
 * value.  This is symmetric: other_hash(other_hash(x)) == x.  (See also "Hash
 * Functions" at the top of this file.) */
static inline uint32_t
other_hash(uint32_t hash)
{
    return (hash << 16) | (hash >> 16);
}
/* Returns the rehashed value for 'hash' within 'impl'.  (See also "Hash
 * Functions" at the top of this file.) */
static inline uint32_t
rehash(const struct cmap_impl *impl, uint32_t hash)
{
    return hash_finish(impl->basis, hash);
}
/* Not always inlined without the inline keyword. */
static inline struct cmap_impl *
cmap_get_impl(const struct cmap *cmap)
{
    return ovsrcu_get(struct cmap_impl *, &cmap->impl);
}
static uint32_t
calc_max_n(uint32_t mask)
{
    return ((uint64_t) (mask + 1) * CMAP_K * CMAP_MAX_LOAD) >> 32;
}

static uint32_t
calc_min_n(uint32_t mask)
{
    return ((uint64_t) (mask + 1) * CMAP_K * CMAP_MIN_LOAD) >> 32;
}
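
/* For example, with mask = 1023 (1024 buckets) and CMAP_K = 5, the table has
 * 5120 slots in total, so calc_max_n() yields 4351 (about 85% of 5120) and
 * calc_min_n() yields 1023 (about 20%). */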
static struct cmap_impl *
cmap_impl_create(uint32_t mask)
{
    struct cmap_impl *impl;

    ovs_assert(is_pow2(mask + 1));

    impl = xzalloc_cacheline(sizeof *impl
                             + (mask + 1) * sizeof *impl->buckets);
    impl->n = 0;
    impl->max_n = calc_max_n(mask);
    impl->min_n = calc_min_n(mask);
    impl->mask = mask;
    impl->basis = random_uint32();

    return impl;
}
/* Initializes 'cmap' as an empty concurrent hash map. */
void
cmap_init(struct cmap *cmap)
{
    ovsrcu_set(&cmap->impl, cmap_impl_create(0));
}
/* Destroys 'cmap'.
 *
 * The client is responsible for destroying any data previously held in
 * 'cmap'. */
void
cmap_destroy(struct cmap *cmap)
{
    if (cmap) {
        ovsrcu_postpone(free_cacheline, cmap_get_impl(cmap));
    }
}
/* Returns the number of elements in 'cmap'. */
size_t
cmap_count(const struct cmap *cmap)
{
    return cmap_get_impl(cmap)->n;
}
/* Returns true if 'cmap' is empty, false otherwise. */
bool
cmap_is_empty(const struct cmap *cmap)
{
    return cmap_count(cmap) == 0;
}
static inline uint32_t
read_counter(const struct cmap_bucket *bucket_)
{
    struct cmap_bucket *bucket = CONST_CAST(struct cmap_bucket *, bucket_);
    uint32_t counter;

    atomic_read_explicit(&bucket->counter, &counter, memory_order_acquire);

    return counter;
}
static inline uint32_t
read_even_counter(const struct cmap_bucket *bucket)
{
    uint32_t counter;

    do {
        counter = read_counter(bucket);
    } while (OVS_UNLIKELY(counter & 1));

    return counter;
}
static inline bool
counter_changed(const struct cmap_bucket *b_, uint32_t c)
{
    struct cmap_bucket *b = CONST_CAST(struct cmap_bucket *, b_);
    uint32_t counter;

    /* Need to make sure the counter read is not moved up, before the hash and
     * cmap_node_next().  Using atomic_read_explicit with memory_order_acquire
     * would allow prior reads to be moved after the barrier.
     * atomic_thread_fence prevents all following memory accesses from moving
     * prior to preceding loads. */
    atomic_thread_fence(memory_order_acquire);
    atomic_read_relaxed(&b->counter, &counter);

    return OVS_UNLIKELY(counter != c);
}
static inline const struct cmap_node *
cmap_find_in_bucket(const struct cmap_bucket *bucket, uint32_t hash)
{
    for (int i = 0; i < CMAP_K; i++) {
        if (bucket->hashes[i] == hash) {
            return cmap_node_next(&bucket->nodes[i]);
        }
    }
    return NULL;
}
static inline const struct cmap_node *
cmap_find__(const struct cmap_bucket *b1, const struct cmap_bucket *b2,
            uint32_t hash)
{
    uint32_t c1, c2;
    const struct cmap_node *node;

    do {
        do {
            c1 = read_even_counter(b1);
            node = cmap_find_in_bucket(b1, hash);
        } while (OVS_UNLIKELY(counter_changed(b1, c1)));
        if (node) {
            break;
        }
        do {
            c2 = read_even_counter(b2);
            node = cmap_find_in_bucket(b2, hash);
        } while (OVS_UNLIKELY(counter_changed(b2, c2)));
        if (node) {
            break;
        }
    } while (OVS_UNLIKELY(counter_changed(b1, c1)));

    return node;
}
/* Searches 'cmap' for an element with the specified 'hash'.  If one or more is
 * found, returns a pointer to the first one, otherwise a null pointer.  All of
 * the nodes on the returned list are guaranteed to have exactly the given
 * 'hash'.
 *
 * This function works even if 'cmap' is changing concurrently.  If 'cmap' is
 * not changing, then cmap_find_protected() is slightly faster.
 *
 * CMAP_FOR_EACH_WITH_HASH is usually more convenient. */
const struct cmap_node *
cmap_find(const struct cmap *cmap, uint32_t hash)
{
    const struct cmap_impl *impl = cmap_get_impl(cmap);
    uint32_t h1 = rehash(impl, hash);
    uint32_t h2 = other_hash(h1);

    return cmap_find__(&impl->buckets[h1 & impl->mask],
                       &impl->buckets[h2 & impl->mask],
                       hash);
}
/* Looks up the 'hashes' for which the corresponding bit in 'map' is 1, and
 * sets the corresponding pointer in 'nodes' if the hash value was found in
 * 'cmap'.  In other cases the 'nodes' values are not changed, i.e., no NULL
 * pointers are stored there.
 * Returns a map where a bit is set to 1 if the corresponding 'nodes' pointer
 * was stored, 0 otherwise.
 * Generally, the caller wants to use CMAP_NODE_FOR_EACH to check for hash
 * collisions. */
unsigned long
cmap_find_batch(const struct cmap *cmap, unsigned long map,
                uint32_t hashes[], const struct cmap_node *nodes[])
{
    const struct cmap_impl *impl = cmap_get_impl(cmap);
    unsigned long result = map;
    int i;
    uint32_t h1s[sizeof map * CHAR_BIT];
    const struct cmap_bucket *b1s[sizeof map * CHAR_BIT];
    const struct cmap_bucket *b2s[sizeof map * CHAR_BIT];
    uint32_t c1s[sizeof map * CHAR_BIT];

    /* Compute hashes and prefetch 1st buckets. */
    ULLONG_FOR_EACH_1(i, map) {
        h1s[i] = rehash(impl, hashes[i]);
        b1s[i] = &impl->buckets[h1s[i] & impl->mask];
        OVS_PREFETCH(b1s[i]);
    }
    /* Lookups, Round 1.  Only look at the first bucket. */
    ULLONG_FOR_EACH_1(i, map) {
        uint32_t c1;
        const struct cmap_bucket *b1 = b1s[i];
        const struct cmap_node *node;

        do {
            c1 = read_even_counter(b1);
            node = cmap_find_in_bucket(b1, hashes[i]);
        } while (OVS_UNLIKELY(counter_changed(b1, c1)));

        if (!node) {
            /* Not found (yet); prefetch the 2nd bucket. */
            b2s[i] = &impl->buckets[other_hash(h1s[i]) & impl->mask];
            OVS_PREFETCH(b2s[i]);
            c1s[i] = c1; /* We may need to check this after Round 2. */
            continue;
        }
        /* Found. */
        ULLONG_SET0(map, i); /* Ignore this on round 2. */
        OVS_PREFETCH(node);
        nodes[i] = node;
    }
    /* Round 2.  Look into the 2nd bucket, if needed. */
    ULLONG_FOR_EACH_1(i, map) {
        uint32_t c2;
        const struct cmap_bucket *b2 = b2s[i];
        const struct cmap_node *node;

        do {
            c2 = read_even_counter(b2);
            node = cmap_find_in_bucket(b2, hashes[i]);
        } while (OVS_UNLIKELY(counter_changed(b2, c2)));

        if (!node) {
            /* Not found, but the node may have been moved from b2 to b1 right
             * after we finished with b1 earlier.  We just got a clean reading
             * of the 2nd bucket, so we check the counter of the 1st bucket
             * only.  However, we need to check both buckets again, as the
             * entry may be moved again to the 2nd bucket.  Basically, we
             * need to loop as long as it takes to get stable readings of
             * both buckets.  cmap_find__() does that, and now that we have
             * fetched both buckets we can just use it. */
            if (OVS_UNLIKELY(counter_changed(b1s[i], c1s[i]))) {
                node = cmap_find__(b1s[i], b2s[i], hashes[i]);
                if (node) {
                    goto found;
                }
            }
            /* Not found. */
            ULLONG_SET0(result, i); /* Fix the result. */
            continue;
        }
found:
        OVS_PREFETCH(node);
        nodes[i] = node;
    }
    return result;
}
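
/* An illustrative cmap_find_batch() call (a sketch; 'my_cmap' and the filled
 * 'hashes[]' are assumed, and 'n' must be less than the bit width of
 * 'unsigned long'):
 *
 *     const struct cmap_node *nodes[sizeof(unsigned long) * CHAR_BIT];
 *     uint32_t hashes[sizeof(unsigned long) * CHAR_BIT];
 *     unsigned long map = (1UL << n) - 1;       (request lookups 0..n-1)
 *     unsigned long found;
 *     int i;
 *
 *     found = cmap_find_batch(&my_cmap, map, hashes, nodes);
 *     ULLONG_FOR_EACH_1 (i, found) {
 *         ...nodes[i] heads a chain of nodes with hash hashes[i]...
 *     }
 */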
static int
cmap_find_slot_protected(struct cmap_bucket *b, uint32_t hash)
{
    int i;

    for (i = 0; i < CMAP_K; i++) {
        if (b->hashes[i] == hash && cmap_node_next_protected(&b->nodes[i])) {
            return i;
        }
    }
    return -1;
}
static struct cmap_node *
cmap_find_bucket_protected(struct cmap_impl *impl, uint32_t hash, uint32_t h)
{
    struct cmap_bucket *b = &impl->buckets[h & impl->mask];
    int i;

    for (i = 0; i < CMAP_K; i++) {
        if (b->hashes[i] == hash) {
            return cmap_node_next_protected(&b->nodes[i]);
        }
    }
    return NULL;
}
/* Like cmap_find(), but only for use if 'cmap' cannot change concurrently.
 *
 * CMAP_FOR_EACH_WITH_HASH_PROTECTED is usually more convenient. */
struct cmap_node *
cmap_find_protected(const struct cmap *cmap, uint32_t hash)
{
    struct cmap_impl *impl = cmap_get_impl(cmap);
    uint32_t h1 = rehash(impl, hash);
    uint32_t h2 = other_hash(h1);
    struct cmap_node *node;

    node = cmap_find_bucket_protected(impl, hash, h1);
    if (node) {
        return node;
    }
    return cmap_find_bucket_protected(impl, hash, h2);
}
static int
cmap_find_empty_slot_protected(const struct cmap_bucket *b)
{
    int i;

    for (i = 0; i < CMAP_K; i++) {
        if (!cmap_node_next_protected(&b->nodes[i])) {
            return i;
        }
    }
    return -1;
}
static void
cmap_set_bucket(struct cmap_bucket *b, int i,
                struct cmap_node *node, uint32_t hash)
{
    uint32_t c;

    atomic_read_explicit(&b->counter, &c, memory_order_acquire);
    atomic_store_explicit(&b->counter, c + 1, memory_order_release);
    ovsrcu_set(&b->nodes[i].next, node); /* Also atomic. */
    b->hashes[i] = hash;
    atomic_store_explicit(&b->counter, c + 2, memory_order_release);
}
/* Searches 'b' for a node with the given 'hash'.  If it finds one, adds
 * 'new_node' to the node's linked list and returns true.  If it does not find
 * one, returns false. */
static bool
cmap_insert_dup(struct cmap_node *new_node, uint32_t hash,
                struct cmap_bucket *b)
{
    int i;

    for (i = 0; i < CMAP_K; i++) {
        if (b->hashes[i] == hash) {
            struct cmap_node *node = cmap_node_next_protected(&b->nodes[i]);

            if (node) {
                /* The common case is that 'new_node' is a singleton,
                 * with a null 'next' pointer.  Rehashing can add a
                 * longer chain, but due to our invariant of always
                 * having all nodes with the same (user) hash value at
                 * a single chain, rehashing will always insert the
                 * chain to an empty node.  The only way we can end up
                 * here is by the user inserting a chain of nodes at
                 * once.  Find the end of the chain starting at
                 * 'new_node', then splice 'node' to the end of that
                 * chain. */
                struct cmap_node *p = new_node;

                for (;;) {
                    struct cmap_node *next = cmap_node_next_protected(p);

                    if (!next) {
                        break;
                    }
                    p = next;
                }
                ovsrcu_set_hidden(&p->next, node);
            } else {
                /* The hash value is there from some previous insertion, but
                 * the associated node has been removed.  We're not really
                 * inserting a duplicate, but we can still reuse the slot. */
            }

            /* Change the bucket to point to 'new_node'.  This is a degenerate
             * form of cmap_set_bucket() that doesn't update the counter since
             * we're only touching one field and in a way that doesn't change
             * the bucket's meaning for readers. */
            ovsrcu_set(&b->nodes[i].next, new_node);

            return true;
        }
    }
    return false;
}
/* Searches 'b' for an empty slot.  If successful, stores 'node' and 'hash' in
 * the slot and returns true.  Otherwise, returns false. */
static bool
cmap_insert_bucket(struct cmap_node *node, uint32_t hash,
                   struct cmap_bucket *b)
{
    int i;

    for (i = 0; i < CMAP_K; i++) {
        if (!cmap_node_next_protected(&b->nodes[i])) {
            cmap_set_bucket(b, i, node, hash);
            return true;
        }
    }
    return false;
}
/* Returns the other bucket that b->nodes[slot] could occupy in 'impl'.  (This
 * might be the same as 'b'.) */
static struct cmap_bucket *
other_bucket_protected(struct cmap_impl *impl, struct cmap_bucket *b, int slot)
{
    uint32_t h1 = rehash(impl, b->hashes[slot]);
    uint32_t h2 = other_hash(h1);
    uint32_t b_idx = b - impl->buckets;
    uint32_t other_h = (h1 & impl->mask) == b_idx ? h2 : h1;

    return &impl->buckets[other_h & impl->mask];
}
/* 'new_node' is to be inserted into 'impl', but both candidate buckets 'b1'
 * and 'b2' are full.  This function attempts to rearrange buckets within
 * 'impl' to make room for 'new_node'.
 *
 * The implementation is a general-purpose breadth-first search.  At first
 * glance, this is more complex than a random walk through 'impl' (suggested by
 * some references), but random walks have a tendency to loop back through a
 * single bucket.  We have to move nodes backward along the path that we find,
 * so that no node actually disappears from the hash table, which means a
 * random walk would have to be careful to deal with loops.  By contrast, a
 * successful breadth-first search always finds a *shortest* path through the
 * hash table, and a shortest path will never contain loops, so it avoids that
 * problem entirely. */
static bool
cmap_insert_bfs(struct cmap_impl *impl, struct cmap_node *new_node,
                uint32_t hash, struct cmap_bucket *b1, struct cmap_bucket *b2)
{
    enum { MAX_DEPTH = 4 };

    /* A path from 'start' to 'end' via the 'n' steps in 'slots[]'.
     *
     * One can follow the path via:
     *
     *     struct cmap_bucket *b;
     *     int i;
     *
     *     b = path->start;
     *     for (i = 0; i < path->n; i++) {
     *         b = other_bucket_protected(impl, b, path->slots[i]);
     *     }
     *     ovs_assert(b == path->end);
     */
    struct cmap_path {
        struct cmap_bucket *start; /* First bucket along the path. */
        struct cmap_bucket *end;   /* Last bucket on the path. */
        uint8_t slots[MAX_DEPTH];  /* Slots used for each hop. */
        int n;                     /* Number of slots[]. */
    };

    /* We need to limit the amount of work we do trying to find a path.  It
     * might actually be impossible to rearrange the cmap, and after some time
     * it is likely to be easier to rehash the entire cmap.
     *
     * This value of MAX_QUEUE is an arbitrary limit suggested by one of the
     * references.  Empirically, it seems to work OK. */
    enum { MAX_QUEUE = 500 };
    struct cmap_path queue[MAX_QUEUE];
    int head = 0;
    int tail = 0;
    /* Add 'b1' and 'b2' as starting points for the search. */
    queue[head].start = b1;
    queue[head].end = b1;
    queue[head].n = 0;
    head++;
    if (b1 != b2) {
        queue[head].start = b2;
        queue[head].end = b2;
        queue[head].n = 0;
        head++;
    }
    while (tail < head) {
        const struct cmap_path *path = &queue[tail++];
        struct cmap_bucket *this = path->end;
        int i;

        for (i = 0; i < CMAP_K; i++) {
            struct cmap_bucket *next = other_bucket_protected(impl, this, i);
            int j;

            if (this == next) {
                continue;
            }

            j = cmap_find_empty_slot_protected(next);
            if (j >= 0) {
                /* We've found a path along which we can rearrange the hash
                 * table:  Start at path->start, follow all the slots in
                 * path->slots[], then follow slot 'i', then the bucket you
                 * arrive at has slot 'j' empty. */
                struct cmap_bucket *buckets[MAX_DEPTH + 2];
                int slots[MAX_DEPTH + 2];
                int k;

                /* Figure out the full sequence of slots. */
                for (k = 0; k < path->n; k++) {
                    slots[k] = path->slots[k];
                }
                slots[path->n] = i;
                slots[path->n + 1] = j;

                /* Figure out the full sequence of buckets. */
                buckets[0] = path->start;
                for (k = 0; k <= path->n; k++) {
                    buckets[k + 1] = other_bucket_protected(impl, buckets[k],
                                                            slots[k]);
                }

                /* Now the path is fully expressed.  One can start from
                 * buckets[0], go via slots[0] to buckets[1], via slots[1] to
                 * buckets[2], and so on.
                 *
                 * Move all the nodes across the path "backward".  After each
                 * step some node appears in two buckets.  Thus, every node is
                 * always visible to a concurrent search. */
                for (k = path->n + 1; k > 0; k--) {
                    int slot = slots[k - 1];

                    cmap_set_bucket(
                        buckets[k], slots[k],
                        cmap_node_next_protected(&buckets[k - 1]->nodes[slot]),
                        buckets[k - 1]->hashes[slot]);
                }

                /* Finally, replace the first node on the path by
                 * 'new_node'. */
                cmap_set_bucket(buckets[0], slots[0], new_node, hash);

                return true;
            }

            if (path->n < MAX_DEPTH && head < MAX_QUEUE) {
                struct cmap_path *new_path = &queue[head++];

                *new_path = *path;
                new_path->end = next;
                new_path->slots[new_path->n++] = i;
            }
        }
    }

    return false;
}
/* Adds 'node', with the given 'hash', to 'impl'.
 *
 * 'node' is ordinarily a single node, with a null 'next' pointer.  When
 * rehashing, however, it may be a longer chain of nodes. */
static bool
cmap_try_insert(struct cmap_impl *impl, struct cmap_node *node, uint32_t hash)
{
    uint32_t h1 = rehash(impl, hash);
    uint32_t h2 = other_hash(h1);
    struct cmap_bucket *b1 = &impl->buckets[h1 & impl->mask];
    struct cmap_bucket *b2 = &impl->buckets[h2 & impl->mask];

    return (OVS_UNLIKELY(cmap_insert_dup(node, hash, b1) ||
                         cmap_insert_dup(node, hash, b2)) ||
            OVS_LIKELY(cmap_insert_bucket(node, hash, b1) ||
                       cmap_insert_bucket(node, hash, b2)) ||
            cmap_insert_bfs(impl, node, hash, b1, b2));
}
/* Inserts 'node', with the given 'hash', into 'cmap'.  The caller must ensure
 * that 'cmap' cannot change concurrently (from another thread).  If duplicates
 * are undesirable, the caller must have already verified that 'cmap' does not
 * contain a duplicate of 'node'.
 *
 * Returns the current number of nodes in the cmap after the insertion. */
size_t
cmap_insert(struct cmap *cmap, struct cmap_node *node, uint32_t hash)
{
    struct cmap_impl *impl = cmap_get_impl(cmap);

    ovsrcu_set_hidden(&node->next, NULL);

    if (OVS_UNLIKELY(impl->n >= impl->max_n)) {
        COVERAGE_INC(cmap_expand);
        impl = cmap_rehash(cmap, (impl->mask << 1) | 1);
    }

    while (OVS_UNLIKELY(!cmap_try_insert(impl, node, hash))) {
        impl = cmap_rehash(cmap, impl->mask);
    }
    return ++impl->n;
}
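
/* A sketch of the duplicate-avoiding pattern described above ('struct
 * my_entry', 'map', 'key', and 'new_entry' are hypothetical):
 *
 *     uint32_t hash = hash_int(key, 0);
 *     struct my_entry *e;
 *
 *     CMAP_FOR_EACH_WITH_HASH (e, node, hash, &map) {
 *         if (e->key == key) {
 *             return;                  (already present: do not insert)
 *         }
 *     }
 *     cmap_insert(&map, &new_entry->node, hash);
 */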
static bool
cmap_replace__(struct cmap_impl *impl, struct cmap_node *node,
               struct cmap_node *replacement, uint32_t hash, uint32_t h)
{
    struct cmap_bucket *b = &impl->buckets[h & impl->mask];
    int slot;

    slot = cmap_find_slot_protected(b, hash);
    if (slot < 0) {
        return false;
    }

    /* The pointer to 'node' is changed to point to 'replacement',
     * which is the next node if no replacement node is given. */
    if (!replacement) {
        replacement = cmap_node_next_protected(node);
    } else {
        /* 'replacement' takes the position of 'node' in the list. */
        ovsrcu_set_hidden(&replacement->next, cmap_node_next_protected(node));
    }

    struct cmap_node *iter = &b->nodes[slot];
    for (;;) {
        struct cmap_node *next = cmap_node_next_protected(iter);

        if (next == node) {
            ovsrcu_set(&iter->next, replacement);
            return true;
        }
        iter = next;
    }
}
/* Replaces 'old_node' in 'cmap' with 'new_node'.  The caller must
 * ensure that 'cmap' cannot change concurrently (from another thread).
 *
 * 'old_node' must not be destroyed or modified or inserted back into 'cmap' or
 * into any other concurrent hash map while any other thread might be accessing
 * it.  One correct way to do this is to free it from an RCU callback with
 * ovsrcu_postpone().
 *
 * Returns the current number of nodes in the cmap after the replacement.  The
 * number of nodes decreases by one if 'new_node' is NULL. */
size_t
cmap_replace(struct cmap *cmap, struct cmap_node *old_node,
             struct cmap_node *new_node, uint32_t hash)
{
    struct cmap_impl *impl = cmap_get_impl(cmap);
    uint32_t h1 = rehash(impl, hash);
    uint32_t h2 = other_hash(h1);
    bool ok;

    ok = cmap_replace__(impl, old_node, new_node, hash, h1)
        || cmap_replace__(impl, old_node, new_node, hash, h2);
    ovs_assert(ok);

    if (!new_node) {
        impl->n--;
        if (OVS_UNLIKELY(impl->n < impl->min_n)) {
            COVERAGE_INC(cmap_shrink);
            impl = cmap_rehash(cmap, impl->mask >> 1);
        }
    }
    return impl->n;
}
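
/* A sketch of the RCU-safe removal pattern described above, using
 * cmap_remove() from cmap.h (cmap_replace() with a NULL replacement);
 * 'map', 'e', and 'hash' are hypothetical:
 *
 *     cmap_remove(&map, &e->node, hash);
 *     ovsrcu_postpone(free, e);        (freed only after readers quiesce)
 */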
static bool
cmap_try_rehash(const struct cmap_impl *old, struct cmap_impl *new)
{
    const struct cmap_bucket *b;

    for (b = old->buckets; b <= &old->buckets[old->mask]; b++) {
        int i;

        for (i = 0; i < CMAP_K; i++) {
            /* possible optimization here because we know the hashes are
             * unique */
            struct cmap_node *node = cmap_node_next_protected(&b->nodes[i]);

            if (node && !cmap_try_insert(new, node, b->hashes[i])) {
                return false;
            }
        }
    }
    return true;
}
static struct cmap_impl *
cmap_rehash(struct cmap *cmap, uint32_t mask)
{
    struct cmap_impl *old = cmap_get_impl(cmap);
    struct cmap_impl *new;

    new = cmap_impl_create(mask);
    ovs_assert(old->n < new->max_n);

    while (!cmap_try_rehash(old, new)) {
        memset(new->buckets, 0, (mask + 1) * sizeof *new->buckets);
        new->basis = random_uint32();
    }

    new->n = old->n;
    ovsrcu_set(&cmap->impl, new);
    ovsrcu_postpone(free_cacheline, old);

    return new;
}
struct cmap_cursor
cmap_cursor_start(const struct cmap *cmap)
{
    struct cmap_cursor cursor;

    cursor.impl = cmap_get_impl(cmap);
    cursor.bucket_idx = 0;
    cursor.entry_idx = 0;
    cursor.node = NULL;
    cmap_cursor_advance(&cursor);

    return cursor;
}
void
cmap_cursor_advance(struct cmap_cursor *cursor)
{
    const struct cmap_impl *impl = cursor->impl;

    if (cursor->node) {
        cursor->node = cmap_node_next(cursor->node);
        if (cursor->node) {
            return;
        }
    }

    while (cursor->bucket_idx <= impl->mask) {
        const struct cmap_bucket *b = &impl->buckets[cursor->bucket_idx];

        while (cursor->entry_idx < CMAP_K) {
            cursor->node = cmap_node_next(&b->nodes[cursor->entry_idx++]);
            if (cursor->node) {
                return;
            }
        }

        cursor->bucket_idx++;
        cursor->entry_idx = 0;
    }
}
/* Returns the next node in 'cmap' in hash order, or NULL if no nodes remain in
 * 'cmap'.  Uses '*pos' to determine where to begin iteration, and updates
 * '*pos' so that the next call continues where this one left off.
 *
 * It's better to use plain CMAP_FOR_EACH and related functions, since they are
 * faster and better at dealing with cmaps that change during iteration.
 *
 * Before beginning iteration, set '*pos' to all zeros. */
struct cmap_node *
cmap_next_position(const struct cmap *cmap,
                   struct cmap_position *pos)
{
    struct cmap_impl *impl = cmap_get_impl(cmap);
    unsigned int bucket = pos->bucket;
    unsigned int entry = pos->entry;
    unsigned int offset = pos->offset;

    while (bucket <= impl->mask) {
        const struct cmap_bucket *b = &impl->buckets[bucket];

        while (entry < CMAP_K) {
            const struct cmap_node *node = cmap_node_next(&b->nodes[entry]);
            unsigned int i;

            for (i = 0; node; i++, node = cmap_node_next(node)) {
                if (i == offset) {
                    if (cmap_node_next(node)) {
                        offset++;
                    } else {
                        entry++;
                        offset = 0;
                    }
                    pos->bucket = bucket;
                    pos->entry = entry;
                    pos->offset = offset;
                    return CONST_CAST(struct cmap_node *, node);
                }
            }

            entry++;
            offset = 0;
        }

        bucket++;
        entry = offset = 0;
    }

    pos->bucket = pos->entry = pos->offset = 0;
    return NULL;
}
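
/* Typical iteration uses the macros from cmap.h rather than
 * cmap_next_position() directly; a sketch with a hypothetical
 * 'struct my_entry':
 *
 *     struct my_entry *e;
 *
 *     CMAP_FOR_EACH (e, node, &map) {
 *         ...visits each element once, in no particular order...
 *     }
 */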