]>
Commit | Line | Data |
---|---|---|
1c4dd424 JR |
1 | /* |
2 | * Copyright (c) 2014, 2016 Nicira, Inc. | |
3 | * | |
4 | * Licensed under the Apache License, Version 2.0 (the "License"); | |
5 | * you may not use this file except in compliance with the License. | |
6 | * You may obtain a copy of the License at: | |
7 | * | |
8 | * http://www.apache.org/licenses/LICENSE-2.0 | |
9 | * | |
10 | * Unless required by applicable law or agreed to in writing, software | |
11 | * distributed under the License is distributed on an "AS IS" BASIS, | |
12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
13 | * See the License for the specific language governing permissions and | |
14 | * limitations under the License. | |
15 | */ | |
16 | ||
17 | #include <config.h> | |
18 | #include "ccmap.h" | |
19 | #include "coverage.h" | |
20 | #include "bitmap.h" | |
21 | #include "hash.h" | |
22 | #include "ovs-rcu.h" | |
23 | #include "random.h" | |
24 | #include "util.h" | |
25 | ||
26 | COVERAGE_DEFINE(ccmap_expand); | |
27 | COVERAGE_DEFINE(ccmap_shrink); | |
28 | ||
/* A count-only version of the cmap. */

/* A single slot value: the count in the high 32 bits, the hash in the low
 * 32 bits, packed into one 64-bit word so readers always see a consistent
 * (count, hash) pair.
 *
 * Allow protected access to the value without atomic semantics.  This makes
 * the exclusive writer somewhat faster. */
typedef union {
    unsigned long long protected_value;      /* Exclusive-writer plain access. */
    ATOMIC(unsigned long long) atomic_value; /* Concurrent-reader access. */
} ccmap_node_t;
BUILD_ASSERT_DECL(sizeof(ccmap_node_t) == sizeof(uint64_t));
38 | ||
/* Atomically reads the 64-bit value of 'node'.  Relaxed ordering is used:
 * the node is a single 64-bit word (see BUILD_ASSERT above), so the read
 * yields a consistent (count, hash) pair without stronger ordering. */
static uint64_t
ccmap_node_get(const ccmap_node_t *node)
{
    uint64_t value;

    /* CONST_CAST: the atomic read API takes a non-const pointer even though
     * the read itself does not modify 'node'. */
    atomic_read_relaxed(&CONST_CAST(ccmap_node_t *, node)->atomic_value,
                        &value);

    return value;
}
49 | ||
50 | /* It is safe to allow compiler optimize reads by the exclusive writer. */ | |
51 | static uint64_t | |
52 | ccmap_node_get_protected(const ccmap_node_t *node) | |
53 | { | |
54 | return node->protected_value; | |
55 | } | |
56 | ||
/* Stores 'value' into 'node'.  Even though only the exclusive writer calls
 * this, the store is atomic (relaxed) so concurrent readers never observe a
 * torn 64-bit value. */
static void
ccmap_node_set_protected(ccmap_node_t *node, uint64_t value)
{
    atomic_store_relaxed(&node->atomic_value, value);
}
62 | ||
/* Packs 'count' (high 32 bits) and 'hash' (low 32 bits) into a single node
 * value. */
static uint64_t
ccmap_node(uint32_t count, uint32_t hash)
{
    uint64_t node = count;

    node <<= 32;
    node |= hash;
    return node;
}
68 | ||
/* Extracts the hash (the low 32 bits) from node value 'node'. */
static uint32_t
ccmap_node_hash(uint64_t node)
{
    return (uint32_t) node;
}
74 | ||
/* Extracts the count (the high 32 bits) from node value 'node'. */
static uint32_t
ccmap_node_count(uint64_t node)
{
    return (uint32_t) (node >> 32);
}
80 | ||
/* Number of nodes per bucket. */
#define CCMAP_K (CACHE_LINE_SIZE / sizeof(ccmap_node_t))

/* A cuckoo hash bucket.  Designed to be cache-aligned and exactly one cache
 * line long. */
struct ccmap_bucket {
    /* Each node includes both the hash (low 32-bits) and the count (high
     * 32-bits), allowing readers always getting a consistent pair.  A zero
     * count marks an empty slot. */
    ccmap_node_t nodes[CCMAP_K];
};
BUILD_ASSERT_DECL(sizeof(struct ccmap_bucket) == CACHE_LINE_SIZE);

/* Default maximum load factor (as a fraction of UINT32_MAX + 1) before
 * enlarging a ccmap.  Reasonable values lie between about 75% and 93%.
 * Smaller values waste memory; larger values increase the average insertion
 * time. */
#define CCMAP_MAX_LOAD ((uint32_t) (UINT32_MAX * .85))

/* Default minimum load factor (as a fraction of UINT32_MAX + 1) before
 * shrinking a ccmap.  Currently, the value is chosen to be 20%, this
 * means ccmap will have a 40% load factor after shrink. */
#define CCMAP_MIN_LOAD ((uint32_t) (UINT32_MAX * .20))
102 | ||
/* The implementation of a concurrent hash map.  The bookkeeping fields are
 * updated only by the exclusive writer; readers use 'mask', 'basis', and
 * 'buckets'. */
struct ccmap_impl {
    PADDED_MEMBERS(CACHE_LINE_SIZE,
        unsigned int n_unique;     /* Number of in-use nodes. */
        unsigned int n;            /* Number of hashes inserted. */
        unsigned int max_n;        /* Max nodes before enlarging. */
        unsigned int min_n;        /* Min nodes before shrinking. */
        uint32_t mask;             /* Number of 'buckets', minus one. */
        uint32_t basis;            /* Basis for rehashing client's
                                      hash values. */
    );
    /* Flexible array member: (mask + 1) cache-line-sized buckets follow the
     * padded header. */
    struct ccmap_bucket buckets[];
};
BUILD_ASSERT_DECL(sizeof(struct ccmap_impl) == CACHE_LINE_SIZE);
117 | ||
118 | static struct ccmap_impl *ccmap_rehash(struct ccmap *, uint32_t mask); | |
119 | ||
/* Given a rehashed value 'hash', returns the other hash for that rehashed
 * value.  This is symmetric: other_hash(other_hash(x)) == x.  (See also
 * "Hash Functions" at the top of cmap.c.) */
static uint32_t
other_hash(uint32_t hash)
{
    /* Rotate by 16: swap the two 16-bit halves. */
    return (hash >> 16) | (hash << 16);
}
128 | ||
/* Returns the rehashed value for 'hash' within 'impl'.  Mixing in the
 * per-implementation random 'basis' decouples bucket placement from the
 * client's hash function, which lets ccmap_rehash() retry with a new basis
 * when insertion fails.  (See also "Hash Functions" at the top of cmap.c.) */
static uint32_t
rehash(const struct ccmap_impl *impl, uint32_t hash)
{
    return hash_finish(impl->basis, hash);
}
136 | ||
/* Returns the current implementation of 'ccmap' via an RCU-protected read,
 * so it is safe to use while a writer may be swapping in a new one. */
static struct ccmap_impl *
ccmap_get_impl(const struct ccmap *ccmap)
{
    return ovsrcu_get(struct ccmap_impl *, &ccmap->impl);
}
142 | ||
143 | static uint32_t | |
144 | calc_max_n(uint32_t mask) | |
145 | { | |
146 | return ((uint64_t) (mask + 1) * CCMAP_K * CCMAP_MAX_LOAD) >> 32; | |
147 | } | |
148 | ||
149 | static uint32_t | |
150 | calc_min_n(uint32_t mask) | |
151 | { | |
152 | return ((uint64_t) (mask + 1) * CCMAP_K * CCMAP_MIN_LOAD) >> 32; | |
153 | } | |
154 | ||
/* Allocates and initializes a new, empty implementation with 'mask' + 1
 * buckets.  'mask' + 1 must be a power of 2. */
static struct ccmap_impl *
ccmap_impl_create(uint32_t mask)
{
    struct ccmap_impl *impl;

    ovs_assert(is_pow2(mask + 1));

    /* Cache-aligned allocation so each bucket occupies exactly one cache
     * line (see the BUILD_ASSERTs above). */
    impl = xzalloc_cacheline(sizeof *impl
                             + (mask + 1) * sizeof *impl->buckets);
    /* NOTE(review): xzalloc_cacheline presumably zero-fills, which would
     * make the next two assignments redundant -- confirm before removing. */
    impl->n_unique = 0;
    impl->n = 0;
    impl->max_n = calc_max_n(mask);
    impl->min_n = calc_min_n(mask);
    impl->mask = mask;
    /* Random basis makes bucket placement independent of client hashes. */
    impl->basis = random_uint32();

    return impl;
}
173 | ||
/* Initializes 'ccmap' as an empty concurrent hash map (a single bucket:
 * mask == 0). */
void
ccmap_init(struct ccmap *ccmap)
{
    ovsrcu_set(&ccmap->impl, ccmap_impl_create(0));
}
180 | ||
/* Destroys 'ccmap'.
 *
 * The client is responsible for destroying any data previously held in
 * 'ccmap'.  The implementation is freed via RCU postpone, so concurrent
 * readers may finish before the memory actually goes away. */
void
ccmap_destroy(struct ccmap *ccmap)
{
    if (ccmap) {
        ovsrcu_postpone(free_cacheline, ccmap_get_impl(ccmap));
    }
}
192 | ||
193 | /* Returns the number of hashes inserted in 'ccmap', including duplicates. */ | |
194 | size_t | |
195 | ccmap_count(const struct ccmap *ccmap) | |
196 | { | |
197 | return ccmap_get_impl(ccmap)->n; | |
198 | } | |
199 | ||
/* Returns true if 'ccmap' is empty, false otherwise. */
bool
ccmap_is_empty(const struct ccmap *ccmap)
{
    return !ccmap_count(ccmap);
}
206 | ||
207 | /* returns 0 if not found. Map does not contain zero counts. */ | |
208 | static uint32_t | |
209 | ccmap_find_in_bucket(const struct ccmap_bucket *bucket, uint32_t hash) | |
210 | { | |
211 | for (int i = 0; i < CCMAP_K; i++) { | |
212 | uint64_t node = ccmap_node_get(&bucket->nodes[i]); | |
213 | ||
214 | if (ccmap_node_hash(node) == hash) { | |
215 | return ccmap_node_count(node); | |
216 | } | |
217 | } | |
218 | return 0; | |
219 | } | |
220 | ||
221 | /* Searches 'ccmap' for a node with the specified 'hash'. If one is | |
222 | * found, returns the count associated with it, otherwise zero. | |
223 | */ | |
224 | uint32_t | |
225 | ccmap_find(const struct ccmap *ccmap, uint32_t hash) | |
226 | { | |
227 | const struct ccmap_impl *impl = ccmap_get_impl(ccmap); | |
228 | uint32_t h = rehash(impl, hash); | |
229 | uint32_t count; | |
230 | ||
231 | count = ccmap_find_in_bucket(&impl->buckets[h & impl->mask], hash); | |
232 | if (!count) { | |
233 | h = other_hash(h); | |
234 | count = ccmap_find_in_bucket(&impl->buckets[h & impl->mask], hash); | |
235 | } | |
236 | return count; | |
237 | } | |
238 | ||
239 | static int | |
240 | ccmap_find_slot_protected(struct ccmap_bucket *b, uint32_t hash, | |
241 | uint32_t *count) | |
242 | { | |
243 | for (int i = 0; i < CCMAP_K; i++) { | |
244 | uint64_t node = ccmap_node_get_protected(&b->nodes[i]); | |
245 | ||
246 | *count = ccmap_node_count(node); | |
247 | if (ccmap_node_hash(node) == hash && *count) { | |
248 | return i; | |
249 | } | |
250 | } | |
251 | return -1; | |
252 | } | |
253 | ||
254 | static int | |
255 | ccmap_find_empty_slot_protected(struct ccmap_bucket *b) | |
256 | { | |
257 | for (int i = 0; i < CCMAP_K; i++) { | |
258 | uint64_t node = ccmap_node_get_protected(&b->nodes[i]); | |
259 | ||
260 | if (!ccmap_node_count(node)) { | |
261 | return i; | |
262 | } | |
263 | } | |
264 | return -1; | |
265 | } | |
266 | ||
/* Writes ('count', 'hash') into slot 'i' of bucket 'b' as one atomic 64-bit
 * store, so concurrent readers always see a consistent pair. */
static void
ccmap_set_bucket(struct ccmap_bucket *b, int i, uint32_t count, uint32_t hash)
{
    ccmap_node_set_protected(&b->nodes[i], ccmap_node(count, hash));
}
272 | ||
/* Searches 'b' for a node with the given 'hash'.  If it finds one,
 * increments the associated count by 'inc' and returns the new value.
 * Otherwise returns 0. */
static uint32_t
ccmap_inc_bucket_existing(struct ccmap_bucket *b, uint32_t hash, uint32_t inc)
{
    uint32_t count;
    int slot = ccmap_find_slot_protected(b, hash, &count);

    if (slot >= 0) {
        count += inc;
        ccmap_set_bucket(b, slot, count, hash);
        return count;
    }
    return 0;
}
289 | ||
/* Searches 'b' for an empty slot.  If successful, stores 'inc' and 'hash'
 * in the slot and returns 'inc'.  Otherwise, returns 0. */
static uint32_t
ccmap_inc_bucket_new(struct ccmap_bucket *b, uint32_t hash, uint32_t inc)
{
    int slot = ccmap_find_empty_slot_protected(b);

    if (slot < 0) {
        return 0;
    }
    ccmap_set_bucket(b, slot, inc, hash);
    return inc;
}
302 | ||
303 | /* Returns the other bucket that b->nodes[slot] could occupy in 'impl'. (This | |
304 | * might be the same as 'b'.) */ | |
305 | static struct ccmap_bucket * | |
306 | other_bucket_protected(struct ccmap_impl *impl, struct ccmap_bucket *b, int slot) | |
307 | { | |
308 | uint64_t node = ccmap_node_get_protected(&b->nodes[slot]); | |
309 | ||
310 | uint32_t h1 = rehash(impl, ccmap_node_hash(node)); | |
311 | uint32_t h2 = other_hash(h1); | |
312 | uint32_t b_idx = b - impl->buckets; | |
313 | uint32_t other_h = (h1 & impl->mask) == b_idx ? h2 : h1; | |
314 | ||
315 | return &impl->buckets[other_h & impl->mask]; | |
316 | } | |
317 | ||
/* Count 'inc' for 'hash' is to be inserted into 'impl', but both candidate
 * buckets 'b1' and 'b2' are full.  This function attempts to rearrange
 * buckets within 'impl' to make room for 'hash'.
 *
 * Returns 'inc' if the new count for the 'hash' was inserted, otherwise
 * returns 0.
 *
 * The implementation is a general-purpose breadth-first search.  At first
 * glance, this is more complex than a random walk through 'impl' (suggested
 * by some references), but random walks have a tendency to loop back through
 * a single bucket.  We have to move nodes backward along the path that we
 * find, so that no node actually disappears from the hash table, which means
 * a random walk would have to be careful to deal with loops.  By contrast, a
 * successful breadth-first search always finds a *shortest* path through the
 * hash table, and a shortest path will never contain loops, so it avoids that
 * problem entirely.
 */
static uint32_t
ccmap_inc_bfs(struct ccmap_impl *impl, uint32_t hash,
              struct ccmap_bucket *b1, struct ccmap_bucket *b2, uint32_t inc)
{
    enum { MAX_DEPTH = 4 };

    /* A path from 'start' to 'end' via the 'n' steps in 'slots[]'.
     *
     * One can follow the path via:
     *
     *     struct ccmap_bucket *b;
     *     int i;
     *
     *     b = path->start;
     *     for (i = 0; i < path->n; i++) {
     *         b = other_bucket_protected(impl, b, path->slots[i]);
     *     }
     *     ovs_assert(b == path->end);
     */
    struct ccmap_path {
        struct ccmap_bucket *start; /* First bucket along the path. */
        struct ccmap_bucket *end;   /* Last bucket on the path. */
        uint8_t slots[MAX_DEPTH];   /* Slots used for each hop. */
        int n;                      /* Number of slots[]. */
    };

    /* We need to limit the amount of work we do trying to find a path.  It
     * might actually be impossible to rearrange the ccmap, and after some time
     * it is likely to be easier to rehash the entire ccmap.
     *
     * This value of MAX_QUEUE is an arbitrary limit suggested by one of the
     * references.  Empirically, it seems to work OK. */
    enum { MAX_QUEUE = 500 };
    struct ccmap_path queue[MAX_QUEUE];
    int head = 0;
    int tail = 0;

    /* Add 'b1' and 'b2' as starting points for the search. */
    queue[head].start = b1;
    queue[head].end = b1;
    queue[head].n = 0;
    head++;
    if (b1 != b2) {
        queue[head].start = b2;
        queue[head].end = b2;
        queue[head].n = 0;
        head++;
    }

    while (tail < head) {
        const struct ccmap_path *path = &queue[tail++];
        struct ccmap_bucket *this = path->end;
        int i;

        for (i = 0; i < CCMAP_K; i++) {
            struct ccmap_bucket *next = other_bucket_protected(impl, this, i);
            int j;

            /* A node whose both hashes map to 'this' cannot be moved. */
            if (this == next) {
                continue;
            }

            j = ccmap_find_empty_slot_protected(next);
            if (j >= 0) {
                /* We've found a path along which we can rearrange the hash
                 * table:  Start at path->start, follow all the slots in
                 * path->slots[], then follow slot 'i', then the bucket you
                 * arrive at has slot 'j' empty. */
                struct ccmap_bucket *buckets[MAX_DEPTH + 2];
                int slots[MAX_DEPTH + 2];
                int k;

                /* Figure out the full sequence of slots. */
                for (k = 0; k < path->n; k++) {
                    slots[k] = path->slots[k];
                }
                slots[path->n] = i;
                slots[path->n + 1] = j;

                /* Figure out the full sequence of buckets. */
                buckets[0] = path->start;
                for (k = 0; k <= path->n; k++) {
                    buckets[k + 1] = other_bucket_protected(impl, buckets[k],
                                                            slots[k]);
                }

                /* Now the path is fully expressed.  One can start from
                 * buckets[0], go via slots[0] to buckets[1], via slots[1] to
                 * buckets[2], and so on.
                 *
                 * Move all the nodes across the path "backward".  After each
                 * step some node appears in two buckets.  Thus, every node is
                 * always visible to a concurrent search. */
                for (k = path->n + 1; k > 0; k--) {
                    uint64_t node = ccmap_node_get_protected
                        (&buckets[k - 1]->nodes[slots[k - 1]]);
                    ccmap_node_set_protected(&buckets[k]->nodes[slots[k]],
                                             node);
                }

                /* Finally, insert the count into the freed-up slot. */
                ccmap_set_bucket(buckets[0], slots[0], inc, hash);

                return inc;
            }

            /* No empty slot in 'next': extend the path through it, within
             * the depth and queue-size limits. */
            if (path->n < MAX_DEPTH && head < MAX_QUEUE) {
                struct ccmap_path *new_path = &queue[head++];

                *new_path = *path;
                new_path->end = next;
                new_path->slots[new_path->n++] = i;
            }
        }
    }

    return 0;
}
452 | ||
453 | /* Increments the count associated with 'hash', in 'impl', by 'inc'. */ | |
454 | static uint32_t | |
455 | ccmap_try_inc(struct ccmap_impl *impl, uint32_t hash, uint32_t inc) | |
456 | { | |
457 | uint32_t h1 = rehash(impl, hash); | |
458 | uint32_t h2 = other_hash(h1); | |
459 | struct ccmap_bucket *b1 = &impl->buckets[h1 & impl->mask]; | |
460 | struct ccmap_bucket *b2 = &impl->buckets[h2 & impl->mask]; | |
461 | uint32_t count; | |
462 | ||
463 | return OVS_UNLIKELY(count = ccmap_inc_bucket_existing(b1, hash, inc)) | |
464 | ? count : OVS_UNLIKELY(count = ccmap_inc_bucket_existing(b2, hash, inc)) | |
465 | ? count : OVS_LIKELY(count = ccmap_inc_bucket_new(b1, hash, inc)) | |
466 | ? count : OVS_LIKELY(count = ccmap_inc_bucket_new(b2, hash, inc)) | |
467 | ? count : ccmap_inc_bfs(impl, hash, b1, b2, inc); | |
468 | } | |
469 | ||
/* Increments the count of 'hash' values in the 'ccmap'.  The caller must
 * ensure that 'ccmap' cannot change concurrently (from another thread).
 *
 * Returns the current count of the given hash value after the increment. */
uint32_t
ccmap_inc(struct ccmap *ccmap, uint32_t hash)
{
    struct ccmap_impl *impl = ccmap_get_impl(ccmap);
    uint32_t count;

    /* Expand ahead of the insertion once the unique-node count reaches the
     * maximum load threshold. */
    if (OVS_UNLIKELY(impl->n_unique >= impl->max_n)) {
        COVERAGE_INC(ccmap_expand);
        impl = ccmap_rehash(ccmap, (impl->mask << 1) | 1);
    }

    /* Insertion can still fail if cuckoo rearrangement finds no room; retry
     * after rehashing with the same mask (a new random basis). */
    while (OVS_UNLIKELY(!(count = ccmap_try_inc(impl, hash, 1)))) {
        impl = ccmap_rehash(ccmap, impl->mask);
    }
    ++impl->n;
    if (count == 1) {
        ++impl->n_unique;   /* First occurrence of this hash value. */
    }
    return count;
}
494 | ||
/* Decrements the count associated with 'hash' in the bucket identified by
 * 'h'.  Returns the OLD count if successful, or 0 if 'hash' is not in that
 * bucket.  A count decremented to zero leaves the slot marked empty (empty
 * slots are exactly those with a zero count). */
static uint32_t
ccmap_dec__(struct ccmap_impl *impl, uint32_t hash, uint32_t h)
{
    struct ccmap_bucket *b = &impl->buckets[h & impl->mask];
    uint32_t count;

    int slot = ccmap_find_slot_protected(b, hash, &count);
    if (slot < 0) {
        return 0;
    }

    ccmap_set_bucket(b, slot, count - 1, hash);
    return count;
}
511 | ||
/* Decrements the count associated with 'hash'.  The caller must ensure that
 * 'ccmap' cannot change concurrently (from another thread), and that 'hash'
 * is currently present in the map (this is asserted).
 *
 * Returns the current count related to 'hash' in the ccmap after the
 * decrement. */
uint32_t
ccmap_dec(struct ccmap *ccmap, uint32_t hash)
{
    struct ccmap_impl *impl = ccmap_get_impl(ccmap);
    uint32_t h1 = rehash(impl, hash);
    uint32_t h2 = other_hash(h1);

    /* 'hash' resides in exactly one of its two candidate buckets. */
    uint32_t old_count = ccmap_dec__(impl, hash, h1);
    if (!old_count) {
        old_count = ccmap_dec__(impl, hash, h2);
    }
    ovs_assert(old_count);

    old_count--;    /* Now holds the count after the decrement. */

    if (old_count == 0) {
        impl->n_unique--;
        /* Shrink once the unique-node count falls below the threshold. */
        if (OVS_UNLIKELY(impl->n_unique < impl->min_n)) {
            COVERAGE_INC(ccmap_shrink);
            impl = ccmap_rehash(ccmap, impl->mask >> 1);
        }
    }
    impl->n--;
    return old_count;
}
542 | ||
543 | static bool | |
544 | ccmap_try_rehash(const struct ccmap_impl *old, struct ccmap_impl *new) | |
545 | { | |
546 | const struct ccmap_bucket *b; | |
547 | ||
548 | for (b = old->buckets; b <= &old->buckets[old->mask]; b++) { | |
549 | for (int i = 0; i < CCMAP_K; i++) { | |
550 | uint64_t node = ccmap_node_get_protected(&b->nodes[i]); | |
551 | uint32_t count = ccmap_node_count(node); | |
552 | ||
553 | if (count && !ccmap_try_inc(new, ccmap_node_hash(node), count)) { | |
554 | return false; | |
555 | } | |
556 | } | |
557 | } | |
558 | return true; | |
559 | } | |
560 | ||
/* Replaces 'ccmap''s implementation with a new one of 'mask' + 1 buckets,
 * moving every node across.  Retries with a fresh random basis until all
 * nodes fit, then publishes the new implementation via RCU and postpones
 * freeing the old one.  Returns the new implementation. */
static struct ccmap_impl *
ccmap_rehash(struct ccmap *ccmap, uint32_t mask)
{
    struct ccmap_impl *old = ccmap_get_impl(ccmap);
    struct ccmap_impl *new = ccmap_impl_create(mask);

    /* The new table must have room for all existing unique nodes. */
    ovs_assert(old->n_unique < new->max_n);

    while (!ccmap_try_rehash(old, new)) {
        /* No luck with this basis: wipe the buckets and try another. */
        memset(new->buckets, 0, (mask + 1) * sizeof *new->buckets);
        new->basis = random_uint32();
    }

    new->n = old->n;
    new->n_unique = old->n_unique;
    ovsrcu_set(&ccmap->impl, new);
    ovsrcu_postpone(free_cacheline, old);

    return new;
}