/* lib/hmap.c - hash map implementation (Open vSwitch). */
/*
 * Copyright (c) 2008, 2009, 2010, 2012, 2013, 2015, 2019 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <config.h>

#include "openvswitch/hmap.h"

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "coverage.h"
#include "random.h"
#include "util.h"
#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(hmap);

/* Counters bumped on the corresponding hmap events; visible via coverage. */
COVERAGE_DEFINE(hmap_pathological);
COVERAGE_DEFINE(hmap_expand);
COVERAGE_DEFINE(hmap_shrink);
COVERAGE_DEFINE(hmap_reserve);
33 /* Initializes 'hmap' as an empty hash table. */
35 hmap_init(struct hmap
*hmap
)
37 hmap
->buckets
= &hmap
->one
;
43 /* Frees memory reserved by 'hmap'. It is the client's responsibility to free
44 * the nodes themselves, if necessary. */
46 hmap_destroy(struct hmap
*hmap
)
48 if (hmap
&& hmap
->buckets
!= &hmap
->one
) {
53 /* Removes all node from 'hmap', leaving it ready to accept more nodes. Does
54 * not free memory allocated for 'hmap'.
56 * This function is appropriate when 'hmap' will soon have about as many
57 * elements as it did before. If 'hmap' will likely have fewer elements than
58 * before, use hmap_destroy() followed by hmap_init() to save memory and
61 hmap_clear(struct hmap
*hmap
)
65 memset(hmap
->buckets
, 0, (hmap
->mask
+ 1) * sizeof *hmap
->buckets
);
69 /* Exchanges hash maps 'a' and 'b'. */
71 hmap_swap(struct hmap
*a
, struct hmap
*b
)
80 /* Adjusts 'hmap' to compensate for having moved position in memory (e.g. due
83 hmap_moved(struct hmap
*hmap
)
86 hmap
->buckets
= &hmap
->one
;
91 resize(struct hmap
*hmap
, size_t new_mask
, const char *where
)
96 ovs_assert(is_pow2(new_mask
+ 1));
100 tmp
.buckets
= xmalloc(sizeof *tmp
.buckets
* (new_mask
+ 1));
102 for (i
= 0; i
<= tmp
.mask
; i
++) {
103 tmp
.buckets
[i
] = NULL
;
106 int n_big_buckets
= 0;
107 int biggest_count
= 0;
108 int n_biggest_buckets
= 0;
109 for (i
= 0; i
<= hmap
->mask
; i
++) {
110 struct hmap_node
*node
, *next
;
112 for (node
= hmap
->buckets
[i
]; node
; node
= next
) {
114 hmap_insert_fast(&tmp
, node
, node
->hash
);
119 if (count
> biggest_count
) {
120 biggest_count
= count
;
121 n_biggest_buckets
= 1;
122 } else if (count
== biggest_count
) {
127 hmap_swap(hmap
, &tmp
);
131 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(10, 10);
132 COVERAGE_INC(hmap_pathological
);
133 VLOG_DBG_RL(&rl
, "%s: %d bucket%s with 6+ nodes, "
134 "including %d bucket%s with %d nodes "
135 "(%"PRIuSIZE
" nodes total across %"PRIuSIZE
" buckets)",
137 n_big_buckets
, n_big_buckets
> 1 ? "s" : "",
138 n_biggest_buckets
, n_biggest_buckets
> 1 ? "s" : "",
140 hmap
->n
, hmap
->mask
+ 1);
/* Returns the bucket mask (one less than a power of 2) appropriate for an
 * hmap holding 'capacity' nodes, sized so buckets average at most two
 * nodes each.  Returns 0 for capacities that fit in the inline bucket. */
static size_t
calc_mask(size_t capacity)
{
    size_t mask = capacity / 2;

    /* Smear the topmost set bit downward so 'mask' becomes one less than a
     * power of 2. */
    mask |= mask >> 1;
    mask |= mask >> 2;
    mask |= mask >> 4;
    mask |= mask >> 8;
    mask |= mask >> 16;
#if SIZE_MAX > UINT32_MAX
    mask |= mask >> 32;
#endif

    /* If we need to dynamically allocate buckets we might as well allocate at
     * least 4 of them. */
    mask |= (mask & 1) << 1;

    return mask;
}
164 /* Expands 'hmap', if necessary, to optimize the performance of searches.
166 * ('where' is used in debug logging. Commonly one would use hmap_expand() to
167 * automatically provide the caller's source file and line number for
170 hmap_expand_at(struct hmap
*hmap
, const char *where
)
172 size_t new_mask
= calc_mask(hmap
->n
);
173 if (new_mask
> hmap
->mask
) {
174 COVERAGE_INC(hmap_expand
);
175 resize(hmap
, new_mask
, where
);
179 /* Shrinks 'hmap', if necessary, to optimize the performance of iteration.
181 * ('where' is used in debug logging. Commonly one would use hmap_shrink() to
182 * automatically provide the caller's source file and line number for
185 hmap_shrink_at(struct hmap
*hmap
, const char *where
)
187 size_t new_mask
= calc_mask(hmap
->n
);
188 if (new_mask
< hmap
->mask
) {
189 COVERAGE_INC(hmap_shrink
);
190 resize(hmap
, new_mask
, where
);
194 /* Expands 'hmap', if necessary, to optimize the performance of searches when
195 * it has up to 'n' elements. (But iteration will be slow in a hash map whose
196 * allocated capacity is much higher than its current number of nodes.)
198 * ('where' is used in debug logging. Commonly one would use hmap_reserve() to
199 * automatically provide the caller's source file and line number for
202 hmap_reserve_at(struct hmap
*hmap
, size_t n
, const char *where
)
204 size_t new_mask
= calc_mask(n
);
205 if (new_mask
> hmap
->mask
) {
206 COVERAGE_INC(hmap_reserve
);
207 resize(hmap
, new_mask
, where
);
211 /* Adjusts 'hmap' to compensate for 'old_node' having moved position in memory
212 * to 'node' (e.g. due to realloc()). */
214 hmap_node_moved(struct hmap
*hmap
,
215 struct hmap_node
*old_node
, struct hmap_node
*node
)
217 struct hmap_node
**bucket
= &hmap
->buckets
[node
->hash
& hmap
->mask
];
218 while (*bucket
!= old_node
) {
219 bucket
= &(*bucket
)->next
;
224 /* Chooses and returns a randomly selected node from 'hmap', which must not be
227 * I wouldn't depend on this algorithm to be fair, since I haven't analyzed it.
228 * But it does at least ensure that any node in 'hmap' can be chosen. */
230 hmap_random_node(const struct hmap
*hmap
)
232 struct hmap_node
*bucket
, *node
;
235 /* Choose a random non-empty bucket. */
237 bucket
= hmap
->buckets
[random_uint32() & hmap
->mask
];
243 /* Count nodes in bucket. */
245 for (node
= bucket
; node
; node
= node
->next
) {
249 /* Choose random node from bucket. */
251 for (node
= bucket
; i
-- > 0; node
= node
->next
) {
257 /* Returns the next node in 'hmap' in hash order, or NULL if no nodes remain in
258 * 'hmap'. Uses '*pos' to determine where to begin iteration, and updates
259 * '*pos' to pass on the next iteration into them before returning.
261 * It's better to use plain HMAP_FOR_EACH and related functions, since they are
262 * faster and better at dealing with hmaps that change during iteration.
264 * Before beginning iteration, set '*pos' to all zeros. */
266 hmap_at_position(const struct hmap
*hmap
,
267 struct hmap_position
*pos
)
272 offset
= pos
->offset
;
273 for (b_idx
= pos
->bucket
; b_idx
<= hmap
->mask
; b_idx
++) {
274 struct hmap_node
*node
;
277 for (n_idx
= 0, node
= hmap
->buckets
[b_idx
]; node
!= NULL
;
278 n_idx
++, node
= node
->next
) {
279 if (n_idx
== offset
) {
281 pos
->bucket
= node
->hash
& hmap
->mask
;
282 pos
->offset
= offset
+ 1;
284 pos
->bucket
= (node
->hash
& hmap
->mask
) + 1;
298 /* Returns true if 'node' is in 'hmap', false otherwise. */
300 hmap_contains(const struct hmap
*hmap
, const struct hmap_node
*node
)
304 for (p
= hmap_first_in_bucket(hmap
, node
->hash
); p
; p
= p
->next
) {