/* lib/hmap.c - hash map (chained hash table) implementation. */
/*
 * Copyright (c) 2008, 2009, 2010, 2012, 2013, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "openvswitch/vlog.h"
VLOG_DEFINE_THIS_MODULE(hmap);

/* Event counters incremented at noteworthy points below (coverage
 * instrumentation). */
COVERAGE_DEFINE(hmap_pathological);
COVERAGE_DEFINE(hmap_expand);
COVERAGE_DEFINE(hmap_shrink);
COVERAGE_DEFINE(hmap_reserve);
33 /* Initializes 'hmap' as an empty hash table. */
35 hmap_init(struct hmap
*hmap
)
37 hmap
->buckets
= &hmap
->one
;
43 /* Frees memory reserved by 'hmap'. It is the client's responsibility to free
44 * the nodes themselves, if necessary. */
46 hmap_destroy(struct hmap
*hmap
)
48 if (hmap
&& hmap
->buckets
!= &hmap
->one
) {
53 /* Removes all node from 'hmap', leaving it ready to accept more nodes. Does
54 * not free memory allocated for 'hmap'.
56 * This function is appropriate when 'hmap' will soon have about as many
57 * elements as it did before. If 'hmap' will likely have fewer elements than
58 * before, use hmap_destroy() followed by hmap_init() to save memory and
61 hmap_clear(struct hmap
*hmap
)
65 memset(hmap
->buckets
, 0, (hmap
->mask
+ 1) * sizeof *hmap
->buckets
);
69 /* Exchanges hash maps 'a' and 'b'. */
71 hmap_swap(struct hmap
*a
, struct hmap
*b
)
80 /* Adjusts 'hmap' to compensate for having moved position in memory (e.g. due
83 hmap_moved(struct hmap
*hmap
)
86 hmap
->buckets
= &hmap
->one
;
91 resize(struct hmap
*hmap
, size_t new_mask
, const char *where
)
96 ovs_assert(is_pow2(new_mask
+ 1));
100 tmp
.buckets
= xmalloc(sizeof *tmp
.buckets
* (new_mask
+ 1));
102 for (i
= 0; i
<= tmp
.mask
; i
++) {
103 tmp
.buckets
[i
] = NULL
;
106 for (i
= 0; i
<= hmap
->mask
; i
++) {
107 struct hmap_node
*node
, *next
;
109 for (node
= hmap
->buckets
[i
]; node
; node
= next
) {
111 hmap_insert_fast(&tmp
, node
, node
->hash
);
115 static struct vlog_rate_limit rl
= VLOG_RATE_LIMIT_INIT(10, 10);
116 COVERAGE_INC(hmap_pathological
);
117 VLOG_DBG_RL(&rl
, "%s: %d nodes in bucket (%"PRIuSIZE
" nodes, %"PRIuSIZE
" buckets)",
118 where
, count
, hmap
->n
, hmap
->mask
+ 1);
121 hmap_swap(hmap
, &tmp
);
126 calc_mask(size_t capacity
)
128 size_t mask
= capacity
/ 2;
134 #if SIZE_MAX > UINT32_MAX
138 /* If we need to dynamically allocate buckets we might as well allocate at
139 * least 4 of them. */
140 mask
|= (mask
& 1) << 1;
145 /* Expands 'hmap', if necessary, to optimize the performance of searches.
147 * ('where' is used in debug logging. Commonly one would use hmap_expand() to
148 * automatically provide the caller's source file and line number for
151 hmap_expand_at(struct hmap
*hmap
, const char *where
)
153 size_t new_mask
= calc_mask(hmap
->n
);
154 if (new_mask
> hmap
->mask
) {
155 COVERAGE_INC(hmap_expand
);
156 resize(hmap
, new_mask
, where
);
160 /* Shrinks 'hmap', if necessary, to optimize the performance of iteration.
162 * ('where' is used in debug logging. Commonly one would use hmap_shrink() to
163 * automatically provide the caller's source file and line number for
166 hmap_shrink_at(struct hmap
*hmap
, const char *where
)
168 size_t new_mask
= calc_mask(hmap
->n
);
169 if (new_mask
< hmap
->mask
) {
170 COVERAGE_INC(hmap_shrink
);
171 resize(hmap
, new_mask
, where
);
175 /* Expands 'hmap', if necessary, to optimize the performance of searches when
176 * it has up to 'n' elements. (But iteration will be slow in a hash map whose
177 * allocated capacity is much higher than its current number of nodes.)
179 * ('where' is used in debug logging. Commonly one would use hmap_reserve() to
180 * automatically provide the caller's source file and line number for
183 hmap_reserve_at(struct hmap
*hmap
, size_t n
, const char *where
)
185 size_t new_mask
= calc_mask(n
);
186 if (new_mask
> hmap
->mask
) {
187 COVERAGE_INC(hmap_reserve
);
188 resize(hmap
, new_mask
, where
);
192 /* Adjusts 'hmap' to compensate for 'old_node' having moved position in memory
193 * to 'node' (e.g. due to realloc()). */
195 hmap_node_moved(struct hmap
*hmap
,
196 struct hmap_node
*old_node
, struct hmap_node
*node
)
198 struct hmap_node
**bucket
= &hmap
->buckets
[node
->hash
& hmap
->mask
];
199 while (*bucket
!= old_node
) {
200 bucket
= &(*bucket
)->next
;
205 /* Chooses and returns a randomly selected node from 'hmap', which must not be
208 * I wouldn't depend on this algorithm to be fair, since I haven't analyzed it.
209 * But it does at least ensure that any node in 'hmap' can be chosen. */
211 hmap_random_node(const struct hmap
*hmap
)
213 struct hmap_node
*bucket
, *node
;
216 /* Choose a random non-empty bucket. */
218 bucket
= hmap
->buckets
[random_uint32() & hmap
->mask
];
224 /* Count nodes in bucket. */
226 for (node
= bucket
; node
; node
= node
->next
) {
230 /* Choose random node from bucket. */
232 for (node
= bucket
; i
-- > 0; node
= node
->next
) {
238 /* Returns the next node in 'hmap' in hash order, or NULL if no nodes remain in
239 * 'hmap'. Uses '*pos' to determine where to begin iteration, and updates
240 * '*pos' to pass on the next iteration into them before returning.
242 * It's better to use plain HMAP_FOR_EACH and related functions, since they are
243 * faster and better at dealing with hmaps that change during iteration.
245 * Before beginning iteration, set '*pos' to all zeros. */
247 hmap_at_position(const struct hmap
*hmap
,
248 struct hmap_position
*pos
)
253 offset
= pos
->offset
;
254 for (b_idx
= pos
->bucket
; b_idx
<= hmap
->mask
; b_idx
++) {
255 struct hmap_node
*node
;
258 for (n_idx
= 0, node
= hmap
->buckets
[b_idx
]; node
!= NULL
;
259 n_idx
++, node
= node
->next
) {
260 if (n_idx
== offset
) {
262 pos
->bucket
= node
->hash
& hmap
->mask
;
263 pos
->offset
= offset
+ 1;
265 pos
->bucket
= (node
->hash
& hmap
->mask
) + 1;
279 /* Returns true if 'node' is in 'hmap', false otherwise. */
281 hmap_contains(const struct hmap
*hmap
, const struct hmap_node
*node
)
285 for (p
= hmap_first_in_bucket(hmap
, node
->hash
); p
; p
= p
->next
) {