/* lib/hmap.c — hash map (open-chaining hash table) implementation. */
/*
 * Copyright (c) 2008, 2009, 2010 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/* Coverage counters: incremented at interesting events so that runtime
 * statistics can report how often each one occurred. */
COVERAGE_DEFINE(hmap_pathological);
COVERAGE_DEFINE(hmap_expand);
COVERAGE_DEFINE(hmap_shrink);
COVERAGE_DEFINE(hmap_reserve);
31 /* Initializes 'hmap' as an empty hash table. */
33 hmap_init(struct hmap
*hmap
)
35 hmap
->buckets
= &hmap
->one
;
41 /* Frees memory reserved by 'hmap'. It is the client's responsibility to free
42 * the nodes themselves, if necessary. */
44 hmap_destroy(struct hmap
*hmap
)
46 if (hmap
&& hmap
->buckets
!= &hmap
->one
) {
51 /* Removes all node from 'hmap', leaving it ready to accept more nodes. Does
52 * not free memory allocated for 'hmap'.
54 * This function is appropriate when 'hmap' will soon have about as many
55 * elements as it before. If 'hmap' will likely have fewer elements than
56 * before, use hmap_destroy() followed by hmap_clear() to save memory and
59 hmap_clear(struct hmap
*hmap
)
63 memset(hmap
->buckets
, 0, (hmap
->mask
+ 1) * sizeof *hmap
->buckets
);
67 /* Exchanges hash maps 'a' and 'b'. */
69 hmap_swap(struct hmap
*a
, struct hmap
*b
)
78 /* Adjusts 'hmap' to compensate for having moved position in memory (e.g. due
81 hmap_moved(struct hmap
*hmap
)
84 hmap
->buckets
= &hmap
->one
;
89 resize(struct hmap
*hmap
, size_t new_mask
)
94 assert(!(new_mask
& (new_mask
+ 1)));
95 assert(new_mask
!= SIZE_MAX
);
99 tmp
.buckets
= xmalloc(sizeof *tmp
.buckets
* (new_mask
+ 1));
101 for (i
= 0; i
<= tmp
.mask
; i
++) {
102 tmp
.buckets
[i
] = NULL
;
105 for (i
= 0; i
<= hmap
->mask
; i
++) {
106 struct hmap_node
*node
, *next
;
108 for (node
= hmap
->buckets
[i
]; node
; node
= next
) {
110 hmap_insert_fast(&tmp
, node
, node
->hash
);
114 COVERAGE_INC(hmap_pathological
);
117 hmap_swap(hmap
, &tmp
);
122 calc_mask(size_t capacity
)
124 size_t mask
= capacity
/ 2;
130 #if SIZE_MAX > UINT32_MAX
134 /* If we need to dynamically allocate buckets we might as well allocate at
135 * least 4 of them. */
136 mask
|= (mask
& 1) << 1;
141 /* Expands 'hmap', if necessary, to optimize the performance of searches. */
143 hmap_expand(struct hmap
*hmap
)
145 size_t new_mask
= calc_mask(hmap
->n
);
146 if (new_mask
> hmap
->mask
) {
147 COVERAGE_INC(hmap_expand
);
148 resize(hmap
, new_mask
);
152 /* Shrinks 'hmap', if necessary, to optimize the performance of iteration. */
154 hmap_shrink(struct hmap
*hmap
)
156 size_t new_mask
= calc_mask(hmap
->n
);
157 if (new_mask
< hmap
->mask
) {
158 COVERAGE_INC(hmap_shrink
);
159 resize(hmap
, new_mask
);
163 /* Expands 'hmap', if necessary, to optimize the performance of searches when
164 * it has up to 'n' elements. (But iteration will be slow in a hash map whose
165 * allocated capacity is much higher than its current number of nodes.) */
167 hmap_reserve(struct hmap
*hmap
, size_t n
)
169 size_t new_mask
= calc_mask(n
);
170 if (new_mask
> hmap
->mask
) {
171 COVERAGE_INC(hmap_reserve
);
172 resize(hmap
, new_mask
);
176 /* Adjusts 'hmap' to compensate for 'old_node' having moved position in memory
177 * to 'node' (e.g. due to realloc()). */
179 hmap_node_moved(struct hmap
*hmap
,
180 struct hmap_node
*old_node
, struct hmap_node
*node
)
182 struct hmap_node
**bucket
= &hmap
->buckets
[node
->hash
& hmap
->mask
];
183 while (*bucket
!= old_node
) {
184 bucket
= &(*bucket
)->next
;
189 /* Chooses and returns a randomly selected node from 'hmap', which must not be
192 * I wouldn't depend on this algorithm to be fair, since I haven't analyzed it.
193 * But it does at least ensure that any node in 'hmap' can be chosen. */
195 hmap_random_node(const struct hmap
*hmap
)
197 struct hmap_node
*bucket
, *node
;
200 /* Choose a random non-empty bucket. */
201 for (i
= random_uint32(); ; i
++) {
202 bucket
= hmap
->buckets
[i
& hmap
->mask
];
208 /* Count nodes in bucket. */
210 for (node
= bucket
; node
; node
= node
->next
) {
214 /* Choose random node from bucket. */
216 for (node
= bucket
; i
-- > 0; node
= node
->next
) {
222 /* Returns the next node in 'hmap' in hash order, or NULL if no nodes remain in
223 * 'hmap'. Uses '*bucketp' and '*offsetp' to determine where to begin
224 * iteration, and stores new values to pass on the next iteration into them
227 * It's better to use plain HMAP_FOR_EACH and related functions, since they are
228 * faster and better at dealing with hmaps that change during iteration.
230 * Before beginning iteration, store 0 into '*bucketp' and '*offsetp'.
233 hmap_at_position(const struct hmap
*hmap
,
234 uint32_t *bucketp
, uint32_t *offsetp
)
240 for (b_idx
= *bucketp
; b_idx
<= hmap
->mask
; b_idx
++) {
241 struct hmap_node
*node
;
244 for (n_idx
= 0, node
= hmap
->buckets
[b_idx
]; node
!= NULL
;
245 n_idx
++, node
= node
->next
) {
246 if (n_idx
== offset
) {
248 *bucketp
= node
->hash
& hmap
->mask
;
249 *offsetp
= offset
+ 1;
251 *bucketp
= (node
->hash
& hmap
->mask
) + 1;