/*
 * Copyright (c) 2008, 2009, 2010, 2012, 2013, 2015 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "openvswitch/hmap.h"
#include <stdint.h>
#include <string.h>
#include "coverage.h"
#include "random.h"
#include "util.h"
#include "openvswitch/vlog.h"

VLOG_DEFINE_THIS_MODULE(hmap);

COVERAGE_DEFINE(hmap_pathological);
COVERAGE_DEFINE(hmap_expand);
COVERAGE_DEFINE(hmap_shrink);
COVERAGE_DEFINE(hmap_reserve);

/* Initializes 'hmap' as an empty hash table. */
void
hmap_init(struct hmap *hmap)
{
    hmap->buckets = &hmap->one;
    hmap->one = NULL;
    hmap->mask = 0;
    hmap->n = 0;
}

/* Frees memory reserved by 'hmap'.  It is the client's responsibility to free
 * the nodes themselves, if necessary. */
void
hmap_destroy(struct hmap *hmap)
{
    if (hmap && hmap->buckets != &hmap->one) {
        free(hmap->buckets);
    }
}
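
/* Usage sketch (illustrative only, not part of this file): a typical client
 * embeds a 'struct hmap_node' in its own structure and supplies a hash at
 * insertion time.  'struct my_node', 'my_key', and the hash_int() helper from
 * "hash.h" are assumptions made for this example:
 *
 *     struct my_node {
 *         struct hmap_node hmap_node;
 *         int my_key;
 *     };
 *
 *     struct hmap my_map = HMAP_INITIALIZER(&my_map);
 *     struct my_node *n = xmalloc(sizeof *n);
 *
 *     n->my_key = 42;
 *     hmap_insert(&my_map, &n->hmap_node, hash_int(n->my_key, 0));
 *     ...
 *     hmap_remove(&my_map, &n->hmap_node);
 *     free(n);
 *     hmap_destroy(&my_map);
 */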

/* Removes all nodes from 'hmap', leaving it ready to accept more nodes.  Does
 * not free memory allocated for 'hmap'.
 *
 * This function is appropriate when 'hmap' will soon have about as many
 * elements as it did before.  If 'hmap' will likely have fewer elements than
 * before, use hmap_destroy() followed by hmap_init() to save memory and
 * iteration time. */
void
hmap_clear(struct hmap *hmap)
{
    if (hmap->n > 0) {
        hmap->n = 0;
        memset(hmap->buckets, 0, (hmap->mask + 1) * sizeof *hmap->buckets);
    }
}

/* Exchanges hash maps 'a' and 'b'. */
void
hmap_swap(struct hmap *a, struct hmap *b)
{
    struct hmap tmp = *a;
    *a = *b;
    *b = tmp;
    hmap_moved(a);
    hmap_moved(b);
}

/* Adjusts 'hmap' to compensate for having moved position in memory (e.g. due
 * to realloc()). */
void
hmap_moved(struct hmap *hmap)
{
    if (!hmap->mask) {
        hmap->buckets = &hmap->one;
    }
}
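
/* Illustrative sketch (the 'maps' array, 'old_n', and 'new_n' are hypothetical
 * names, not part of this file): if a caller keeps an array of hmaps and grows
 * it with xrealloc(), each map must be told about the move so that a
 * single-bucket map's 'buckets' pointer is re-pointed at its own 'one' member
 * rather than the old copy's:
 *
 *     maps = xrealloc(maps, new_n * sizeof *maps);
 *     for (size_t i = 0; i < old_n; i++) {
 *         hmap_moved(&maps[i]);
 *     }
 */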

static void
resize(struct hmap *hmap, size_t new_mask, const char *where)
{
    struct hmap tmp;
    size_t i;

    ovs_assert(is_pow2(new_mask + 1));

    hmap_init(&tmp);
    if (new_mask) {
        tmp.buckets = xmalloc(sizeof *tmp.buckets * (new_mask + 1));
        tmp.mask = new_mask;
        for (i = 0; i <= tmp.mask; i++) {
            tmp.buckets[i] = NULL;
        }
    }
    for (i = 0; i <= hmap->mask; i++) {
        struct hmap_node *node, *next;
        int count = 0;
        for (node = hmap->buckets[i]; node; node = next) {
            next = node->next;
            hmap_insert_fast(&tmp, node, node->hash);
            count++;
        }
        if (count > 5) {
            static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(10, 10);
            COVERAGE_INC(hmap_pathological);
            VLOG_DBG_RL(&rl, "%s: %d nodes in bucket (%"PRIuSIZE" nodes, %"PRIuSIZE" buckets)",
                        where, count, hmap->n, hmap->mask + 1);
        }
    }
    hmap_swap(hmap, &tmp);
    hmap_destroy(&tmp);
}

static size_t
calc_mask(size_t capacity)
{
    size_t mask = capacity / 2;
    mask |= mask >> 1;
    mask |= mask >> 2;
    mask |= mask >> 4;
    mask |= mask >> 8;
    mask |= mask >> 16;
#if SIZE_MAX > UINT32_MAX
    mask |= mask >> 32;
#endif

    /* If we need to dynamically allocate buckets we might as well allocate at
     * least 4 of them. */
    mask |= (mask & 1) << 1;

    return mask;
}
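
/* Worked example of calc_mask(), for illustration: with 'capacity' of 10, mask
 * starts as 10 / 2 = 5 (binary 101), the shifts smear it to 7 (binary 111),
 * and the final step leaves it unchanged, so the table gets 7 + 1 = 8 buckets.
 * With 'capacity' of 3, mask starts as 1 and "mask |= (mask & 1) << 1" raises
 * it to 3, giving the 4-bucket minimum for dynamically allocated tables.  In
 * general the bucket count is about half the capacity, rounded up to a power
 * of 2, so buckets average no more than roughly two nodes each. */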

/* Expands 'hmap', if necessary, to optimize the performance of searches.
 *
 * ('where' is used in debug logging.  Commonly one would use hmap_expand() to
 * automatically provide the caller's source file and line number for
 * 'where'.) */
void
hmap_expand_at(struct hmap *hmap, const char *where)
{
    size_t new_mask = calc_mask(hmap->n);
    if (new_mask > hmap->mask) {
        COVERAGE_INC(hmap_expand);
        resize(hmap, new_mask, where);
    }
}

/* Shrinks 'hmap', if necessary, to optimize the performance of iteration.
 *
 * ('where' is used in debug logging.  Commonly one would use hmap_shrink() to
 * automatically provide the caller's source file and line number for
 * 'where'.) */
void
hmap_shrink_at(struct hmap *hmap, const char *where)
{
    size_t new_mask = calc_mask(hmap->n);
    if (new_mask < hmap->mask) {
        COVERAGE_INC(hmap_shrink);
        resize(hmap, new_mask, where);
    }
}

/* Expands 'hmap', if necessary, to optimize the performance of searches when
 * it has up to 'n' elements.  (But iteration will be slow in a hash map whose
 * allocated capacity is much higher than its current number of nodes.)
 *
 * ('where' is used in debug logging.  Commonly one would use hmap_reserve() to
 * automatically provide the caller's source file and line number for
 * 'where'.) */
void
hmap_reserve_at(struct hmap *hmap, size_t n, const char *where)
{
    size_t new_mask = calc_mask(n);
    if (new_mask > hmap->mask) {
        COVERAGE_INC(hmap_reserve);
        resize(hmap, new_mask, where);
    }
}
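
/* Illustrative sketch (the 'items' array, 'n_items', and hash_int() from
 * "hash.h" are assumptions for the example): reserving capacity up front
 * avoids repeated resizing while a known number of nodes is inserted; the
 * hmap_reserve() and hmap_insert() macros supply the caller's source location
 * for 'where':
 *
 *     hmap_reserve(&my_map, n_items);
 *     for (size_t i = 0; i < n_items; i++) {
 *         hmap_insert(&my_map, &items[i].hmap_node,
 *                     hash_int(items[i].my_key, 0));
 *     }
 */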

/* Adjusts 'hmap' to compensate for 'old_node' having moved position in memory
 * to 'node' (e.g. due to realloc()). */
void
hmap_node_moved(struct hmap *hmap,
                struct hmap_node *old_node, struct hmap_node *node)
{
    struct hmap_node **bucket = &hmap->buckets[node->hash & hmap->mask];
    while (*bucket != old_node) {
        bucket = &(*bucket)->next;
    }
    *bucket = node;
}
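
/* Illustrative sketch ('elem', its 'hmap_node' member, and 'new_size' are
 * hypothetical): when a single element already in 'my_map' is reallocated,
 * remember its old node address and report the move:
 *
 *     struct hmap_node *old_node = &elem->hmap_node;
 *
 *     elem = xrealloc(elem, new_size);
 *     if (&elem->hmap_node != old_node) {
 *         hmap_node_moved(&my_map, old_node, &elem->hmap_node);
 *     }
 */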

/* Chooses and returns a randomly selected node from 'hmap', which must not be
 * empty.
 *
 * I wouldn't depend on this algorithm to be fair, since I haven't analyzed it.
 * But it does at least ensure that any node in 'hmap' can be chosen. */
struct hmap_node *
hmap_random_node(const struct hmap *hmap)
{
    struct hmap_node *bucket, *node;
    size_t n, i;

    /* Choose a random non-empty bucket. */
    for (;;) {
        bucket = hmap->buckets[random_uint32() & hmap->mask];
        if (bucket) {
            break;
        }
    }

    /* Count nodes in bucket. */
    n = 0;
    for (node = bucket; node; node = node->next) {
        n++;
    }

    /* Choose random node from bucket. */
    i = random_range(n);
    for (node = bucket; i-- > 0; node = node->next) {
        continue;
    }
    return node;
}

/* Returns the next node in 'hmap' in hash order, or NULL if no nodes remain in
 * 'hmap'.  Uses '*pos' to determine where to begin iteration, and updates
 * '*pos' before returning so that it can be passed back in to continue from
 * the following node.
 *
 * It's better to use plain HMAP_FOR_EACH and related functions, since they are
 * faster and better at dealing with hmaps that change during iteration.
 *
 * Before beginning iteration, set '*pos' to all zeros. */
struct hmap_node *
hmap_at_position(const struct hmap *hmap,
                 struct hmap_position *pos)
{
    size_t offset;
    size_t b_idx;

    offset = pos->offset;
    for (b_idx = pos->bucket; b_idx <= hmap->mask; b_idx++) {
        struct hmap_node *node;
        size_t n_idx;

        for (n_idx = 0, node = hmap->buckets[b_idx]; node != NULL;
             n_idx++, node = node->next) {
            if (n_idx == offset) {
                if (node->next) {
                    pos->bucket = node->hash & hmap->mask;
                    pos->offset = offset + 1;
                } else {
                    pos->bucket = (node->hash & hmap->mask) + 1;
                    pos->offset = 0;
                }
                return node;
            }
        }
        offset = 0;
    }

    pos->bucket = 0;
    pos->offset = 0;
    return NULL;
}
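
/* Illustrative sketch ('struct my_node' and its 'hmap_node' member are assumed
 * for the example): resumable iteration with hmap_at_position(), e.g. to visit
 * a few nodes per invocation of some periodic task:
 *
 *     struct hmap_position pos;
 *     struct hmap_node *node;
 *
 *     memset(&pos, 0, sizeof pos);
 *     while ((node = hmap_at_position(&my_map, &pos)) != NULL) {
 *         struct my_node *n = CONTAINER_OF(node, struct my_node, hmap_node);
 *         ...
 *     }
 */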

/* Returns true if 'node' is in 'hmap', false otherwise. */
bool
hmap_contains(const struct hmap *hmap, const struct hmap_node *node)
{
    struct hmap_node *p;

    for (p = hmap_first_in_bucket(hmap, node->hash); p; p = p->next) {
        if (p == node) {
            return true;
        }
    }

    return false;
}