]> git.proxmox.com Git - mirror_ovs.git/blame - lib/hmap.c
ofpbuf: New function ofpbuf_clone_data_with_headroom().
[mirror_ovs.git] / lib / hmap.c
CommitLineData
/*
 * Copyright (c) 2008, 2009, 2010 Nicira Networks.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include <config.h>
18#include "hmap.h"
19#include <assert.h>
20#include <stdint.h>
f3099647 21#include <string.h>
064af421 22#include "coverage.h"
f2f7be86 23#include "random.h"
064af421
BP
24#include "util.h"
25
/* Coverage counters (see coverage.h): bumped when a bucket chain grows
 * pathologically long during a resize, and on each expand/shrink/reserve
 * that actually triggers a resize. */
COVERAGE_DEFINE(hmap_pathological);
COVERAGE_DEFINE(hmap_expand);
COVERAGE_DEFINE(hmap_shrink);
COVERAGE_DEFINE(hmap_reserve);
064af421
BP
31/* Initializes 'hmap' as an empty hash table. */
32void
33hmap_init(struct hmap *hmap)
34{
35 hmap->buckets = &hmap->one;
36 hmap->one = NULL;
37 hmap->mask = 0;
38 hmap->n = 0;
39}
40
41/* Frees memory reserved by 'hmap'. It is the client's responsibility to free
42 * the nodes themselves, if necessary. */
43void
44hmap_destroy(struct hmap *hmap)
45{
46 if (hmap && hmap->buckets != &hmap->one) {
47 free(hmap->buckets);
48 }
49}
50
/* Removes all nodes from 'hmap', leaving it ready to accept more nodes.  Does
 * not free memory allocated for 'hmap'.
 *
 * This function is appropriate when 'hmap' will soon have about as many
 * elements as it did before.  If 'hmap' will likely have fewer elements than
 * before, use hmap_destroy() followed by hmap_init() to save memory and
 * iteration time. */
void
hmap_clear(struct hmap *hmap)
{
    if (hmap->n > 0) {
        hmap->n = 0;
        /* Zero every bucket head so all chains are dropped; node memory
         * still belongs to the caller. */
        memset(hmap->buckets, 0, (hmap->mask + 1) * sizeof *hmap->buckets);
    }
}
66
064af421
BP
67/* Exchanges hash maps 'a' and 'b'. */
68void
69hmap_swap(struct hmap *a, struct hmap *b)
70{
71 struct hmap tmp = *a;
72 *a = *b;
73 *b = tmp;
baa8f41b
BP
74 hmap_moved(a);
75 hmap_moved(b);
76}
77
78/* Adjusts 'hmap' to compensate for having moved position in memory (e.g. due
79 * to realloc()). */
80void
81hmap_moved(struct hmap *hmap)
82{
83 if (!hmap->mask) {
84 hmap->buckets = &hmap->one;
064af421
BP
85 }
86}
87
88static void
89resize(struct hmap *hmap, size_t new_mask)
90{
91 struct hmap tmp;
92 size_t i;
93
94 assert(!(new_mask & (new_mask + 1)));
95 assert(new_mask != SIZE_MAX);
96
97 hmap_init(&tmp);
98 if (new_mask) {
99 tmp.buckets = xmalloc(sizeof *tmp.buckets * (new_mask + 1));
100 tmp.mask = new_mask;
101 for (i = 0; i <= tmp.mask; i++) {
102 tmp.buckets[i] = NULL;
103 }
104 }
105 for (i = 0; i <= hmap->mask; i++) {
106 struct hmap_node *node, *next;
107 int count = 0;
108 for (node = hmap->buckets[i]; node; node = next) {
109 next = node->next;
110 hmap_insert_fast(&tmp, node, node->hash);
111 count++;
112 }
113 if (count > 5) {
114 COVERAGE_INC(hmap_pathological);
115 }
116 }
117 hmap_swap(hmap, &tmp);
118 hmap_destroy(&tmp);
119}
120
/* Returns a bucket mask suitable for a table holding 'capacity' elements:
 * one less than a power of 2, chosen so that buckets average at most two
 * nodes.  Returns 0 when the inline single bucket suffices. */
static size_t
calc_mask(size_t capacity)
{
    size_t mask = capacity / 2;
    size_t shift;

    /* Smear the highest set bit into every lower position, rounding 'mask'
     * up to one less than a power of 2.  Doubling shifts 1,2,4,8,16(,32)
     * cover any size_t width. */
    for (shift = 1; shift < sizeof mask * 8; shift *= 2) {
        mask |= mask >> shift;
    }

    /* If we need to dynamically allocate buckets we might as well allocate at
     * least 4 of them. */
    mask |= (mask & 1) << 1;

    return mask;
}
140
141/* Expands 'hmap', if necessary, to optimize the performance of searches. */
142void
143hmap_expand(struct hmap *hmap)
144{
145 size_t new_mask = calc_mask(hmap->n);
146 if (new_mask > hmap->mask) {
147 COVERAGE_INC(hmap_expand);
148 resize(hmap, new_mask);
149 }
150}
151
152/* Shrinks 'hmap', if necessary, to optimize the performance of iteration. */
153void
154hmap_shrink(struct hmap *hmap)
155{
156 size_t new_mask = calc_mask(hmap->n);
157 if (new_mask < hmap->mask) {
158 COVERAGE_INC(hmap_shrink);
159 resize(hmap, new_mask);
160 }
161}
162
163/* Expands 'hmap', if necessary, to optimize the performance of searches when
164 * it has up to 'n' elements. (But iteration will be slow in a hash map whose
165 * allocated capacity is much higher than its current number of nodes.) */
166void
167hmap_reserve(struct hmap *hmap, size_t n)
168{
169 size_t new_mask = calc_mask(n);
170 if (new_mask > hmap->mask) {
171 COVERAGE_INC(hmap_reserve);
172 resize(hmap, new_mask);
173 }
174}
63e60b86
BP
175
176/* Adjusts 'hmap' to compensate for 'old_node' having moved position in memory
177 * to 'node' (e.g. due to realloc()). */
178void
179hmap_node_moved(struct hmap *hmap,
180 struct hmap_node *old_node, struct hmap_node *node)
181{
182 struct hmap_node **bucket = &hmap->buckets[node->hash & hmap->mask];
183 while (*bucket != old_node) {
184 bucket = &(*bucket)->next;
185 }
186 *bucket = node;
187}
188
f2f7be86
BP
189/* Chooses and returns a randomly selected node from 'hmap', which must not be
190 * empty.
191 *
192 * I wouldn't depend on this algorithm to be fair, since I haven't analyzed it.
193 * But it does at least ensure that any node in 'hmap' can be chosen. */
194struct hmap_node *
195hmap_random_node(const struct hmap *hmap)
196{
197 struct hmap_node *bucket, *node;
198 size_t n, i;
199
200 /* Choose a random non-empty bucket. */
201 for (i = random_uint32(); ; i++) {
202 bucket = hmap->buckets[i & hmap->mask];
203 if (bucket) {
204 break;
205 }
206 }
207
208 /* Count nodes in bucket. */
209 n = 0;
210 for (node = bucket; node; node = node->next) {
211 n++;
212 }
213
214 /* Choose random node from bucket. */
215 i = random_range(n);
216 for (node = bucket; i-- > 0; node = node->next) {
217 continue;
218 }
219 return node;
220}