]> git.proxmox.com Git - mirror_frr.git/blame - lib/hash.c
*: Convert list_delete(struct list *) to ** to allow nulling
[mirror_frr.git] / lib / hash.c
CommitLineData
718e3744 1/* Hash routine.
2 * Copyright (C) 1998 Kunihiro Ishiguro
3 *
4 * This file is part of GNU Zebra.
5 *
6 * GNU Zebra is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published
8 * by the Free Software Foundation; either version 2, or (at your
9 * option) any later version.
10 *
11 * GNU Zebra is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
896014f4
DL
16 * You should have received a copy of the GNU General Public License along
17 * with this program; see the file COPYING; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
718e3744 19 */
20
21#include <zebra.h>
4db0cff1 22#include <math.h>
718e3744 23
24#include "hash.h"
25#include "memory.h"
4db0cff1
QY
26#include "linklist.h"
27#include "termtable.h"
28#include "vty.h"
29#include "command.h"
6f6f0010 30#include "libfrr.h"
718e3744 31
d62a17ae 32DEFINE_MTYPE(LIB, HASH, "Hash")
33DEFINE_MTYPE(LIB, HASH_BACKET, "Hash Bucket")
34DEFINE_MTYPE_STATIC(LIB, HASH_INDEX, "Hash Index")
4a1ab8e4 35
/* Protects the global registry of hash tables (_hashes) below. */
pthread_mutex_t _hashes_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Registry of all live hash tables; created lazily on first
 * hash_create_size() and torn down when the last table is freed
 * in hash_free().  Used by the "show debugging hashtable" CLI. */
static struct list *_hashes;
718e3744 39/* Allocate a new hash. */
d62a17ae 40struct hash *hash_create_size(unsigned int size,
41 unsigned int (*hash_key)(void *),
42 int (*hash_cmp)(const void *, const void *),
43 const char *name)
718e3744 44{
d62a17ae 45 struct hash *hash;
46
47 assert((size & (size - 1)) == 0);
48 hash = XCALLOC(MTYPE_HASH, sizeof(struct hash));
49 hash->index =
50 XCALLOC(MTYPE_HASH_INDEX, sizeof(struct hash_backet *) * size);
51 hash->size = size;
d62a17ae 52 hash->hash_key = hash_key;
53 hash->hash_cmp = hash_cmp;
54 hash->count = 0;
55 hash->name = name ? XSTRDUP(MTYPE_HASH, name) : NULL;
56 hash->stats.empty = hash->size;
57
58 pthread_mutex_lock(&_hashes_mtx);
59 {
60 if (!_hashes)
61 _hashes = list_new();
62
63 listnode_add(_hashes, hash);
64 }
65 pthread_mutex_unlock(&_hashes_mtx);
66
67 return hash;
718e3744 68}
69
70/* Allocate a new hash with default hash size. */
d62a17ae 71struct hash *hash_create(unsigned int (*hash_key)(void *),
72 int (*hash_cmp)(const void *, const void *),
73 const char *name)
718e3744 74{
d62a17ae 75 return hash_create_size(HASH_INITIAL_SIZE, hash_key, hash_cmp, name);
718e3744 76}
77
/* Identity allocator for hash_get(): returns its argument unchanged.
 * Pass this as alloc_func to intern a value the caller has already
 * allocated. */
void *hash_alloc_intern(void *arg)
{
	return arg;
}
85
/* Fold a change in one chain's length into the running sum of squared
 * chain lengths (hz->stats.ssq), used to compute variance/stddev in the
 * CLI statistics.  Uses the identity new^2 - old^2 == (new+old)*(new-old).
 *
 * NB: old and new are each evaluated twice; pass side-effect-free
 * expressions.  All arguments are parenthesized and the macro expands to
 * a single expression without a trailing semicolon, so it behaves like an
 * ordinary statement (safe inside an unbraced if/else). */
#define hash_update_ssq(hz, old, new)                                          \
	atomic_fetch_add_explicit(&(hz)->stats.ssq,                            \
				  ((new) + (old)) * ((new) - (old)),           \
				  memory_order_relaxed)
6f6f0010 89
/* Expand hash if the chain length exceeds the threshold.
 * Doubles the bucket count (bounded by hash->max_size, if set) and rehashes
 * every backet into the new index, rebuilding the per-chain length and
 * empty-bucket statistics as it goes. */
static void hash_expand(struct hash *hash)
{
	unsigned int i, new_size;
	struct hash_backet *hb, *hbnext, **new_index;

	/* Doubling keeps the size a power of two (invariant from
	 * hash_create_size), so masking still works for bucket selection. */
	new_size = hash->size * 2;

	/* max_size == 0 means "unbounded". */
	if (hash->max_size && new_size > hash->max_size)
		return;

	new_index = XCALLOC(MTYPE_HASH_INDEX,
			    sizeof(struct hash_backet *) * new_size);
	if (new_index == NULL)
		return;

	/* All new buckets start empty; decremented as chains gain heads. */
	hash->stats.empty = new_size;

	/* Move each backet to the head of its new chain.  Only the chain
	 * head's ->len field is meaningful; a former head demoted to second
	 * position has its len zeroed. */
	for (i = 0; i < hash->size; i++)
		for (hb = hash->index[i]; hb; hb = hbnext) {
			unsigned int h = hb->key & (new_size - 1);

			hbnext = hb->next;
			hb->next = new_index[h];

			int oldlen = hb->next ? hb->next->len : 0;
			int newlen = oldlen + 1;

			if (newlen == 1)
				hash->stats.empty--;
			else
				hb->next->len = 0; /* demoted head */

			hb->len = newlen;

			hash_update_ssq(hash, oldlen, newlen);

			new_index[h] = hb;
		}

	/* Switch to new table */
	XFREE(MTYPE_HASH_INDEX, hash->index);
	hash->size = new_size;
	hash->index = new_index;
}
135
/* Lookup and return hash backet in hash.  If there is no
   corresponding hash backet and alloc_func is specified, create new
   hash backet.  Returns the stored (or newly interned) data pointer,
   or NULL when not found / alloc_func returned NULL. */
void *hash_get(struct hash *hash, void *data, void *(*alloc_func)(void *))
{
	unsigned int key;
	unsigned int index;
	void *newdata;
	struct hash_backet *backet;

	key = (*hash->hash_key)(data);
	/* size is a power of two, so masking selects the bucket. */
	index = key & (hash->size - 1);

	/* Cheap full-key comparison first, user hash_cmp only on match. */
	for (backet = hash->index[index]; backet != NULL;
	     backet = backet->next) {
		if (backet->key == key && (*hash->hash_cmp)(backet->data, data))
			return backet->data;
	}

	if (alloc_func) {
		newdata = (*alloc_func)(data);
		if (newdata == NULL)
			return NULL;

		/* Grow before insertion if the chains are getting long;
		 * recompute the bucket since the size may have changed. */
		if (HASH_THRESHOLD(hash->count + 1, hash->size)) {
			hash_expand(hash);
			index = key & (hash->size - 1);
		}

		/* Insert at the head of the chain. */
		backet = XCALLOC(MTYPE_HASH_BACKET, sizeof(struct hash_backet));
		backet->data = newdata;
		backet->key = key;
		backet->next = hash->index[index];
		hash->index[index] = backet;
		hash->count++;

		/* Chain length lives only in the head backet; the old head
		 * (now second) gets its len cleared. */
		int oldlen = backet->next ? backet->next->len : 0;
		int newlen = oldlen + 1;

		if (newlen == 1)
			hash->stats.empty--;
		else
			backet->next->len = 0;

		backet->len = newlen;

		hash_update_ssq(hash, oldlen, newlen);

		return backet->data;
	}
	return NULL;
}
188
189/* Hash lookup. */
d62a17ae 190void *hash_lookup(struct hash *hash, void *data)
718e3744 191{
d62a17ae 192 return hash_get(hash, data, NULL);
718e3744 193}
194
/* Simple Bernstein hash which is simple and fast for common case.
 * For each byte: hash = hash * 33 ^ byte (written below as the
 * equivalent shift-and-add, since h * 33 == (h << 5) + h in
 * unsigned arithmetic). */
unsigned int string_hash_make(const char *str)
{
	unsigned int h = 0;
	const char *p;

	for (p = str; *p != '\0'; p++)
		h = ((h << 5) + h) ^ (unsigned int)*p;

	return h;
}
205
/* This function release registered value from specified hash.  When
   release is successfully finished, return the data pointer in the
   hash backet; returns NULL when no matching element exists. */
void *hash_release(struct hash *hash, void *data)
{
	void *ret;
	unsigned int key;
	unsigned int index;
	struct hash_backet *backet;
	struct hash_backet *pp;

	key = (*hash->hash_key)(data);
	index = key & (hash->size - 1);

	/* pp trails one backet behind for unlinking; it starts equal to
	 * the chain head so "backet == pp" identifies the head case. */
	for (backet = pp = hash->index[index]; backet; backet = backet->next) {
		if (backet->key == key
		    && (*hash->hash_cmp)(backet->data, data)) {
			/* Chain length is stored only in the head backet. */
			int oldlen = hash->index[index]->len;
			int newlen = oldlen - 1;

			if (backet == pp)
				hash->index[index] = backet->next;
			else
				pp->next = backet->next;

			/* New head (if any) carries the updated length;
			 * otherwise the bucket just became empty. */
			if (hash->index[index])
				hash->index[index]->len = newlen;
			else
				hash->stats.empty++;

			hash_update_ssq(hash, oldlen, newlen);

			ret = backet->data;
			XFREE(MTYPE_HASH_BACKET, backet);
			hash->count--;
			return ret;
		}
		pp = backet;
	}
	return NULL;
}
247
248/* Iterator function for hash. */
d62a17ae 249void hash_iterate(struct hash *hash, void (*func)(struct hash_backet *, void *),
250 void *arg)
718e3744 251{
d62a17ae 252 unsigned int i;
253 struct hash_backet *hb;
254 struct hash_backet *hbnext;
255
256 for (i = 0; i < hash->size; i++)
257 for (hb = hash->index[i]; hb; hb = hbnext) {
258 /* get pointer to next hash backet here, in case (*func)
259 * decides to delete hb by calling hash_release
260 */
261 hbnext = hb->next;
262 (*func)(hb, arg);
263 }
718e3744 264}
265
3f9c7369 266/* Iterator function for hash. */
d62a17ae 267void hash_walk(struct hash *hash, int (*func)(struct hash_backet *, void *),
268 void *arg)
3f9c7369 269{
d62a17ae 270 unsigned int i;
271 struct hash_backet *hb;
272 struct hash_backet *hbnext;
273 int ret = HASHWALK_CONTINUE;
274
275 for (i = 0; i < hash->size; i++) {
276 for (hb = hash->index[i]; hb; hb = hbnext) {
277 /* get pointer to next hash backet here, in case (*func)
278 * decides to delete hb by calling hash_release
279 */
280 hbnext = hb->next;
281 ret = (*func)(hb, arg);
282 if (ret == HASHWALK_ABORT)
283 return;
284 }
3f9c7369 285 }
3f9c7369
DS
286}
287
718e3744 288/* Clean up hash. */
d62a17ae 289void hash_clean(struct hash *hash, void (*free_func)(void *))
718e3744 290{
d62a17ae 291 unsigned int i;
292 struct hash_backet *hb;
293 struct hash_backet *next;
718e3744 294
d62a17ae 295 for (i = 0; i < hash->size; i++) {
296 for (hb = hash->index[i]; hb; hb = next) {
297 next = hb->next;
298
299 if (free_func)
300 (*free_func)(hb->data);
718e3744 301
d62a17ae 302 XFREE(MTYPE_HASH_BACKET, hb);
303 hash->count--;
304 }
305 hash->index[i] = NULL;
718e3744 306 }
6f6f0010 307
d62a17ae 308 hash->stats.ssq = 0;
309 hash->stats.empty = hash->size;
718e3744 310}
311
312/* Free hash memory. You may call hash_clean before call this
313 function. */
d62a17ae 314void hash_free(struct hash *hash)
718e3744 315{
d62a17ae 316 pthread_mutex_lock(&_hashes_mtx);
317 {
318 if (_hashes) {
319 listnode_delete(_hashes, hash);
320 if (_hashes->count == 0) {
affe9e99 321 list_delete_and_null(&_hashes);
d62a17ae 322 }
323 }
324 }
325 pthread_mutex_unlock(&_hashes_mtx);
326
327 if (hash->name)
328 XFREE(MTYPE_HASH, hash->name);
329
330 XFREE(MTYPE_HASH_INDEX, hash->index);
331 XFREE(MTYPE_HASH, hash);
4db0cff1
QY
332}
333
6f6f0010
QY
334
335/* CLI commands ------------------------------------------------------------ */
4db0cff1 336
40818cec
DL
/* CLI: "show debugging hashtable [statistics]" — print per-table and
 * summary statistics for every named hash table in the registry. */
DEFUN_NOSH(show_hash_stats,
	   show_hash_stats_cmd,
	   "show debugging hashtable [statistics]",
	   SHOW_STR
	   DEBUG_STR
	   "Statistics about hash tables\n"
	   "Statistics about hash tables\n")
{
	struct hash *h;
	struct listnode *ln;
	struct ttable *tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);

	ttable_add_row(tt, "Hash table|Buckets|Entries|Empty|LF|SD|FLF|SD");
	tt->style.cell.lpad = 2;
	tt->style.cell.rpad = 1;
	tt->style.corner = '+';
	ttable_restyle(tt);
	ttable_rowseps(tt, 0, BOTTOM, true, '-');

	/* Summary statistics calculated are:
	 *
	 * - Load factor: This is the number of elements in the table divided
	 *   by the number of buckets. Since this hash table implementation
	 *   uses chaining, this value can be greater than 1.
	 *   This number provides information on how 'full' the table is, but
	 *   does not provide information on how evenly distributed the
	 *   elements are.
	 *   Notably, a load factor >= 1 does not imply that every bucket has
	 *   an element; with a pathological hash function, all elements could
	 *   be in a single bucket.
	 *
	 * - Full load factor: this is the number of elements in the table
	 *   divided by the number of buckets that have some elements in them.
	 *
	 * - Std. Dev.: This is the standard deviation calculated from the
	 *   relevant load factor. If the load factor is the mean of number of
	 *   elements per bucket, the standard deviation measures how much any
	 *   particular bucket is likely to deviate from the mean.
	 *   As a rule of thumb this number should be less than 2, and ideally
	 *   <= 1 for optimal performance. A number larger than 3 generally
	 *   indicates a poor hash function.
	 */

	double lf;    // load factor
	double flf;   // full load factor
	double var;   // overall variance
	double fvar;  // full variance
	double stdv;  // overall stddev
	double fstdv; // full stddev

	long double x2;   // h->count ^ 2
	long double ldc;  // (long double) h->count
	long double full; // h->size - h->stats.empty
	long double ssq;  // ssq casted to long double

	pthread_mutex_lock(&_hashes_mtx);
	if (!_hashes) {
		pthread_mutex_unlock(&_hashes_mtx);
		vty_out(vty, "No hash tables in use.\n");
		return CMD_SUCCESS;
	}

	/* Only named tables are shown; anonymous ones are skipped. */
	for (ALL_LIST_ELEMENTS_RO(_hashes, ln, h)) {
		if (!h->name)
			continue;

		ssq = (long double)h->stats.ssq;
		x2 = h->count * h->count;
		ldc = (long double)h->count;
		full = h->size - h->stats.empty;
		lf = h->count / (double)h->size;
		flf = full ? h->count / (double)(full) : 0;
		/* Var(X) = E[X^2] - E[X]^2, computed from the running
		 * sum-of-squares maintained by hash_update_ssq(). */
		var = ldc ? (1.0 / ldc) * (ssq - x2 / ldc) : 0;
		fvar = full ? (1.0 / full) * (ssq - x2 / full) : 0;
		/* Clamp tiny negative/noise values from FP rounding. */
		var = (var < .0001) ? 0 : var;
		fvar = (fvar < .0001) ? 0 : fvar;
		stdv = sqrt(var);
		fstdv = sqrt(fvar);

		ttable_add_row(tt, "%s|%d|%ld|%.0f%%|%.2lf|%.2lf|%.2lf|%.2lf",
			       h->name, h->size, h->count,
			       (h->stats.empty / (double)h->size) * 100, lf,
			       stdv, flf, fstdv);
	}
	pthread_mutex_unlock(&_hashes_mtx);

	/* display header */
	char header[] = "Showing hash table statistics for ";
	char underln[sizeof(header) + strlen(frr_protonameinst)];
	memset(underln, '-', sizeof(underln));
	underln[sizeof(underln) - 1] = '\0';
	vty_out(vty, "%s%s\n", header, frr_protonameinst);
	vty_out(vty, "%s\n", underln);

	/* NOTE(review): _hashes is dereferenced here after _hashes_mtx was
	 * released above — looks racy if another thread frees the last table
	 * concurrently; verify locking assumptions. */
	vty_out(vty, "# allocated: %d\n", _hashes->count);
	vty_out(vty, "# named: %d\n\n", tt->nrows - 1);

	if (tt->nrows > 1) {
		ttable_colseps(tt, 0, RIGHT, true, '|');
		char *table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
	} else
		vty_out(vty, "No named hash tables to display.\n");

	ttable_del(tt);

	return CMD_SUCCESS;
}
446
d62a17ae 447void hash_cmd_init()
4db0cff1 448{
d62a17ae 449 install_element(ENABLE_NODE, &show_hash_stats_cmd);
4db0cff1 450}