/* Hash routine.
 * Copyright (C) 1998 Kunihiro Ishiguro
 *
 * This file is part of GNU Zebra.
 *
 * GNU Zebra is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published
 * by the Free Software Foundation; either version 2, or (at your
 * option) any later version.
 *
 * GNU Zebra is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; see the file COPYING; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <zebra.h>
#include <math.h>

#include "hash.h"
#include "memory.h"
#include "linklist.h"
#include "termtable.h"
#include "vty.h"
#include "command.h"
#include "libfrr.h"
#include "frr_pthread.h"

DEFINE_MTYPE_STATIC(LIB, HASH, "Hash")
DEFINE_MTYPE_STATIC(LIB, HASH_BACKET, "Hash Bucket")
DEFINE_MTYPE_STATIC(LIB, HASH_INDEX, "Hash Index")

static pthread_mutex_t _hashes_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct list *_hashes;

struct hash *hash_create_size(unsigned int size,
			      unsigned int (*hash_key)(const void *),
			      bool (*hash_cmp)(const void *, const void *),
			      const char *name)
{
	struct hash *hash;

	assert((size & (size - 1)) == 0);
	hash = XCALLOC(MTYPE_HASH, sizeof(struct hash));
	hash->index =
		XCALLOC(MTYPE_HASH_INDEX, sizeof(struct hash_bucket *) * size);
	hash->size = size;
	hash->hash_key = hash_key;
	hash->hash_cmp = hash_cmp;
	hash->count = 0;
	hash->name = name ? XSTRDUP(MTYPE_HASH, name) : NULL;
	hash->stats.empty = hash->size;

	frr_with_mutex(&_hashes_mtx) {
		if (!_hashes)
			_hashes = list_new();

		listnode_add(_hashes, hash);
	}

	return hash;
}

struct hash *hash_create(unsigned int (*hash_key)(const void *),
			 bool (*hash_cmp)(const void *, const void *),
			 const char *name)
{
	return hash_create_size(HASH_INITIAL_SIZE, hash_key, hash_cmp, name);
}

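/* Usage sketch (illustrative only; the "sketch_*" names are not part of
 * this API): a string-keyed table built on hash_create_size().  A caller
 * supplies a key function and an equality callback with these signatures.
 */
static unsigned int sketch_key(const void *arg)
{
	return string_hash_make(arg);
}

static bool sketch_cmp(const void *a, const void *b)
{
	return strcmp(a, b) == 0;
}

static struct hash *sketch_table_create(void)
{
	/* The size hint must be a power of two (see the assert above). */
	return hash_create_size(32, sketch_key, sketch_cmp, "Sketch table");
}
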
void *hash_alloc_intern(void *arg)
{
	return arg;
}

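/* Maintain the running sum of squared bucket lengths: when a chain grows or
 * shrinks from length 'old' to 'new', (new + old) * (new - old) == new^2 - old^2.
 */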
#define hash_update_ssq(hz, old, new)                                         \
	atomic_fetch_add_explicit(&hz->stats.ssq, (new + old) * (new - old), \
				  memory_order_relaxed);

/* Expand hash if the chain length exceeds the threshold. */
static void hash_expand(struct hash *hash)
{
	unsigned int i, new_size;
	struct hash_bucket *hb, *hbnext, **new_index;

	new_size = hash->size * 2;

	if (hash->max_size && new_size > hash->max_size)
		return;

	new_index = XCALLOC(MTYPE_HASH_INDEX,
			    sizeof(struct hash_bucket *) * new_size);

	hash->stats.empty = new_size;

	for (i = 0; i < hash->size; i++)
		for (hb = hash->index[i]; hb; hb = hbnext) {
			unsigned int h = hb->key & (new_size - 1);

			hbnext = hb->next;
			hb->next = new_index[h];

			int oldlen = hb->next ? hb->next->len : 0;
			int newlen = oldlen + 1;

			if (newlen == 1)
				hash->stats.empty--;
			else
				hb->next->len = 0;

			hb->len = newlen;

			hash_update_ssq(hash, oldlen, newlen);

			new_index[h] = hb;
		}

	/* Switch to new table */
	XFREE(MTYPE_HASH_INDEX, hash->index);
	hash->size = new_size;
	hash->index = new_index;
}

void *hash_get(struct hash *hash, void *data, void *(*alloc_func)(void *))
{
	unsigned int key;
	unsigned int index;
	void *newdata;
	struct hash_bucket *bucket;

	if (!alloc_func && !hash->count)
		return NULL;

	key = (*hash->hash_key)(data);
	index = key & (hash->size - 1);

	for (bucket = hash->index[index]; bucket != NULL;
	     bucket = bucket->next) {
		if (bucket->key == key && (*hash->hash_cmp)(bucket->data, data))
			return bucket->data;
	}

	if (alloc_func) {
		newdata = (*alloc_func)(data);
		if (newdata == NULL)
			return NULL;

		if (HASH_THRESHOLD(hash->count + 1, hash->size)) {
			hash_expand(hash);
			index = key & (hash->size - 1);
		}

		bucket = XCALLOC(MTYPE_HASH_BACKET, sizeof(struct hash_bucket));
		bucket->data = newdata;
		bucket->key = key;
		bucket->next = hash->index[index];
		hash->index[index] = bucket;
		hash->count++;

		int oldlen = bucket->next ? bucket->next->len : 0;
		int newlen = oldlen + 1;

		if (newlen == 1)
			hash->stats.empty--;
		else
			bucket->next->len = 0;

		bucket->len = newlen;

		hash_update_ssq(hash, oldlen, newlen);

		return bucket->data;
	}
	return NULL;
}

void *hash_lookup(struct hash *hash, void *data)
{
	return hash_get(hash, data, NULL);
}

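/* Usage sketch (illustrative): inserting and looking up entries in the
 * hypothetical table from sketch_table_create() above.  hash_get() with
 * hash_alloc_intern stores the search argument pointer itself; a real
 * allocator callback would build and return a new object instead.
 */
static void sketch_insert_lookup(struct hash *h)
{
	static char key[] = "example";

	/* Insert if absent, otherwise return the existing entry. */
	char *stored = hash_get(h, key, hash_alloc_intern);

	/* Pure lookup: returns NULL when the key is not present. */
	char *found = hash_lookup(h, key);

	assert(stored == found);
}
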
/* Bernstein-style string hash: multiply by 33 and xor in each byte. */
unsigned int string_hash_make(const char *str)
{
	unsigned int hash = 0;

	while (*str)
		hash = (hash * 33) ^ (unsigned int)*str++;

	return hash;
}

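/* Remove the bucket whose stored data matches 'data' and return the stored
 * pointer, or NULL if no match is found.  The bucket is freed here; freeing
 * the returned data is the caller's responsibility.
 */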
void *hash_release(struct hash *hash, void *data)
{
	void *ret;
	unsigned int key;
	unsigned int index;
	struct hash_bucket *bucket;
	struct hash_bucket *pp;

	key = (*hash->hash_key)(data);
	index = key & (hash->size - 1);

	for (bucket = pp = hash->index[index]; bucket; bucket = bucket->next) {
		if (bucket->key == key
		    && (*hash->hash_cmp)(bucket->data, data)) {
			int oldlen = hash->index[index]->len;
			int newlen = oldlen - 1;

			if (bucket == pp)
				hash->index[index] = bucket->next;
			else
				pp->next = bucket->next;

			if (hash->index[index])
				hash->index[index]->len = newlen;
			else
				hash->stats.empty++;

			hash_update_ssq(hash, oldlen, newlen);

			ret = bucket->data;
			XFREE(MTYPE_HASH_BACKET, bucket);
			hash->count--;
			return ret;
		}
		pp = bucket;
	}
	return NULL;
}

void hash_iterate(struct hash *hash, void (*func)(struct hash_bucket *, void *),
		  void *arg)
{
	unsigned int i;
	struct hash_bucket *hb;
	struct hash_bucket *hbnext;

	for (i = 0; i < hash->size; i++)
		for (hb = hash->index[i]; hb; hb = hbnext) {
			/* get pointer to next hash bucket here, in case (*func)
			 * decides to delete hb by calling hash_release
			 */
			hbnext = hb->next;
			(*func)(hb, arg);
		}
}

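/* Usage sketch (illustrative; the "sketch_*" names are examples): counting
 * entries with hash_iterate().  The callback receives each bucket plus the
 * opaque argument passed through.
 */
static void sketch_count_cb(struct hash_bucket *hb, void *arg)
{
	unsigned long *counter = arg;

	(*counter)++;
}

static unsigned long sketch_count(struct hash *h)
{
	unsigned long counter = 0;

	hash_iterate(h, sketch_count_cb, &counter);
	return counter;
}
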
void hash_walk(struct hash *hash, int (*func)(struct hash_bucket *, void *),
	       void *arg)
{
	unsigned int i;
	struct hash_bucket *hb;
	struct hash_bucket *hbnext;
	int ret = HASHWALK_CONTINUE;

	for (i = 0; i < hash->size; i++) {
		for (hb = hash->index[i]; hb; hb = hbnext) {
			/* get pointer to next hash bucket here, in case (*func)
			 * decides to delete hb by calling hash_release
			 */
			hbnext = hb->next;
			ret = (*func)(hb, arg);
			if (ret == HASHWALK_ABORT)
				return;
		}
	}
}

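/* Remove every bucket, optionally handing each stored pointer to free_func.
 * The table itself stays allocated and can be reused or given to hash_free().
 */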
void hash_clean(struct hash *hash, void (*free_func)(void *))
{
	unsigned int i;
	struct hash_bucket *hb;
	struct hash_bucket *next;

	for (i = 0; i < hash->size; i++) {
		for (hb = hash->index[i]; hb; hb = next) {
			next = hb->next;

			if (free_func)
				(*free_func)(hb->data);

			XFREE(MTYPE_HASH_BACKET, hb);
			hash->count--;
		}
		hash->index[i] = NULL;
	}

	hash->stats.ssq = 0;
	hash->stats.empty = hash->size;
}

static void hash_to_list_iter(struct hash_bucket *hb, void *arg)
{
	struct list *list = arg;

	listnode_add(list, hb->data);
}

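/* Copy every stored data pointer into a newly allocated list; the caller is
 * responsible for freeing it with list_delete().
 */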
struct list *hash_to_list(struct hash *hash)
{
	struct list *list = list_new();

	hash_iterate(hash, hash_to_list_iter, list);
	return list;
}

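/* Release the table itself: its registration in the global list, its name,
 * its index array and the hash structure.  Stored entries are not touched,
 * so callers normally run hash_clean() first.
 */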
void hash_free(struct hash *hash)
{
	frr_with_mutex(&_hashes_mtx) {
		if (_hashes) {
			listnode_delete(_hashes, hash);
			if (_hashes->count == 0) {
				list_delete(&_hashes);
			}
		}
	}

	XFREE(MTYPE_HASH, hash->name);

	XFREE(MTYPE_HASH_INDEX, hash->index);
	XFREE(MTYPE_HASH, hash);
}
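
/* Usage sketch (illustrative): typical teardown order when the stored
 * objects were allocated by the caller.  "sketch_data_free" stands in for
 * whatever destructor the application actually uses.
 */
static void sketch_data_free(void *data)
{
	XFREE(MTYPE_TMP, data);
}

static void sketch_teardown(struct hash *h)
{
	hash_clean(h, sketch_data_free); /* drop buckets, free user data */
	hash_free(h);			 /* then release the table itself */
}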

/* CLI commands ------------------------------------------------------------ */

DEFUN_NOSH(show_hash_stats,
	   show_hash_stats_cmd,
	   "show debugging hashtable [statistics]",
	   SHOW_STR
	   DEBUG_STR
	   "Statistics about hash tables\n"
	   "Statistics about hash tables\n")
{
	struct hash *h;
	struct listnode *ln;
	struct ttable *tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);

	ttable_add_row(tt, "Hash table|Buckets|Entries|Empty|LF|SD|FLF|SD");
	tt->style.cell.lpad = 2;
	tt->style.cell.rpad = 1;
	tt->style.corner = '+';
	ttable_restyle(tt);
	ttable_rowseps(tt, 0, BOTTOM, true, '-');

	/* Summary statistics calculated are:
	 *
	 * - Load factor: This is the number of elements in the table divided
	 *   by the number of buckets. Since this hash table implementation
	 *   uses chaining, this value can be greater than 1.
	 *   This number provides information on how 'full' the table is, but
	 *   does not provide information on how evenly distributed the
	 *   elements are.
	 *   Notably, a load factor >= 1 does not imply that every bucket has
	 *   an element; with a pathological hash function, all elements could
	 *   be in a single bucket.
	 *
	 * - Full load factor: this is the number of elements in the table
	 *   divided by the number of buckets that have some elements in them.
	 *
	 * - Std. Dev.: This is the standard deviation calculated from the
	 *   relevant load factor. If the load factor is the mean number of
	 *   elements per bucket, the standard deviation measures how much any
	 *   particular bucket is likely to deviate from the mean.
	 *   As a rule of thumb this number should be less than 2, and ideally
	 *   <= 1 for optimal performance. A number larger than 3 generally
	 *   indicates a poor hash function.
	 */
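	/* Worked example: 96 entries spread over 64 buckets, 48 of them
	 * non-empty, gives LF = 96/64 = 1.50 and FLF = 96/48 = 2.00.
	 */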

	double lf;    // load factor
	double flf;   // full load factor
	double var;   // overall variance
	double fvar;  // full variance
	double stdv;  // overall stddev
	double fstdv; // full stddev

	long double x2;   // h->count ^ 2
	long double ldc;  // (long double) h->count
	long double full; // h->size - h->stats.empty
	long double ssq;  // ssq casted to long double

	pthread_mutex_lock(&_hashes_mtx);
	if (!_hashes) {
		pthread_mutex_unlock(&_hashes_mtx);
		ttable_del(tt);
		vty_out(vty, "No hash tables in use.\n");
		return CMD_SUCCESS;
	}

	for (ALL_LIST_ELEMENTS_RO(_hashes, ln, h)) {
		if (!h->name)
			continue;

		ssq = (long double)h->stats.ssq;
		x2 = h->count * h->count;
		ldc = (long double)h->count;
		full = h->size - h->stats.empty;
		lf = h->count / (double)h->size;
		flf = full ? h->count / (double)(full) : 0;
		var = ldc ? (1.0 / ldc) * (ssq - x2 / ldc) : 0;
		fvar = full ? (1.0 / full) * (ssq - x2 / full) : 0;
		var = (var < .0001) ? 0 : var;
		fvar = (fvar < .0001) ? 0 : fvar;
		stdv = sqrt(var);
		fstdv = sqrt(fvar);

		ttable_add_row(tt, "%s|%d|%ld|%.0f%%|%.2lf|%.2lf|%.2lf|%.2lf",
			       h->name, h->size, h->count,
			       (h->stats.empty / (double)h->size) * 100, lf,
			       stdv, flf, fstdv);
	}
	pthread_mutex_unlock(&_hashes_mtx);

	/* display header */
	char header[] = "Showing hash table statistics for ";
	char underln[sizeof(header) + strlen(frr_protonameinst)];
	memset(underln, '-', sizeof(underln));
	underln[sizeof(underln) - 1] = '\0';
	vty_out(vty, "%s%s\n", header, frr_protonameinst);
	vty_out(vty, "%s\n", underln);

	vty_out(vty, "# allocated: %d\n", _hashes->count);
	vty_out(vty, "# named: %d\n\n", tt->nrows - 1);

	if (tt->nrows > 1) {
		ttable_colseps(tt, 0, RIGHT, true, '|');
		char *table = ttable_dump(tt, "\n");
		vty_out(vty, "%s\n", table);
		XFREE(MTYPE_TMP, table);
	} else
		vty_out(vty, "No named hash tables to display.\n");

	ttable_del(tt);

	return CMD_SUCCESS;
}

void hash_cmd_init(void)
{
	install_element(ENABLE_NODE, &show_hash_stats_cmd);
}