1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) 1998 Kunihiro Ishiguro
#include "termtable.h"
#include "frr_pthread.h"
#include "libfrr_trace.h"

#include <math.h>
19 DEFINE_MTYPE_STATIC(LIB
, HASH
, "Hash");
20 DEFINE_MTYPE_STATIC(LIB
, HASH_BUCKET
, "Hash Bucket");
21 DEFINE_MTYPE_STATIC(LIB
, HASH_INDEX
, "Hash Index");
/* Global registry of live hash tables, consumed by the
 * `show debugging hashtable` CLI.  Guarded by _hashes_mtx. */
static pthread_mutex_t _hashes_mtx = PTHREAD_MUTEX_INITIALIZER;
static struct list *_hashes;
26 struct hash
*hash_create_size(unsigned int size
,
27 unsigned int (*hash_key
)(const void *),
28 bool (*hash_cmp
)(const void *, const void *),
33 assert((size
& (size
- 1)) == 0);
34 hash
= XCALLOC(MTYPE_HASH
, sizeof(struct hash
));
36 XCALLOC(MTYPE_HASH_INDEX
, sizeof(struct hash_bucket
*) * size
);
38 hash
->hash_key
= hash_key
;
39 hash
->hash_cmp
= hash_cmp
;
41 hash
->name
= name
? XSTRDUP(MTYPE_HASH
, name
) : NULL
;
42 hash
->stats
.empty
= hash
->size
;
44 frr_with_mutex (&_hashes_mtx
) {
48 listnode_add(_hashes
, hash
);
54 struct hash
*hash_create(unsigned int (*hash_key
)(const void *),
55 bool (*hash_cmp
)(const void *, const void *),
58 return hash_create_size(HASH_INITIAL_SIZE
, hash_key
, hash_cmp
, name
);
/*
 * Default alloc_func for hash_get(): stores the lookup key itself as the
 * bucket data (identity; no allocation is performed).
 */
void *hash_alloc_intern(void *arg)
{
	return arg;
}
/*
 * Incrementally maintain the sum of squared chain lengths (used for the
 * stddev shown by the stats CLI):
 *
 *   ssq = ssq + (new^2 - old^2)
 *       = ssq + ((new + old) * (new - old))
 *
 * ssq is atomic; we split into add/sub because fetch_sub of a negative
 * adjustment keeps the argument non-negative.
 */
#define hash_update_ssq(hz, old, new)                                         \
	do {                                                                  \
		int _adjust = (new + old) * (new - old);                      \
		if (_adjust < 0)                                              \
			atomic_fetch_sub_explicit(&hz->stats.ssq, -_adjust,   \
						  memory_order_relaxed);      \
		else                                                          \
			atomic_fetch_add_explicit(&hz->stats.ssq, _adjust,    \
						  memory_order_relaxed);      \
	} while (0)
81 /* Expand hash if the chain length exceeds the threshold. */
82 static void hash_expand(struct hash
*hash
)
84 unsigned int i
, new_size
;
85 struct hash_bucket
*hb
, *hbnext
, **new_index
;
87 new_size
= hash
->size
* 2;
89 if (hash
->max_size
&& new_size
> hash
->max_size
)
92 new_index
= XCALLOC(MTYPE_HASH_INDEX
,
93 sizeof(struct hash_bucket
*) * new_size
);
95 hash
->stats
.empty
= new_size
;
97 for (i
= 0; i
< hash
->size
; i
++)
98 for (hb
= hash
->index
[i
]; hb
; hb
= hbnext
) {
99 unsigned int h
= hb
->key
& (new_size
- 1);
102 hb
->next
= new_index
[h
];
104 int oldlen
= hb
->next
? hb
->next
->len
: 0;
105 int newlen
= oldlen
+ 1;
114 hash_update_ssq(hash
, oldlen
, newlen
);
119 /* Switch to new table */
120 XFREE(MTYPE_HASH_INDEX
, hash
->index
);
121 hash
->size
= new_size
;
122 hash
->index
= new_index
;
125 void *hash_get(struct hash
*hash
, void *data
, void *(*alloc_func
)(void *))
127 frrtrace(2, frr_libfrr
, hash_get
, hash
, data
);
132 struct hash_bucket
*bucket
;
134 if (!alloc_func
&& !hash
->count
)
137 key
= (*hash
->hash_key
)(data
);
138 index
= key
& (hash
->size
- 1);
140 for (bucket
= hash
->index
[index
]; bucket
!= NULL
;
141 bucket
= bucket
->next
) {
142 if (bucket
->key
== key
&& (*hash
->hash_cmp
)(bucket
->data
, data
))
147 newdata
= (*alloc_func
)(data
);
151 if (HASH_THRESHOLD(hash
->count
+ 1, hash
->size
)) {
153 index
= key
& (hash
->size
- 1);
156 bucket
= XCALLOC(MTYPE_HASH_BUCKET
, sizeof(struct hash_bucket
));
157 bucket
->data
= newdata
;
159 bucket
->next
= hash
->index
[index
];
160 hash
->index
[index
] = bucket
;
163 frrtrace(3, frr_libfrr
, hash_insert
, hash
, data
, key
);
165 int oldlen
= bucket
->next
? bucket
->next
->len
: 0;
166 int newlen
= oldlen
+ 1;
171 bucket
->next
->len
= 0;
173 bucket
->len
= newlen
;
175 hash_update_ssq(hash
, oldlen
, newlen
);
/* Pure lookup: hash_get() with no allocator, so nothing is inserted. */
void *hash_lookup(struct hash *hash, void *data)
{
	return hash_get(hash, data, NULL);
}
/*
 * Simple string hash (DJB2 variant using XOR): h = (h * 33) ^ c for each
 * byte of `str`.  Returns 0 for the empty string.
 */
unsigned int string_hash_make(const char *str)
{
	unsigned int hash = 0;

	while (*str)
		hash = (hash * 33) ^ (unsigned int)*str++;

	return hash;
}
197 void *hash_release(struct hash
*hash
, void *data
)
202 struct hash_bucket
*bucket
;
203 struct hash_bucket
*pp
;
205 key
= (*hash
->hash_key
)(data
);
206 index
= key
& (hash
->size
- 1);
208 for (bucket
= pp
= hash
->index
[index
]; bucket
; bucket
= bucket
->next
) {
209 if (bucket
->key
== key
210 && (*hash
->hash_cmp
)(bucket
->data
, data
)) {
211 int oldlen
= hash
->index
[index
]->len
;
212 int newlen
= oldlen
- 1;
215 hash
->index
[index
] = bucket
->next
;
217 pp
->next
= bucket
->next
;
219 if (hash
->index
[index
])
220 hash
->index
[index
]->len
= newlen
;
224 hash_update_ssq(hash
, oldlen
, newlen
);
227 XFREE(MTYPE_HASH_BUCKET
, bucket
);
234 frrtrace(3, frr_libfrr
, hash_release
, hash
, data
, ret
);
239 void hash_iterate(struct hash
*hash
, void (*func
)(struct hash_bucket
*, void *),
243 struct hash_bucket
*hb
;
244 struct hash_bucket
*hbnext
;
246 for (i
= 0; i
< hash
->size
; i
++)
247 for (hb
= hash
->index
[i
]; hb
; hb
= hbnext
) {
248 /* get pointer to next hash bucket here, in case (*func)
249 * decides to delete hb by calling hash_release
256 void hash_walk(struct hash
*hash
, int (*func
)(struct hash_bucket
*, void *),
260 struct hash_bucket
*hb
;
261 struct hash_bucket
*hbnext
;
262 int ret
= HASHWALK_CONTINUE
;
264 for (i
= 0; i
< hash
->size
; i
++) {
265 for (hb
= hash
->index
[i
]; hb
; hb
= hbnext
) {
266 /* get pointer to next hash bucket here, in case (*func)
267 * decides to delete hb by calling hash_release
270 ret
= (*func
)(hb
, arg
);
271 if (ret
== HASHWALK_ABORT
)
277 void hash_clean(struct hash
*hash
, void (*free_func
)(void *))
280 struct hash_bucket
*hb
;
281 struct hash_bucket
*next
;
283 for (i
= 0; i
< hash
->size
; i
++) {
284 for (hb
= hash
->index
[i
]; hb
; hb
= next
) {
288 (*free_func
)(hb
->data
);
290 XFREE(MTYPE_HASH_BUCKET
, hb
);
293 hash
->index
[i
] = NULL
;
297 hash
->stats
.empty
= hash
->size
;
300 static void hash_to_list_iter(struct hash_bucket
*hb
, void *arg
)
302 struct list
*list
= arg
;
304 listnode_add(list
, hb
->data
);
307 struct list
*hash_to_list(struct hash
*hash
)
309 struct list
*list
= list_new();
311 hash_iterate(hash
, hash_to_list_iter
, list
);
315 void hash_free(struct hash
*hash
)
317 frr_with_mutex (&_hashes_mtx
) {
319 listnode_delete(_hashes
, hash
);
320 if (_hashes
->count
== 0) {
321 list_delete(&_hashes
);
326 XFREE(MTYPE_HASH
, hash
->name
);
328 XFREE(MTYPE_HASH_INDEX
, hash
->index
);
329 XFREE(MTYPE_HASH
, hash
);
333 /* CLI commands ------------------------------------------------------------ */
335 DEFUN_NOSH(show_hash_stats
,
337 "show debugging hashtable [statistics]",
340 "Statistics about hash tables\n"
341 "Statistics about hash tables\n")
345 struct ttable
*tt
= ttable_new(&ttable_styles
[TTSTYLE_BLANK
]);
347 ttable_add_row(tt
, "Hash table|Buckets|Entries|Empty|LF|SD|FLF|SD");
348 tt
->style
.cell
.lpad
= 2;
349 tt
->style
.cell
.rpad
= 1;
350 tt
->style
.corner
= '+';
352 ttable_rowseps(tt
, 0, BOTTOM
, true, '-');
354 /* Summary statistics calculated are:
356 * - Load factor: This is the number of elements in the table divided
357 * by the number of buckets. Since this hash table implementation
358 * uses chaining, this value can be greater than 1.
359 * This number provides information on how 'full' the table is, but
360 * does not provide information on how evenly distributed the
362 * Notably, a load factor >= 1 does not imply that every bucket has
363 * an element; with a pathological hash function, all elements could
364 * be in a single bucket.
366 * - Full load factor: this is the number of elements in the table
367 * divided by the number of buckets that have some elements in them.
369 * - Std. Dev.: This is the standard deviation calculated from the
370 * relevant load factor. If the load factor is the mean of number of
371 * elements per bucket, the standard deviation measures how much any
372 * particular bucket is likely to deviate from the mean.
373 * As a rule of thumb this number should be less than 2, and ideally
374 * <= 1 for optimal performance. A number larger than 3 generally
375 * indicates a poor hash function.
378 double lf
; // load factor
379 double flf
; // full load factor
380 double var
; // overall variance
381 double fvar
; // full variance
382 double stdv
; // overall stddev
383 double fstdv
; // full stddev
385 long double x2
; // h->count ^ 2
386 long double ldc
; // (long double) h->count
387 long double full
; // h->size - h->stats.empty
388 long double ssq
; // ssq casted to long double
390 pthread_mutex_lock(&_hashes_mtx
);
392 pthread_mutex_unlock(&_hashes_mtx
);
394 vty_out(vty
, "No hash tables in use.\n");
398 for (ALL_LIST_ELEMENTS_RO(_hashes
, ln
, h
)) {
402 ssq
= (long double)h
->stats
.ssq
;
403 x2
= h
->count
* h
->count
;
404 ldc
= (long double)h
->count
;
405 full
= h
->size
- h
->stats
.empty
;
406 lf
= h
->count
/ (double)h
->size
;
407 flf
= full
? h
->count
/ (double)(full
) : 0;
408 var
= ldc
? (1.0 / ldc
) * (ssq
- x2
/ ldc
) : 0;
409 fvar
= full
? (1.0 / full
) * (ssq
- x2
/ full
) : 0;
410 var
= (var
< .0001) ? 0 : var
;
411 fvar
= (fvar
< .0001) ? 0 : fvar
;
415 ttable_add_row(tt
, "%s|%d|%ld|%.0f%%|%.2lf|%.2lf|%.2lf|%.2lf",
416 h
->name
, h
->size
, h
->count
,
417 (h
->stats
.empty
/ (double)h
->size
) * 100, lf
,
420 pthread_mutex_unlock(&_hashes_mtx
);
423 char header
[] = "Showing hash table statistics for ";
424 char underln
[sizeof(header
) + strlen(frr_protonameinst
)];
425 memset(underln
, '-', sizeof(underln
));
426 underln
[sizeof(underln
) - 1] = '\0';
427 vty_out(vty
, "%s%s\n", header
, frr_protonameinst
);
428 vty_out(vty
, "%s\n", underln
);
430 vty_out(vty
, "# allocated: %d\n", _hashes
->count
);
431 vty_out(vty
, "# named: %d\n\n", tt
->nrows
- 1);
434 ttable_colseps(tt
, 0, RIGHT
, true, '|');
435 char *table
= ttable_dump(tt
, "\n");
436 vty_out(vty
, "%s\n", table
);
437 XFREE(MTYPE_TMP
, table
);
439 vty_out(vty
, "No named hash tables to display.\n");
446 void hash_cmd_init(void)
448 install_element(ENABLE_NODE
, &show_hash_stats_cmd
);