// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2015-16  David Lamparter, for NetDEF, Inc.
 */
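
/*
 * Accounting wrappers around the C allocator: allocations made through
 * these wrappers are tagged with a "memory type" (MTYPE) so that
 * per-type counts and sizes can be reported at runtime and checked for
 * leftovers at exit.
 */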

#include <zebra.h>

#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#ifdef HAVE_MALLOC_NP_H
#include <malloc_np.h>
#endif
#ifdef HAVE_MALLOC_MALLOC_H
#include <malloc/malloc.h>
#endif

#include "memory.h"
#include "log.h"
#include "libfrr_trace.h"

static struct memgroup *mg_first = NULL;
struct memgroup **mg_insert = &mg_first;

DEFINE_MGROUP(LIB, "libfrr");
DEFINE_MTYPE(LIB, TMP, "Temporary memory");
DEFINE_MTYPE(LIB, BITFIELD, "Bitfield memory");

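/*
 * Usage sketch (illustrative only; FOO and FOO_IF are made-up names):
 * daemons define their own groups and types, then allocate through the
 * XMALLOC/XCALLOC/XFREE macros from memory.h, which expand to the q*()
 * functions below:
 *
 *	DEFINE_MGROUP(FOO, "foo daemon");
 *	DEFINE_MTYPE_STATIC(FOO, FOO_IF, "foo interface");
 *
 *	struct foo_if *fi = XCALLOC(MTYPE_FOO_IF, sizeof(*fi));
 *	...
 *	XFREE(MTYPE_FOO_IF, fi);
 */
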
static inline void mt_count_alloc(struct memtype *mt, size_t size, void *ptr)
{
	size_t current;
	size_t oldsize;

	current = 1 + atomic_fetch_add_explicit(&mt->n_alloc, 1,
						memory_order_relaxed);

	oldsize = atomic_load_explicit(&mt->n_max, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->n_max, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);

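	/*
	 * Record the object size on first allocation; if later allocations
	 * of this type differ in size, collapse mt->size to SIZE_VAR so it
	 * is reported as "(variably sized)".
	 */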
	oldsize = atomic_load_explicit(&mt->size, memory_order_relaxed);
	if (oldsize == 0)
		oldsize = atomic_exchange_explicit(&mt->size, size,
						   memory_order_relaxed);
	if (oldsize != 0 && oldsize != size && oldsize != SIZE_VAR)
		atomic_store_explicit(&mt->size, SIZE_VAR,
				      memory_order_relaxed);

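	/*
	 * Where malloc_usable_size() is available, also track current and
	 * peak totals of the bytes the allocator actually reserved for
	 * this type.
	 */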
#ifdef HAVE_MALLOC_USABLE_SIZE
	size_t mallocsz = malloc_usable_size(ptr);

	current = mallocsz + atomic_fetch_add_explicit(&mt->total, mallocsz,
						       memory_order_relaxed);
	oldsize = atomic_load_explicit(&mt->max_size, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->max_size, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);
#endif
}

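/*
 * The assert() below catches frees that were never counted against this
 * type, e.g. a double free or a free under the wrong MTYPE.
 */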
static inline void mt_count_free(struct memtype *mt, void *ptr)
{
	frrtrace(2, frr_libfrr, memfree, mt, ptr);

	assert(mt->n_alloc);
	atomic_fetch_sub_explicit(&mt->n_alloc, 1, memory_order_relaxed);

#ifdef HAVE_MALLOC_USABLE_SIZE
	size_t mallocsz = malloc_usable_size(ptr);

	atomic_fetch_sub_explicit(&mt->total, mallocsz, memory_order_relaxed);
#endif
}

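/*
 * Common result check for all allocating wrappers: a NULL result for a
 * nonzero size aborts via memory_oom(); malloc(0) may legitimately
 * return NULL and is passed through uncounted.
 */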
static inline void *mt_checkalloc(struct memtype *mt, void *ptr, size_t size)
{
	frrtrace(3, frr_libfrr, memalloc, mt, ptr, size);

	if (__builtin_expect(ptr == NULL, 0)) {
		if (size) {
			/* malloc(0) is allowed to return NULL */
			memory_oom(size, mt->name);
		}
		return NULL;
	}
	mt_count_alloc(mt, size, ptr);
	return ptr;
}

void *qmalloc(struct memtype *mt, size_t size)
{
	return mt_checkalloc(mt, malloc(size), size);
}

void *qcalloc(struct memtype *mt, size_t size)
{
	return mt_checkalloc(mt, calloc(size, 1), size);
}

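/*
 * For accounting purposes, realloc is treated as a free of the old
 * block plus an allocation of the new one; a NULL input pointer falls
 * through to plain malloc() and nothing is counted as freed.
 */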
void *qrealloc(struct memtype *mt, void *ptr, size_t size)
{
	if (ptr)
		mt_count_free(mt, ptr);
	return mt_checkalloc(mt, ptr ? realloc(ptr, size) : malloc(size),
			     size);
}

void *qstrdup(struct memtype *mt, const char *str)
{
	return str ? mt_checkalloc(mt, strdup(str), strlen(str) + 1) : NULL;
}

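/*
 * Remove a pointer from the statistics without freeing it, for memory
 * whose ownership is handed off to code that releases it outside these
 * wrappers.
 */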
void qcountfree(struct memtype *mt, void *ptr)
{
	if (ptr)
		mt_count_free(mt, ptr);
}

void qfree(struct memtype *mt, void *ptr)
{
	if (ptr)
		mt_count_free(mt, ptr);
	free(ptr);
}

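/*
 * Walk all memory groups and types: func is invoked once per group with
 * mt == NULL, then once per type in that group.  A nonzero return value
 * stops the walk and is passed through to the caller.
 */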
int qmem_walk(qmem_walk_fn *func, void *arg)
{
	struct memgroup *mg;
	struct memtype *mt;
	int rv;

	for (mg = mg_first; mg; mg = mg->next) {
		if ((rv = func(arg, mg, NULL)))
			return rv;
		for (mt = mg->types; mt; mt = mt->next)
			if ((rv = func(arg, mg, mt)))
				return rv;
	}
	return 0;
}

struct exit_dump_args {
	FILE *fp;
	const char *prefix;
	int error;
};

static int qmem_exit_walker(void *arg, struct memgroup *mg, struct memtype *mt)
{
	struct exit_dump_args *eda = arg;

	if (!mt) {
		fprintf(eda->fp,
			"%s: showing active allocations in memory group %s\n",
			eda->prefix, mg->name);
	} else if (mt->n_alloc) {
		char size[32];

		if (!mg->active_at_exit)
			eda->error++;
		snprintf(size, sizeof(size), "%10zu", mt->size);
		fprintf(eda->fp, "%s: memstats: %-30s: %6zu * %s\n",
			eda->prefix, mt->name, mt->n_alloc,
			mt->size == SIZE_VAR ? "(variably sized)" : size);
	}
	return 0;
}

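/*
 * Dump all in-use allocations to fp.  The return value counts the
 * memory types that still have live allocations in groups not marked
 * active_at_exit, i.e. likely leaks when this runs at shutdown.
 */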
int log_memstats(FILE *fp, const char *prefix)
{
	struct exit_dump_args eda = {.fp = fp, .prefix = prefix, .error = 0};
	qmem_walk(qmem_exit_walker, &eda);
	return eda.error;
}
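
/*
 * Example call site (hypothetical; "zebra" is just a sample prefix):
 * report leftover allocations when a daemon exits.
 *
 *	if (log_memstats(stderr, "zebra"))
 *		zlog_warn("memstats found leftover memory allocations");
 */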