lib/memory.c
// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2015-16 David Lamparter, for NetDEF, Inc.
 */

#include <zebra.h>

#include <stdlib.h>
#ifdef HAVE_MALLOC_H
#include <malloc.h>
#endif
#ifdef HAVE_MALLOC_NP_H
#include <malloc_np.h>
#endif
#ifdef HAVE_MALLOC_MALLOC_H
#include <malloc/malloc.h>
#endif

#include "memory.h"
#include "log.h"
#include "libfrr_trace.h"

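/*
 * Head of the global list of memory groups, and the insertion point used
 * when new groups are registered (the DEFINE_MGROUP()/DEFINE_MTYPE()
 * registration machinery lives in memory.h).  qmem_walk() below iterates
 * this list.
 */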
static struct memgroup *mg_first = NULL;
struct memgroup **mg_insert = &mg_first;

DEFINE_MGROUP(LIB, "libfrr");
DEFINE_MTYPE(LIB, TMP, "Temporary memory");
DEFINE_MTYPE(LIB, BITFIELD, "Bitfield memory");

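/*
 * Per-memtype accounting for a successful allocation: bump the allocation
 * count and its high-water mark, and remember the object size (collapsing
 * to SIZE_VAR once differently-sized allocations are seen).  The weak
 * compare-exchange on the high-water mark may lose a race; that is fine,
 * these statistics only need to be approximate.  If malloc_usable_size()
 * is available, total/max_size additionally track actual heap usage.
 */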
static inline void mt_count_alloc(struct memtype *mt, size_t size, void *ptr)
{
	size_t current;
	size_t oldsize;

	current = 1 + atomic_fetch_add_explicit(&mt->n_alloc, 1,
						memory_order_relaxed);

	oldsize = atomic_load_explicit(&mt->n_max, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->n_max, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);

	oldsize = atomic_load_explicit(&mt->size, memory_order_relaxed);
	if (oldsize == 0)
		oldsize = atomic_exchange_explicit(&mt->size, size,
						   memory_order_relaxed);
	if (oldsize != 0 && oldsize != size && oldsize != SIZE_VAR)
		atomic_store_explicit(&mt->size, SIZE_VAR,
				      memory_order_relaxed);

#ifdef HAVE_MALLOC_USABLE_SIZE
	size_t mallocsz = malloc_usable_size(ptr);

	current = mallocsz + atomic_fetch_add_explicit(&mt->total, mallocsz,
						       memory_order_relaxed);
	oldsize = atomic_load_explicit(&mt->max_size, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->max_size, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);
#endif
}

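/*
 * Per-memtype accounting for a free: drop the allocation count (which must
 * not already be zero) and, if malloc_usable_size() is available, credit
 * the released heap space back against the total.
 */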
static inline void mt_count_free(struct memtype *mt, void *ptr)
{
	frrtrace(2, frr_libfrr, memfree, mt, ptr);

	assert(mt->n_alloc);
	atomic_fetch_sub_explicit(&mt->n_alloc, 1, memory_order_relaxed);

#ifdef HAVE_MALLOC_USABLE_SIZE
	size_t mallocsz = malloc_usable_size(ptr);

	atomic_fetch_sub_explicit(&mt->total, mallocsz, memory_order_relaxed);
#endif
}

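/*
 * Common post-allocation check: a NULL result for a nonzero size is an
 * out-of-memory condition and is handed to memory_oom(); malloc(0) is
 * allowed to return NULL and passes through.  Successful allocations are
 * counted before the pointer is returned.
 */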
static inline void *mt_checkalloc(struct memtype *mt, void *ptr, size_t size)
{
	frrtrace(3, frr_libfrr, memalloc, mt, ptr, size);

	if (__builtin_expect(ptr == NULL, 0)) {
		if (size) {
			/* malloc(0) is allowed to return NULL */
			memory_oom(size, mt->name);
		}
		return NULL;
	}
	mt_count_alloc(mt, size, ptr);
	return ptr;
}

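/*
 * Public allocation entry points.  In FRR code these are normally not
 * called directly but through the wrapper macros from memory.h
 * (XMALLOC(), XCALLOC(), XREALLOC(), XSTRDUP(), XFREE()), e.g.
 * (illustrative):
 *
 *	struct foo *f = XCALLOC(MTYPE_TMP, sizeof(*f));
 *	...
 *	XFREE(MTYPE_TMP, f);
 *
 * so that every allocation is charged against a registered memtype.
 */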
void *qmalloc(struct memtype *mt, size_t size)
{
	return mt_checkalloc(mt, malloc(size), size);
}

void *qcalloc(struct memtype *mt, size_t size)
{
	return mt_checkalloc(mt, calloc(size, 1), size);
}

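/*
 * For realloc the old allocation is un-counted first and the result is
 * counted as a fresh allocation of the new size; a NULL input degrades to
 * plain malloc().
 */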
void *qrealloc(struct memtype *mt, void *ptr, size_t size)
{
	if (ptr)
		mt_count_free(mt, ptr);
	return mt_checkalloc(mt, ptr ? realloc(ptr, size) : malloc(size), size);
}

void *qstrdup(struct memtype *mt, const char *str)
{
	return str ? mt_checkalloc(mt, strdup(str), strlen(str) + 1) : NULL;
}

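/*
 * Remove ptr from the accounting without actually freeing it, typically
 * for memory whose actual free() happens outside the MTYPE wrappers.
 */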
void qcountfree(struct memtype *mt, void *ptr)
{
	if (ptr)
		mt_count_free(mt, ptr);
}

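/*
 * Counted counterpart to free(); a NULL pointer is accepted and ignored,
 * matching free() semantics.
 */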
void qfree(struct memtype *mt, void *ptr)
{
	if (ptr)
		mt_count_free(mt, ptr);
	free(ptr);
}

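/*
 * Iterate over all registered memory groups and types.  func is invoked
 * once per group with mt == NULL, then once for each memtype in that
 * group; a nonzero return value aborts the walk and is passed back to the
 * caller.
 */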
int qmem_walk(qmem_walk_fn *func, void *arg)
{
	struct memgroup *mg;
	struct memtype *mt;
	int rv;

	for (mg = mg_first; mg; mg = mg->next) {
		if ((rv = func(arg, mg, NULL)))
			return rv;
		for (mt = mg->types; mt; mt = mt->next)
			if ((rv = func(arg, mg, mt)))
				return rv;
	}
	return 0;
}

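/* walk context for log_memstats() below */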
struct exit_dump_args {
	FILE *fp;
	const char *prefix;
	int error;
};

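/*
 * qmem_walk callback: print a header line for each group and one line for
 * every memtype that still has live allocations.  Such leftovers count as
 * errors unless the group is flagged active_at_exit.
 */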
static int qmem_exit_walker(void *arg, struct memgroup *mg, struct memtype *mt)
{
	struct exit_dump_args *eda = arg;

	if (!mt) {
		fprintf(eda->fp,
			"%s: showing active allocations in memory group %s\n",
			eda->prefix, mg->name);

	} else if (mt->n_alloc) {
		char size[32];
		if (!mg->active_at_exit)
			eda->error++;
		snprintf(size, sizeof(size), "%10zu", mt->size);
		fprintf(eda->fp, "%s: memstats: %-30s: %6zu * %s\n",
			eda->prefix, mt->name, mt->n_alloc,
			mt->size == SIZE_VAR ? "(variably sized)" : size);
	}
	return 0;
}

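/*
 * Dump current allocation statistics for all memtypes to fp, each line
 * prefixed with the given string.  Returns the number of memtypes with
 * unexpected live allocations, i.e. a nonzero result indicates a probable
 * leak.
 */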
int log_memstats(FILE *fp, const char *prefix)
{
	struct exit_dump_args eda = {.fp = fp, .prefix = prefix, .error = 0};
	qmem_walk(qmem_exit_walker, &eda);
	return eda.error;
}