]> git.proxmox.com Git - mirror_frr.git/blob - lib/memory.c
Merge pull request #7601 from patrasar/pim_fix
[mirror_frr.git] / lib / memory.c
1 /*
2 * Copyright (c) 2015-16 David Lamparter, for NetDEF, Inc.
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <zebra.h>
18
19 #include <stdlib.h>
20 #ifdef HAVE_MALLOC_H
21 #include <malloc.h>
22 #endif
23 #ifdef HAVE_MALLOC_NP_H
24 #include <malloc_np.h>
25 #endif
26 #ifdef HAVE_MALLOC_MALLOC_H
27 #include <malloc/malloc.h>
28 #endif
29
30 #include "memory.h"
31 #include "log.h"
32 #include "libfrr_trace.h"
33
/* Head of the singly-linked list of all registered memory groups. */
static struct memgroup *mg_first = NULL;
/* Tail insertion point: group registration (DEFINE_MGROUP constructors)
 * appends here, so the list preserves registration order. */
struct memgroup **mg_insert = &mg_first;

DEFINE_MGROUP(LIB, "libfrr")
DEFINE_MTYPE(LIB, TMP, "Temporary memory")
39
/* Record one allocation of SIZE bytes at PTR against memtype MT.
 *
 * All statistics use relaxed atomics: the counters are advisory
 * (for "show memory" style reporting), not synchronization points,
 * so slightly stale high-water marks are acceptable.
 */
static inline void mt_count_alloc(struct memtype *mt, size_t size, void *ptr)
{
	size_t current;
	size_t oldsize;

	/* value of the live-allocation counter after this allocation */
	current = 1 + atomic_fetch_add_explicit(&mt->n_alloc, 1,
						memory_order_relaxed);

	oldsize = atomic_load_explicit(&mt->n_max, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->n_max, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);

	/* track per-object size: the first allocation records it; any later
	 * allocation with a different size demotes it to SIZE_VAR, which the
	 * reporting code prints as "(variably sized)" */
	oldsize = atomic_load_explicit(&mt->size, memory_order_relaxed);
	if (oldsize == 0)
		oldsize = atomic_exchange_explicit(&mt->size, size,
						   memory_order_relaxed);
	if (oldsize != 0 && oldsize != size && oldsize != SIZE_VAR)
		atomic_store_explicit(&mt->size, SIZE_VAR,
				      memory_order_relaxed);

#ifdef HAVE_MALLOC_USABLE_SIZE
	/* where the allocator exposes it, also track total heap usage in
	 * actual (allocator-rounded) bytes rather than requested bytes */
	size_t mallocsz = malloc_usable_size(ptr);

	current = mallocsz + atomic_fetch_add_explicit(&mt->total, mallocsz,
						       memory_order_relaxed);
	oldsize = atomic_load_explicit(&mt->max_size, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->max_size, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);
#endif
}
78
79 static inline void mt_count_free(struct memtype *mt, void *ptr)
80 {
81 frrtrace(2, frr_libfrr, memfree, mt, ptr);
82
83 assert(mt->n_alloc);
84 atomic_fetch_sub_explicit(&mt->n_alloc, 1, memory_order_relaxed);
85
86 #ifdef HAVE_MALLOC_USABLE_SIZE
87 size_t mallocsz = malloc_usable_size(ptr);
88
89 atomic_fetch_sub_explicit(&mt->total, mallocsz, memory_order_relaxed);
90 #endif
91 }
92
93 static inline void *mt_checkalloc(struct memtype *mt, void *ptr, size_t size)
94 {
95 frrtrace(3, frr_libfrr, memalloc, mt, ptr, size);
96
97 if (__builtin_expect(ptr == NULL, 0)) {
98 if (size) {
99 /* malloc(0) is allowed to return NULL */
100 memory_oom(size, mt->name);
101 }
102 return NULL;
103 }
104 mt_count_alloc(mt, size, ptr);
105 return ptr;
106 }
107
/* malloc() replacement with per-MTYPE accounting; aborts on OOM. */
void *qmalloc(struct memtype *mt, size_t size)
{
	void *p = malloc(size);

	return mt_checkalloc(mt, p, size);
}
112
/* calloc() replacement (zero-initialized) with per-MTYPE accounting. */
void *qcalloc(struct memtype *mt, size_t size)
{
	void *p = calloc(size, 1);

	return mt_checkalloc(mt, p, size);
}
117
/* realloc() replacement with per-MTYPE accounting.
 * A NULL PTR behaves like qmalloc(); note that on failure the old
 * accounting has already been dropped, but mt_checkalloc() aborts in
 * that case anyway. */
void *qrealloc(struct memtype *mt, void *ptr, size_t size)
{
	void *resized;

	if (ptr) {
		/* uncount the old block before it may move or change size */
		mt_count_free(mt, ptr);
		resized = realloc(ptr, size);
	} else {
		resized = malloc(size);
	}

	return mt_checkalloc(mt, resized, size);
}
124
/* strdup() replacement with per-MTYPE accounting; NULL in, NULL out. */
void *qstrdup(struct memtype *mt, const char *str)
{
	if (!str)
		return NULL;

	return mt_checkalloc(mt, strdup(str), strlen(str) + 1);
}
129
/* Drop the accounting for PTR without actually freeing it — for memory
 * whose ownership is handed off to code outside the MTYPE system. */
void qcountfree(struct memtype *mt, void *ptr)
{
	if (!ptr)
		return;

	mt_count_free(mt, ptr);
}
135
/* free() replacement with per-MTYPE accounting; NULL is a no-op. */
void qfree(struct memtype *mt, void *ptr)
{
	if (!ptr)
		return; /* free(NULL) would be a no-op anyway */

	mt_count_free(mt, ptr);
	free(ptr);
}
142
143 int qmem_walk(qmem_walk_fn *func, void *arg)
144 {
145 struct memgroup *mg;
146 struct memtype *mt;
147 int rv;
148
149 for (mg = mg_first; mg; mg = mg->next) {
150 if ((rv = func(arg, mg, NULL)))
151 return rv;
152 for (mt = mg->types; mt; mt = mt->next)
153 if ((rv = func(arg, mg, mt)))
154 return rv;
155 }
156 return 0;
157 }
158
/* Walk context shared between log_memstats() and qmem_exit_walker(). */
struct exit_dump_args {
	FILE *fp;           /* destination stream for the report */
	const char *prefix; /* tag prepended to every output line */
	int error;          /* leaked memtypes not marked active_at_exit */
};
164
165 static int qmem_exit_walker(void *arg, struct memgroup *mg, struct memtype *mt)
166 {
167 struct exit_dump_args *eda = arg;
168
169 if (!mt) {
170 fprintf(eda->fp,
171 "%s: showing active allocations in memory group %s\n",
172 eda->prefix, mg->name);
173
174 } else if (mt->n_alloc) {
175 char size[32];
176 if (!mg->active_at_exit)
177 eda->error++;
178 snprintf(size, sizeof(size), "%10zu", mt->size);
179 fprintf(eda->fp, "%s: memstats: %-30s: %6zu * %s\n",
180 eda->prefix, mt->name, mt->n_alloc,
181 mt->size == SIZE_VAR ? "(variably sized)" : size);
182 }
183 return 0;
184 }
185
186 int log_memstats(FILE *fp, const char *prefix)
187 {
188 struct exit_dump_args eda = {.fp = fp, .prefix = prefix, .error = 0};
189 qmem_walk(qmem_exit_walker, &eda);
190 return eda.error;
191 }