]> git.proxmox.com Git - mirror_frr.git/blob - lib/memory.c
Merge pull request #8749 from pjdruddy/bitfield_mtype
[mirror_frr.git] / lib / memory.c
1 /*
2 * Copyright (c) 2015-16 David Lamparter, for NetDEF, Inc.
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <zebra.h>
18
19 #include <stdlib.h>
20 #ifdef HAVE_MALLOC_H
21 #include <malloc.h>
22 #endif
23 #ifdef HAVE_MALLOC_NP_H
24 #include <malloc_np.h>
25 #endif
26 #ifdef HAVE_MALLOC_MALLOC_H
27 #include <malloc/malloc.h>
28 #endif
29
30 #include "memory.h"
31 #include "log.h"
32 #include "libfrr_trace.h"
33
/* Head of the global list of memory groups.  NOTE(review): presumably the
 * DEFINE_MGROUP() constructors link new groups in via mg_insert — confirm
 * against memory.h. */
static struct memgroup *mg_first = NULL;
/* Tail insertion point for the next group to be registered. */
struct memgroup **mg_insert = &mg_first;

/* Memory group and basic memtypes owned by libfrr itself. */
DEFINE_MGROUP(LIB, "libfrr");
DEFINE_MTYPE(LIB, TMP, "Temporary memory");
DEFINE_MTYPE(LIB, BITFIELD, "Bitfield memory");
40
/* Record a successful allocation of `size` bytes at `ptr` against `mt`.
 *
 * All counters are atomics updated with relaxed ordering: these are pure
 * statistics, so cheap approximate updates are acceptable and no
 * cross-thread ordering is required.
 */
static inline void mt_count_alloc(struct memtype *mt, size_t size, void *ptr)
{
	size_t current;
	size_t oldsize;

	/* bump live-allocation count; `current` is the post-increment value */
	current = 1 + atomic_fetch_add_explicit(&mt->n_alloc, 1,
						memory_order_relaxed);

	/* maintain high-water mark of simultaneous allocations */
	oldsize = atomic_load_explicit(&mt->n_max, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->n_max, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);

	/* track per-object size: the first allocation records it; if a
	 * later allocation differs, collapse to SIZE_VAR (reported as
	 * "variably sized" by the stats dump) */
	oldsize = atomic_load_explicit(&mt->size, memory_order_relaxed);
	if (oldsize == 0)
		oldsize = atomic_exchange_explicit(&mt->size, size,
						   memory_order_relaxed);
	if (oldsize != 0 && oldsize != size && oldsize != SIZE_VAR)
		atomic_store_explicit(&mt->size, SIZE_VAR,
				      memory_order_relaxed);

#ifdef HAVE_MALLOC_USABLE_SIZE
	/* account actual heap usage (may exceed `size` due to allocator
	 * rounding) and keep a byte-total high-water mark */
	size_t mallocsz = malloc_usable_size(ptr);

	current = mallocsz + atomic_fetch_add_explicit(&mt->total, mallocsz,
						       memory_order_relaxed);
	oldsize = atomic_load_explicit(&mt->max_size, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->max_size, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);
#endif
}
79
/* Record that the allocation at `ptr` accounted to `mt` is being freed.
 * Must be called before the pointer is handed back to the allocator
 * (malloc_usable_size() is queried on the still-live pointer).
 * The assert fires on a free against a type with no live allocations,
 * i.e. a double free or an MTYPE mismatch. */
static inline void mt_count_free(struct memtype *mt, void *ptr)
{
	frrtrace(2, frr_libfrr, memfree, mt, ptr);

	assert(mt->n_alloc);
	atomic_fetch_sub_explicit(&mt->n_alloc, 1, memory_order_relaxed);

#ifdef HAVE_MALLOC_USABLE_SIZE
	size_t mallocsz = malloc_usable_size(ptr);

	atomic_fetch_sub_explicit(&mt->total, mallocsz, memory_order_relaxed);
#endif
}
93
94 static inline void *mt_checkalloc(struct memtype *mt, void *ptr, size_t size)
95 {
96 frrtrace(3, frr_libfrr, memalloc, mt, ptr, size);
97
98 if (__builtin_expect(ptr == NULL, 0)) {
99 if (size) {
100 /* malloc(0) is allowed to return NULL */
101 memory_oom(size, mt->name);
102 }
103 return NULL;
104 }
105 mt_count_alloc(mt, size, ptr);
106 return ptr;
107 }
108
/* Allocate `size` bytes accounted against `mt`; see malloc(3).
 * Aborts via memory_oom() on failure for non-zero sizes. */
void *qmalloc(struct memtype *mt, size_t size)
{
	void *ptr = malloc(size);

	return mt_checkalloc(mt, ptr, size);
}
113
/* Allocate `size` zero-initialized bytes accounted against `mt`;
 * see calloc(3). */
void *qcalloc(struct memtype *mt, size_t size)
{
	void *ptr = calloc(size, 1);

	return mt_checkalloc(mt, ptr, size);
}
118
/* Resize `ptr` to `size` bytes, accounted against `mt`.  A NULL `ptr`
 * behaves like qmalloc().  Accounting for the old block is dropped
 * before the resize and re-added for the resulting block. */
void *qrealloc(struct memtype *mt, void *ptr, size_t size)
{
	void *newptr;

	if (ptr) {
		mt_count_free(mt, ptr);
		newptr = realloc(ptr, size);
	} else {
		newptr = malloc(size);
	}

	return mt_checkalloc(mt, newptr, size);
}
125
/* Duplicate `str` into memory accounted against `mt`.
 * NULL in, NULL out; otherwise the copy includes the terminating NUL. */
void *qstrdup(struct memtype *mt, const char *str)
{
	if (!str)
		return NULL;

	return mt_checkalloc(mt, strdup(str), strlen(str) + 1);
}
130
/* Drop the accounting for `ptr` against `mt` without freeing it
 * (the memory is released through some other path).  NULL is a no-op. */
void qcountfree(struct memtype *mt, void *ptr)
{
	if (!ptr)
		return;

	mt_count_free(mt, ptr);
}
136
/* Free `ptr` and update the statistics of `mt`.
 * Like free(3), a NULL `ptr` is a no-op. */
void qfree(struct memtype *mt, void *ptr)
{
	if (ptr != NULL)
		mt_count_free(mt, ptr);

	free(ptr);
}
143
144 int qmem_walk(qmem_walk_fn *func, void *arg)
145 {
146 struct memgroup *mg;
147 struct memtype *mt;
148 int rv;
149
150 for (mg = mg_first; mg; mg = mg->next) {
151 if ((rv = func(arg, mg, NULL)))
152 return rv;
153 for (mt = mg->types; mt; mt = mt->next)
154 if ((rv = func(arg, mg, mt)))
155 return rv;
156 }
157 return 0;
158 }
159
/* State shared between log_memstats() and its qmem_walk() callback. */
struct exit_dump_args {
	FILE *fp;           /* destination stream for the report */
	const char *prefix; /* string prepended to every output line */
	int error;          /* memtypes with live allocations in groups
			     * not marked active_at_exit */
};
165
166 static int qmem_exit_walker(void *arg, struct memgroup *mg, struct memtype *mt)
167 {
168 struct exit_dump_args *eda = arg;
169
170 if (!mt) {
171 fprintf(eda->fp,
172 "%s: showing active allocations in memory group %s\n",
173 eda->prefix, mg->name);
174
175 } else if (mt->n_alloc) {
176 char size[32];
177 if (!mg->active_at_exit)
178 eda->error++;
179 snprintf(size, sizeof(size), "%10zu", mt->size);
180 fprintf(eda->fp, "%s: memstats: %-30s: %6zu * %s\n",
181 eda->prefix, mt->name, mt->n_alloc,
182 mt->size == SIZE_VAR ? "(variably sized)" : size);
183 }
184 return 0;
185 }
186
187 int log_memstats(FILE *fp, const char *prefix)
188 {
189 struct exit_dump_args eda = {.fp = fp, .prefix = prefix, .error = 0};
190 qmem_walk(qmem_exit_walker, &eda);
191 return eda.error;
192 }