/* lib/memory.c — FRR memory-type accounting (web-scrape navigation header removed) */
1 /*
2 * Copyright (c) 2015-16 David Lamparter, for NetDEF, Inc.
3 *
4 * Permission to use, copy, modify, and distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17 #include <zebra.h>
18
19 #include <stdlib.h>
20
21 #include "memory.h"
22 #include "log.h"
23
/* Head of the global singly-linked list of memory groups.
 * NOTE(review): presumably appended to by DEFINE_MGROUP constructors via
 * mg_insert — confirm against the macro definitions in memory.h. */
static struct memgroup *mg_first = NULL;
/* Insertion point (tail link) for registering the next memory group. */
struct memgroup **mg_insert = &mg_first;

/* Memory group and types owned by the library itself. */
DEFINE_MGROUP(LIB, "libfrr")
DEFINE_MTYPE(LIB, TMP, "Temporary memory")
DEFINE_MTYPE(LIB, PREFIX_FLOWSPEC, "Prefix Flowspec")
30
/* Record one allocation of `size` bytes against memtype `mt`.
 *
 * Bumps the per-type allocation counter and tracks the allocation size:
 * mt->size starts at 0 (unknown), is set to `size` on the first
 * allocation, and collapses to SIZE_VAR once allocations of differing
 * sizes are observed.  All atomics use relaxed ordering — these are
 * statistics counters, not synchronization; races may transiently
 * misreport size but never corrupt the counters. */
static inline void mt_count_alloc(struct memtype *mt, size_t size)
{
	size_t oldsize;

	atomic_fetch_add_explicit(&mt->n_alloc, 1, memory_order_relaxed);

	oldsize = atomic_load_explicit(&mt->size, memory_order_relaxed);
	if (oldsize == 0)
		/* exchange (not store) so a concurrent writer's value is
		 * observed in oldsize and re-checked below */
		oldsize = atomic_exchange_explicit(&mt->size, size,
						   memory_order_relaxed);
	if (oldsize != 0 && oldsize != size && oldsize != SIZE_VAR)
		atomic_store_explicit(&mt->size, SIZE_VAR,
				      memory_order_relaxed);
}
45
/* Record one deallocation against memtype `mt`.
 * The assert catches a free without a matching counted allocation
 * (double free or mismatched memtype); relaxed ordering to match
 * mt_count_alloc(). */
static inline void mt_count_free(struct memtype *mt)
{
	assert(mt->n_alloc);
	atomic_fetch_sub_explicit(&mt->n_alloc, 1, memory_order_relaxed);
}
51
52 static inline void *mt_checkalloc(struct memtype *mt, void *ptr, size_t size)
53 {
54 if (__builtin_expect(ptr == NULL, 0)) {
55 if (size) {
56 /* malloc(0) is allowed to return NULL */
57 memory_oom(size, mt->name);
58 }
59 return NULL;
60 }
61 mt_count_alloc(mt, size);
62 return ptr;
63 }
64
/* Counted malloc(): allocate `size` bytes attributed to `mt`. */
void *qmalloc(struct memtype *mt, size_t size)
{
	void *p = malloc(size);

	return mt_checkalloc(mt, p, size);
}
69
/* Counted calloc(): allocate `size` zero-filled bytes attributed to `mt`. */
void *qcalloc(struct memtype *mt, size_t size)
{
	void *p = calloc(size, 1);

	return mt_checkalloc(mt, p, size);
}
74
/* Counted realloc(): resize `ptr` (or allocate fresh when NULL),
 * keeping mt's statistics consistent.  The old allocation is
 * un-counted first; mt_checkalloc() re-counts the result. */
void *qrealloc(struct memtype *mt, void *ptr, size_t size)
{
	void *newptr;

	if (ptr) {
		mt_count_free(mt);
		newptr = realloc(ptr, size);
	} else {
		newptr = malloc(size);
	}

	return mt_checkalloc(mt, newptr, size);
}
81
/* Counted strdup(): duplicate `str` attributed to `mt`.
 *
 * Fix: the original called strdup()/strlen() unconditionally, which is
 * undefined behavior when `str` is NULL.  Return NULL for NULL input
 * instead — backward-compatible, since any prior NULL caller crashed. */
void *qstrdup(struct memtype *mt, const char *str)
{
	return str ? mt_checkalloc(mt, strdup(str), strlen(str) + 1) : NULL;
}
86
/* Counted free(): release `ptr` and decrement mt's allocation count.
 * free(NULL) is a no-op and is not counted. */
void qfree(struct memtype *mt, void *ptr)
{
	if (ptr != NULL)
		mt_count_free(mt);

	free(ptr);
}
93
94 int qmem_walk(qmem_walk_fn *func, void *arg)
95 {
96 struct memgroup *mg;
97 struct memtype *mt;
98 int rv;
99
100 for (mg = mg_first; mg; mg = mg->next) {
101 if ((rv = func(arg, mg, NULL)))
102 return rv;
103 for (mt = mg->types; mt; mt = mt->next)
104 if ((rv = func(arg, mg, mt)))
105 return rv;
106 }
107 return 0;
108 }
109
/* Walker context for qmem_exit_walker() / log_memstats(). */
struct exit_dump_args {
	FILE *fp;		/* output stream for the report */
	const char *prefix;	/* string prepended to every output line */
	int error;		/* count of memtypes with live allocations */
};
115
116 static int qmem_exit_walker(void *arg, struct memgroup *mg, struct memtype *mt)
117 {
118 struct exit_dump_args *eda = arg;
119
120 if (!mt) {
121 fprintf(eda->fp,
122 "%s: showing active allocations in "
123 "memory group %s\n",
124 eda->prefix, mg->name);
125
126 } else if (mt->n_alloc) {
127 char size[32];
128 eda->error++;
129 snprintf(size, sizeof(size), "%10zu", mt->size);
130 fprintf(eda->fp, "%s: memstats: %-30s: %6zu * %s\n",
131 eda->prefix, mt->name, mt->n_alloc,
132 mt->size == SIZE_VAR ? "(variably sized)" : size);
133 }
134 return 0;
135 }
136
137 int log_memstats(FILE *fp, const char *prefix)
138 {
139 struct exit_dump_args eda = {.fp = fp, .prefix = prefix, .error = 0};
140 qmem_walk(qmem_exit_walker, &eda);
141 return eda.error;
142 }