/*
 * Copyright (c) 2015-16  David Lamparter, for NetDEF, Inc.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#ifdef HAVE_MALLOC_NP_H
#include <malloc_np.h>
#endif
#ifdef HAVE_MALLOC_MALLOC_H
#include <malloc/malloc.h>
#endif

#include "memory.h"
#include "log.h"
#include "libfrr_trace.h"
static struct memgroup *mg_first = NULL;
struct memgroup **mg_insert = &mg_first;
DEFINE_MGROUP(LIB, "libfrr");
DEFINE_MTYPE(LIB, TMP, "Temporary memory");
DEFINE_MTYPE(LIB, BITFIELD, "Bitfield memory");
static inline void mt_count_alloc(struct memtype *mt, size_t size, void *ptr)
{
	size_t current;
	size_t oldsize;

	current = 1 + atomic_fetch_add_explicit(&mt->n_alloc, 1,
						 memory_order_relaxed);

	oldsize = atomic_load_explicit(&mt->n_max, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->n_max, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);

	oldsize = atomic_load_explicit(&mt->size, memory_order_relaxed);
	if (oldsize == 0)
		oldsize = atomic_exchange_explicit(&mt->size, size,
						   memory_order_relaxed);
	if (oldsize != 0 && oldsize != size && oldsize != SIZE_VAR)
		atomic_store_explicit(&mt->size, SIZE_VAR,
				      memory_order_relaxed);

#ifdef HAVE_MALLOC_USABLE_SIZE
	size_t mallocsz = malloc_usable_size(ptr);

	current = mallocsz + atomic_fetch_add_explicit(&mt->total, mallocsz,
						       memory_order_relaxed);
	oldsize = atomic_load_explicit(&mt->max_size, memory_order_relaxed);
	if (current > oldsize)
		/* note that this may fail, but approximation is sufficient */
		atomic_compare_exchange_weak_explicit(&mt->max_size, &oldsize,
						      current,
						      memory_order_relaxed,
						      memory_order_relaxed);
#endif
}
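
/* Undo the counters from mt_count_alloc() when a tracked pointer is freed. */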
static inline void mt_count_free(struct memtype *mt, void *ptr)
{
	frrtrace(2, frr_libfrr, memfree, mt, ptr);

	atomic_fetch_sub_explicit(&mt->n_alloc, 1, memory_order_relaxed);

#ifdef HAVE_MALLOC_USABLE_SIZE
	size_t mallocsz = malloc_usable_size(ptr);

	atomic_fetch_sub_explicit(&mt->total, mallocsz, memory_order_relaxed);
#endif
}
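
/*
 * Common post-allocation check: abort via memory_oom() on allocation
 * failure (except for zero-sized requests, where a NULL return is legal),
 * then record the allocation against its MTYPE.
 */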
static inline void *mt_checkalloc(struct memtype *mt, void *ptr, size_t size)
{
	frrtrace(3, frr_libfrr, memalloc, mt, ptr, size);

	if (__builtin_expect(ptr == NULL, 0)) {
		if (size) {
			/* malloc(0) is allowed to return NULL */
			memory_oom(size, mt->name);
		}
		return NULL;
	}
	mt_count_alloc(mt, size, ptr);
	return ptr;
}
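
/*
 * MTYPE-tracked counterparts of malloc()/calloc()/realloc()/strdup()/free().
 * Callers normally reach these through the XMALLOC()/XCALLOC()/XREALLOC()/
 * XSTRDUP()/XFREE() convenience macros from memory.h rather than calling
 * them directly.
 */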
void *qmalloc(struct memtype *mt, size_t size)
{
	return mt_checkalloc(mt, malloc(size), size);
}
void *qcalloc(struct memtype *mt, size_t size)
{
	return mt_checkalloc(mt, calloc(size, 1), size);
}
void *qrealloc(struct memtype *mt, void *ptr, size_t size)
{
	if (ptr)
		mt_count_free(mt, ptr);
	return mt_checkalloc(mt, ptr ? realloc(ptr, size) : malloc(size),
			     size);
}
void *qstrdup(struct memtype *mt, const char *str)
{
	return str ? mt_checkalloc(mt, strdup(str), strlen(str) + 1) : NULL;
}
void qcountfree(struct memtype *mt, void *ptr)
{
	if (ptr)
		mt_count_free(mt, ptr);
}
void qfree(struct memtype *mt, void *ptr)
{
	if (ptr)
		mt_count_free(mt, ptr);
	free(ptr);
}
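
/*
 * Iterate over all registered memory groups and types. The callback is
 * invoked once per group with mt == NULL, then once per memtype in that
 * group; a nonzero return value aborts the walk and is passed through.
 */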
int qmem_walk(qmem_walk_fn *func, void *arg)
{
	struct memgroup *mg;
	struct memtype *mt;
	int rv;

	for (mg = mg_first; mg; mg = mg->next) {
		if ((rv = func(arg, mg, NULL)))
			return rv;
		for (mt = mg->types; mt; mt = mt->next)
			if ((rv = func(arg, mg, mt)))
				return rv;
	}
	return 0;
}
struct exit_dump_args {
	FILE *fp;
	const char *prefix;
	int error;
};

static int qmem_exit_walker(void *arg, struct memgroup *mg, struct memtype *mt)
{
	struct exit_dump_args *eda = arg;

	if (!mt) {
		fprintf(eda->fp,
			"%s: showing active allocations in memory group %s\n",
			eda->prefix, mg->name);

	} else if (mt->n_alloc) {
		char size[32];

		if (!mg->active_at_exit)
			eda->error++;

		snprintf(size, sizeof(size), "%10zu", mt->size);
		fprintf(eda->fp, "%s: memstats: %-30s: %6zu * %s\n",
			eda->prefix, mt->name, mt->n_alloc,
			mt->size == SIZE_VAR ? "(variably sized)" : size);
	}
	return 0;
}
int log_memstats(FILE *fp, const char *prefix)
{
	struct exit_dump_args eda = {.fp = fp, .prefix = prefix, .error = 0};

	qmem_walk(qmem_exit_walker, &eda);
	return eda.error;
}
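
/*
 * Usage sketch (hypothetical example, not part of this file): a daemon
 * would normally declare its own MTYPEs and allocate through the wrapper
 * macros from memory.h, e.g.
 *
 *	DEFINE_MTYPE_STATIC(LIB, EXAMPLE, "Example memory");
 *
 *	struct foo *f = XCALLOC(MTYPE_EXAMPLE, sizeof(*f));
 *	...
 *	XFREE(MTYPE_EXAMPLE, f);
 *
 * log_memstats(stderr, "mydaemon") can then be called at shutdown to
 * report any allocations still outstanding.
 */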