#define	JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */
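/*
 * All of the following data is protected by base_mtx, except
 * base_extent_sn_next, which is updated atomically so that extent serial
 * numbers remain monotonic.
 */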
static malloc_mutex_t	base_mtx;
static size_t		base_extent_sn_next;
static extent_tree_t	base_avail_szsnad;
static extent_node_t	*base_nodes;
static size_t		base_allocated;
static size_t		base_resident;
static size_t		base_mapped;

/******************************************************************************/
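/*
 * base_node_{try_alloc,dalloc}() maintain an intrusive freelist of extent
 * nodes: each free node's own storage holds a pointer to the next free node,
 * so the list needs no auxiliary allocation.
 */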
static extent_node_t *
base_node_try_alloc(tsdn_t *tsdn)
{
	extent_node_t *node;

	malloc_mutex_assert_owner(tsdn, &base_mtx);

	if (base_nodes == NULL)
		return (NULL);
	node = base_nodes;
	base_nodes = *(extent_node_t **)node;
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	return (node);
}
static void
base_node_dalloc(tsdn_t *tsdn, extent_node_t *node)
{

	malloc_mutex_assert_owner(tsdn, &base_mtx);

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
}
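/*
 * Initialize a node with the next extent serial number. The szsnad tree
 * orders extents by size, then serial number, then address, so among
 * equal-size candidates the oldest (lowest-sn) extent is preferred.
 */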
static void
base_extent_node_init(extent_node_t *node, void *addr, size_t size)
{
	size_t sn = atomic_add_z(&base_extent_sn_next, 1) - 1;

	extent_node_init(node, NULL, addr, size, sn, true, true);
}
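/*
 * Allocate a fresh chunk of at least minsize bytes. If no recycled extent
 * node is available to track it, a node is carved out of the start of the
 * new chunk itself.
 */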
static extent_node_t *
base_chunk_alloc(tsdn_t *tsdn, size_t minsize)
{
	extent_node_t *node;
	size_t csize, nsize;
	void *addr;

	malloc_mutex_assert_owner(tsdn, &base_mtx);
	assert(minsize != 0);
	node = base_node_try_alloc(tsdn);
	/* Allocate enough space to also carve a node out if necessary. */
	nsize = (node == NULL) ? CACHELINE_CEILING(sizeof(extent_node_t)) : 0;
	csize = CHUNK_CEILING(minsize + nsize);
	addr = chunk_alloc_base(csize);
	if (addr == NULL) {
		if (node != NULL)
			base_node_dalloc(tsdn, node);
		return (NULL);
	}
	base_mapped += csize;
	if (node == NULL) {
		node = (extent_node_t *)addr;
		addr = (void *)((uintptr_t)addr + nsize);
		csize -= nsize;
		if (config_stats) {
			base_allocated += nsize;
			base_resident += PAGE_CEILING(nsize);
		}
	}
	base_extent_node_init(node, addr, csize);
	return (node);
}
/*
 * base_alloc() guarantees demand-zeroed memory, in order to make multi-page
 * sparse data structures such as radix tree nodes efficient with respect to
 * physical memory usage.
 */
void *
base_alloc(tsdn_t *tsdn, size_t size)
{
	void *ret;
	size_t csize, usize;
	extent_node_t *node;
	extent_node_t key;

	/*
	 * Round size up to nearest multiple of the cacheline size, so that
	 * there is no chance of false cache line sharing.
	 */
	csize = CACHELINE_CEILING(size);

	usize = s2u(csize);
	extent_node_init(&key, NULL, NULL, usize, 0, false, false);
	malloc_mutex_lock(tsdn, &base_mtx);
	node = extent_tree_szsnad_nsearch(&base_avail_szsnad, &key);
	if (node != NULL) {
		/* Use existing space. */
		extent_tree_szsnad_remove(&base_avail_szsnad, node);
	} else {
		/* Try to allocate more space. */
		node = base_chunk_alloc(tsdn, csize);
	}
	if (node == NULL) {
		ret = NULL;
		goto label_return;
	}

	ret = extent_node_addr_get(node);
	if (extent_node_size_get(node) > csize) {
		/* Trim the remainder and return it to the avail tree. */
		extent_node_addr_set(node, (void *)((uintptr_t)ret + csize));
		extent_node_size_set(node, extent_node_size_get(node) - csize);
		extent_tree_szsnad_insert(&base_avail_szsnad, node);
	} else
		base_node_dalloc(tsdn, node);
	if (config_stats) {
		base_allocated += csize;
		/*
		 * Add one PAGE to base_resident for every page boundary that
		 * is crossed by the new allocation.
		 */
		base_resident += PAGE_CEILING((uintptr_t)ret + csize) -
		    PAGE_CEILING((uintptr_t)ret);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, csize);
label_return:
	malloc_mutex_unlock(tsdn, &base_mtx);
	return (ret);
}
void
base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
    size_t *mapped)
{

	malloc_mutex_lock(tsdn, &base_mtx);
	assert(base_allocated <= base_resident);
	assert(base_resident <= base_mapped);
	*allocated = base_allocated;
	*resident = base_resident;
	*mapped = base_mapped;
	malloc_mutex_unlock(tsdn, &base_mtx);
}
bool
base_boot(void)
{

	if (malloc_mutex_init(&base_mtx, "base", WITNESS_RANK_BASE))
		return (true);
	base_extent_sn_next = 0;
	extent_tree_szsnad_new(&base_avail_szsnad);
	base_nodes = NULL;

	return (false);
}
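/*
 * Fork handlers: base_mtx participates in the allocator-wide
 * prefork/postfork sequence so that a child process never inherits the
 * mutex in a locked state.
 */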
void
base_prefork(tsdn_t *tsdn)
{

	malloc_mutex_prefork(tsdn, &base_mtx);
}

void
base_postfork_parent(tsdn_t *tsdn)
{

	malloc_mutex_postfork_parent(tsdn, &base_mtx);
}

void
base_postfork_child(tsdn_t *tsdn)
{

	malloc_mutex_postfork_child(tsdn, &base_mtx);
}