/*
 * Copyright 2018 The Emscripten Authors. All rights reserved.
 * Emscripten is available under two separate licenses, the MIT license and the
 * University of Illinois/NCSA Open Source License. Both these licenses can be
 * found in the LICENSE file.
 *
 * Simple minimalistic but efficient sbrk()-based malloc/free that works in
 * singlethreaded and multithreaded builds.
 *
 * Assumptions:
 *
 *  - sbrk() is used to claim new memory (sbrk handles geometric/linear
 *    overallocation growth).
 *  - sbrk() can be used by other code outside emmalloc.
 *  - sbrk() is very fast in most cases (internal wasm call).
 *  - sbrk() returns pointers with an alignment of alignof(max_align_t).
 *
 * Invariants:
 *
 *  - Per-allocation header overhead is 8 bytes, the smallest allocated payload
 *    amount is 8 bytes, and sizes are a multiple of 4 bytes.
 *  - Acquired memory blocks are subdivided into disjoint regions that lie
 *    next to each other.
 *  - A region is either in use or free.
 *    Used regions may be adjacent, and a used and unused region
 *    may be adjacent, but not two unused ones - they would be
 *    merged.
 *  - Memory allocation takes constant time, unless the alloc needs to sbrk()
 *    or memory is very close to being exhausted.
 *
 * Debugging:
 *
 *  - If not NDEBUG, runtime assert()s are in use.
 *  - If EMMALLOC_MEMVALIDATE is defined, a large amount of extra checks are done.
 *  - If EMMALLOC_VERBOSE is defined, a lot of operations are logged
 *    out, in addition to EMMALLOC_MEMVALIDATE.
 *  - Debugging and logging directly uses console.log via EM_ASM, not
 *    printf etc., to minimize any risk of debugging or logging depending on
 *    malloc() itself working.
 */
// Standard and Emscripten headers used by the implementation below.
#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <limits.h>
#include <unistd.h>
#include <assert.h>
#include <malloc.h>
#include <emscripten/heap.h>
#include <emscripten/em_asm.h>

#ifdef __EMSCRIPTEN_TRACING__
#include <emscripten/trace.h>
#endif
// Defined by the linker to have the address of the start of the heap.
extern unsigned char __heap_base;
// Behavior of right shifting a signed integer is implementation-defined by the compiler.
static_assert((((int32_t)0x80000000U) >> 31) == -1, "This malloc implementation requires that right-shifting a signed integer produces a sign-extending (arithmetic) shift!");
// Configuration: specifies the minimum alignment that malloc()ed memory outputs. Allocation requests with smaller alignment
// than this will yield an allocation with this much alignment.
#define MALLOC_ALIGNMENT alignof(max_align_t)
static_assert(alignof(max_align_t) == 16, "max_align_t must be correct");

#define EMMALLOC_EXPORT __attribute__((weak))

#define MIN(x, y) ((x) < (y) ? (x) : (y))
#define MAX(x, y) ((x) > (y) ? (x) : (y))

#define NUM_FREE_BUCKETS 64
#define BUCKET_BITMASK_T uint64_t
// Dynamic memory is subdivided into regions, in the format
//
// <size:uint32_t> ..... <size:uint32_t> | <size:uint32_t> ..... <size:uint32_t> | <size:uint32_t> ..... <size:uint32_t> | .....
//
// That is, at the bottom and top end of each memory region, the size of that region is stored. That allows traversing the
// memory regions backwards and forwards. Because each allocation must be at least a multiple of 4 bytes, the lowest two bits of
// each size field are unused. Free regions are distinguished from used regions by having the FREE_REGION_FLAG bit present
// in the size field. I.e. for free regions, the size field is odd, and for used regions, the size field reads even.
#define FREE_REGION_FLAG 0x1u

// Attempts to malloc() more than this many bytes would cause an overflow when calculating the size of a region,
// therefore allocations larger than this are short-circuited immediately on entry.
#define MAX_ALLOC_SIZE 0xFFFFFFC7u
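// Worked example of the size field encoding (editor's sketch, not part of the
// allocator): a 32-byte region at address p stores 32 in its first size_t word.
// If the region is in use, its last size_t word also reads 32 (even); if the
// region is free, the last word reads 32|FREE_REGION_FLAG == 33 (odd). Checking
// the lowest bit of that ceiling word therefore classifies the region:
//
//   size_t ceiling = ((size_t*)((uint8_t*)p + 32))[-1];
//   bool isFree = ceiling & FREE_REGION_FLAG;    // 33 & 1 == 1 -> free
//   size_t size = ceiling & ~FREE_REGION_FLAG;   // 33 & ~1 == 32
//
// This is exactly what region_is_free() and DECODE_CEILING_SIZE() below compute.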
// A free region has the following structure:
// <size:size_t> <prevptr> <nextptr> ... <size:size_t>

typedef struct Region
{
  size_t size;
  // Use a circular doubly linked list to represent free region data.
  struct Region *prev, *next;
  // ... N bytes of free data
  size_t _at_the_end_of_this_struct_size; // do not dereference, this is present for convenient struct sizeof() computation only
} Region;
// Each memory block starts with a RootRegion at the beginning.
// The RootRegion specifies the size of the region block, and forms a linked
// list of all RootRegions in the program, starting with `listOfAllRegions`
// below.
typedef struct RootRegion
{
  uint32_t size;
  struct RootRegion *next;
  uint8_t *endPtr;
} RootRegion;
#if defined(__EMSCRIPTEN_PTHREADS__)
// In multithreaded builds, use a simple global spinlock strategy to acquire/release access to the memory allocator.
static volatile uint8_t multithreadingLock = 0;
#define MALLOC_ACQUIRE() while(__sync_lock_test_and_set(&multithreadingLock, 1)) { while(multithreadingLock) { /*nop*/ } }
#define MALLOC_RELEASE() __sync_lock_release(&multithreadingLock)
// Test code to ensure we have tight malloc acquire/release guards in place.
#define ASSERT_MALLOC_IS_ACQUIRED() assert(multithreadingLock == 1)
#else
// In singlethreaded builds, no need for locking.
#define MALLOC_ACQUIRE() ((void)0)
#define MALLOC_RELEASE() ((void)0)
#define ASSERT_MALLOC_IS_ACQUIRED() ((void)0)
#endif
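// Sketch of the guard pattern these macros produce (editor's illustration; the
// public entry points further below follow this shape):
//
//   void *emmalloc_memalign(size_t alignment, size_t size)
//   {
//     MALLOC_ACQUIRE();                             // spin until the lock is ours (no-op in singlethreaded builds)
//     void *ptr = allocate_memory(alignment, size); // internals may check ASSERT_MALLOC_IS_ACQUIRED()
//     MALLOC_RELEASE();
//     return ptr;
//   }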
#define IS_POWER_OF_2(val) (((val) & ((val)-1)) == 0)
#define ALIGN_UP(ptr, alignment) ((uint8_t*)((((uintptr_t)(ptr)) + ((alignment)-1)) & ~((alignment)-1)))
#define HAS_ALIGNMENT(ptr, alignment) ((((uintptr_t)(ptr)) & ((alignment)-1)) == 0)

static_assert(IS_POWER_OF_2(MALLOC_ALIGNMENT), "MALLOC_ALIGNMENT must be a power of two value!");
static_assert(MALLOC_ALIGNMENT >= 4, "Smallest possible MALLOC_ALIGNMENT is 4!");
// Head of a singly linked list of the root regions of each disjoint memory
// block that has been claimed for the allocator.
static RootRegion *listOfAllRegions = NULL;
// For each of the buckets, maintain a linked list head node. The head node for each
// free region bucket is a sentinel node that does not actually represent any free space, but
// the sentinel is used to avoid awkward testing against (if node == freeRegionHeadNode)
// when adding and removing elements from the linked list, i.e. we are guaranteed that
// the sentinel node is always fixed and there, and the actual free region list elements
// start at freeRegionBuckets[i].next each.
static Region freeRegionBuckets[NUM_FREE_BUCKETS] = {
  { .prev = &freeRegionBuckets[0], .next = &freeRegionBuckets[0] },
  { .prev = &freeRegionBuckets[1], .next = &freeRegionBuckets[1] },
  { .prev = &freeRegionBuckets[2], .next = &freeRegionBuckets[2] },
  { .prev = &freeRegionBuckets[3], .next = &freeRegionBuckets[3] },
  { .prev = &freeRegionBuckets[4], .next = &freeRegionBuckets[4] },
  { .prev = &freeRegionBuckets[5], .next = &freeRegionBuckets[5] },
  { .prev = &freeRegionBuckets[6], .next = &freeRegionBuckets[6] },
  { .prev = &freeRegionBuckets[7], .next = &freeRegionBuckets[7] },
  { .prev = &freeRegionBuckets[8], .next = &freeRegionBuckets[8] },
  { .prev = &freeRegionBuckets[9], .next = &freeRegionBuckets[9] },
  { .prev = &freeRegionBuckets[10], .next = &freeRegionBuckets[10] },
  { .prev = &freeRegionBuckets[11], .next = &freeRegionBuckets[11] },
  { .prev = &freeRegionBuckets[12], .next = &freeRegionBuckets[12] },
  { .prev = &freeRegionBuckets[13], .next = &freeRegionBuckets[13] },
  { .prev = &freeRegionBuckets[14], .next = &freeRegionBuckets[14] },
  { .prev = &freeRegionBuckets[15], .next = &freeRegionBuckets[15] },
  { .prev = &freeRegionBuckets[16], .next = &freeRegionBuckets[16] },
  { .prev = &freeRegionBuckets[17], .next = &freeRegionBuckets[17] },
  { .prev = &freeRegionBuckets[18], .next = &freeRegionBuckets[18] },
  { .prev = &freeRegionBuckets[19], .next = &freeRegionBuckets[19] },
  { .prev = &freeRegionBuckets[20], .next = &freeRegionBuckets[20] },
  { .prev = &freeRegionBuckets[21], .next = &freeRegionBuckets[21] },
  { .prev = &freeRegionBuckets[22], .next = &freeRegionBuckets[22] },
  { .prev = &freeRegionBuckets[23], .next = &freeRegionBuckets[23] },
  { .prev = &freeRegionBuckets[24], .next = &freeRegionBuckets[24] },
  { .prev = &freeRegionBuckets[25], .next = &freeRegionBuckets[25] },
  { .prev = &freeRegionBuckets[26], .next = &freeRegionBuckets[26] },
  { .prev = &freeRegionBuckets[27], .next = &freeRegionBuckets[27] },
  { .prev = &freeRegionBuckets[28], .next = &freeRegionBuckets[28] },
  { .prev = &freeRegionBuckets[29], .next = &freeRegionBuckets[29] },
  { .prev = &freeRegionBuckets[30], .next = &freeRegionBuckets[30] },
  { .prev = &freeRegionBuckets[31], .next = &freeRegionBuckets[31] },
  { .prev = &freeRegionBuckets[32], .next = &freeRegionBuckets[32] },
  { .prev = &freeRegionBuckets[33], .next = &freeRegionBuckets[33] },
  { .prev = &freeRegionBuckets[34], .next = &freeRegionBuckets[34] },
  { .prev = &freeRegionBuckets[35], .next = &freeRegionBuckets[35] },
  { .prev = &freeRegionBuckets[36], .next = &freeRegionBuckets[36] },
  { .prev = &freeRegionBuckets[37], .next = &freeRegionBuckets[37] },
  { .prev = &freeRegionBuckets[38], .next = &freeRegionBuckets[38] },
  { .prev = &freeRegionBuckets[39], .next = &freeRegionBuckets[39] },
  { .prev = &freeRegionBuckets[40], .next = &freeRegionBuckets[40] },
  { .prev = &freeRegionBuckets[41], .next = &freeRegionBuckets[41] },
  { .prev = &freeRegionBuckets[42], .next = &freeRegionBuckets[42] },
  { .prev = &freeRegionBuckets[43], .next = &freeRegionBuckets[43] },
  { .prev = &freeRegionBuckets[44], .next = &freeRegionBuckets[44] },
  { .prev = &freeRegionBuckets[45], .next = &freeRegionBuckets[45] },
  { .prev = &freeRegionBuckets[46], .next = &freeRegionBuckets[46] },
  { .prev = &freeRegionBuckets[47], .next = &freeRegionBuckets[47] },
  { .prev = &freeRegionBuckets[48], .next = &freeRegionBuckets[48] },
  { .prev = &freeRegionBuckets[49], .next = &freeRegionBuckets[49] },
  { .prev = &freeRegionBuckets[50], .next = &freeRegionBuckets[50] },
  { .prev = &freeRegionBuckets[51], .next = &freeRegionBuckets[51] },
  { .prev = &freeRegionBuckets[52], .next = &freeRegionBuckets[52] },
  { .prev = &freeRegionBuckets[53], .next = &freeRegionBuckets[53] },
  { .prev = &freeRegionBuckets[54], .next = &freeRegionBuckets[54] },
  { .prev = &freeRegionBuckets[55], .next = &freeRegionBuckets[55] },
  { .prev = &freeRegionBuckets[56], .next = &freeRegionBuckets[56] },
  { .prev = &freeRegionBuckets[57], .next = &freeRegionBuckets[57] },
  { .prev = &freeRegionBuckets[58], .next = &freeRegionBuckets[58] },
  { .prev = &freeRegionBuckets[59], .next = &freeRegionBuckets[59] },
  { .prev = &freeRegionBuckets[60], .next = &freeRegionBuckets[60] },
  { .prev = &freeRegionBuckets[61], .next = &freeRegionBuckets[61] },
  { .prev = &freeRegionBuckets[62], .next = &freeRegionBuckets[62] },
  { .prev = &freeRegionBuckets[63], .next = &freeRegionBuckets[63] },
};
// A bitmask that tracks the population status of each of the 64 free region buckets above:
// a zero at bit position i means that the free list bucket i is empty. This bitmask is
// used to avoid redundant scanning of the 64 different free region buckets: instead, by
// looking at the bitmask we can find in constant time an index to a free region bucket
// that contains free memory of desired size.
static BUCKET_BITMASK_T freeRegionBucketsUsed = 0;
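// Worked example (editor's sketch): to find the first non-empty bucket at index
// >= b in constant time, shift the bitmask down by b and count trailing zeros.
// This is exactly the scan that allocate_memory() performs further below:
//
//   BUCKET_BITMASK_T mask = freeRegionBucketsUsed >> b;
//   if (mask)
//   {
//     int firstNonEmpty = b + __builtin_ctzll(mask); // lowest set bit == lowest populated bucket
//     Region *candidate = freeRegionBuckets[firstNonEmpty].next;
//   }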
// Amount of bytes taken up by allocation header data
#define REGION_HEADER_SIZE (2*sizeof(size_t))

// Smallest allocation size that is possible is 2*pointer size, since payload of each region must at least contain space
// to store the free region linked list prev and next pointers. An allocation size smaller than this will be rounded up
// to this size.
#define SMALLEST_ALLOCATION_SIZE (2*sizeof(void*))
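// Worked example (editor's illustration): on 32-bit wasm, REGION_HEADER_SIZE is
// 2*4 == 8 bytes and SMALLEST_ALLOCATION_SIZE is 2*4 == 8 bytes. A malloc(1)
// therefore rounds its payload up to 8 bytes and occupies 8+8 == 16 bytes in
// total, which equals sizeof(Region) - the minimum needed to later hold the
// free list prev/next pointers when the region is freed.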
/* Subdivide regions of free space into distinct circular doubly linked lists, where each linked list
represents a range of free space blocks. The following function compute_free_list_bucket() converts
an allocation size to the bucket index that should be looked at. The buckets are grouped as follows:

  Bucket 0: [8, 15], range size=8
  Bucket 1: [16, 23], range size=8
  Bucket 2: [24, 31], range size=8
  Bucket 3: [32, 39], range size=8
  Bucket 4: [40, 47], range size=8
  Bucket 5: [48, 55], range size=8
  Bucket 6: [56, 63], range size=8
  Bucket 7: [64, 71], range size=8
  Bucket 8: [72, 79], range size=8
  Bucket 9: [80, 87], range size=8
  Bucket 10: [88, 95], range size=8
  Bucket 11: [96, 103], range size=8
  Bucket 12: [104, 111], range size=8
  Bucket 13: [112, 119], range size=8
  Bucket 14: [120, 159], range size=40
  Bucket 15: [160, 191], range size=32
  Bucket 16: [192, 223], range size=32
  Bucket 17: [224, 255], range size=32
  Bucket 18: [256, 319], range size=64
  Bucket 19: [320, 383], range size=64
  Bucket 20: [384, 447], range size=64
  Bucket 21: [448, 511], range size=64
  Bucket 22: [512, 639], range size=128
  Bucket 23: [640, 767], range size=128
  Bucket 24: [768, 895], range size=128
  Bucket 25: [896, 1023], range size=128
  Bucket 26: [1024, 1279], range size=256
  Bucket 27: [1280, 1535], range size=256
  Bucket 28: [1536, 1791], range size=256
  Bucket 29: [1792, 2047], range size=256
  Bucket 30: [2048, 2559], range size=512
  Bucket 31: [2560, 3071], range size=512
  Bucket 32: [3072, 3583], range size=512
  Bucket 33: [3584, 6143], range size=2560
  Bucket 34: [6144, 8191], range size=2048
  Bucket 35: [8192, 12287], range size=4096
  Bucket 36: [12288, 16383], range size=4096
  Bucket 37: [16384, 24575], range size=8192
  Bucket 38: [24576, 32767], range size=8192
  Bucket 39: [32768, 49151], range size=16384
  Bucket 40: [49152, 65535], range size=16384
  Bucket 41: [65536, 98303], range size=32768
  Bucket 42: [98304, 131071], range size=32768
  Bucket 43: [131072, 196607], range size=65536
  Bucket 44: [196608, 262143], range size=65536
  Bucket 45: [262144, 393215], range size=131072
  Bucket 46: [393216, 524287], range size=131072
  Bucket 47: [524288, 786431], range size=262144
  Bucket 48: [786432, 1048575], range size=262144
  Bucket 49: [1048576, 1572863], range size=524288
  Bucket 50: [1572864, 2097151], range size=524288
  Bucket 51: [2097152, 3145727], range size=1048576
  Bucket 52: [3145728, 4194303], range size=1048576
  Bucket 53: [4194304, 6291455], range size=2097152
  Bucket 54: [6291456, 8388607], range size=2097152
  Bucket 55: [8388608, 12582911], range size=4194304
  Bucket 56: [12582912, 16777215], range size=4194304
  Bucket 57: [16777216, 25165823], range size=8388608
  Bucket 58: [25165824, 33554431], range size=8388608
  Bucket 59: [33554432, 50331647], range size=16777216
  Bucket 60: [50331648, 67108863], range size=16777216
  Bucket 61: [67108864, 100663295], range size=33554432
  Bucket 62: [100663296, 134217727], range size=33554432
  Bucket 63: 134217728 bytes and larger. */
static_assert(NUM_FREE_BUCKETS == 64, "Following function is tailored specifically for NUM_FREE_BUCKETS == 64 case");
static int compute_free_list_bucket(size_t allocSize)
{
  if (allocSize < 128) return (allocSize >> 3) - 1;
  int clz = __builtin_clz(allocSize);
  int bucketIndex = (clz > 19) ? 110 - (clz<<2) + ((allocSize >> (29-clz)) ^ 4) : MIN(71 - (clz<<1) + ((allocSize >> (30-clz)) ^ 2), NUM_FREE_BUCKETS-1);
  assert(bucketIndex >= 0);
  assert(bucketIndex < NUM_FREE_BUCKETS);
  return bucketIndex;
}
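// Worked examples against the bucket table above (editor's illustration):
//
//   compute_free_list_bucket(100);  // 100 < 128 -> (100>>3)-1 == 12-1 == 11; bucket 11 covers [96, 103]
//   compute_free_list_bucket(5000); // __builtin_clz(5000) == 19, not > 19, so
//                                   // MIN(71 - 38 + ((5000>>11)^2), 63) == MIN(33 + (2^2), 63) == 33;
//                                   // bucket 33 covers [3584, 6143]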
#define DECODE_CEILING_SIZE(size) ((size_t)((size) & ~FREE_REGION_FLAG))

static Region *prev_region(Region *region)
{
  size_t prevRegionSize = ((size_t*)region)[-1];
  prevRegionSize = DECODE_CEILING_SIZE(prevRegionSize);
  return (Region*)((uint8_t*)region - prevRegionSize);
}

static Region *next_region(Region *region)
{
  return (Region*)((uint8_t*)region + region->size);
}

static size_t region_ceiling_size(Region *region)
{
  return ((size_t*)((uint8_t*)region + region->size))[-1];
}

static bool region_is_free(Region *r)
{
  return region_ceiling_size(r) & FREE_REGION_FLAG;
}

static bool region_is_in_use(Region *r)
{
  return r->size == region_ceiling_size(r);
}

static size_t size_of_region_from_ceiling(Region *r)
{
  size_t size = region_ceiling_size(r);
  return DECODE_CEILING_SIZE(size);
}

static bool debug_region_is_consistent(Region *r)
{
  size_t sizeAtBottom = r->size;
  size_t sizeAtCeiling = size_of_region_from_ceiling(r);
  return sizeAtBottom == sizeAtCeiling;
}

static uint8_t *region_payload_start_ptr(Region *region)
{
  return (uint8_t*)region + sizeof(size_t);
}

static uint8_t *region_payload_end_ptr(Region *region)
{
  return (uint8_t*)region + region->size - sizeof(size_t);
}
static void create_used_region(void *ptr, size_t size)
{
  assert(ptr);
  assert(HAS_ALIGNMENT(ptr, sizeof(size_t)));
  assert(HAS_ALIGNMENT(size, sizeof(size_t)));
  assert(size >= sizeof(Region));
  *(size_t*)ptr = size;
  ((size_t*)ptr)[(size/sizeof(size_t))-1] = size;
}

static void create_free_region(void *ptr, size_t size)
{
  assert(ptr);
  assert(HAS_ALIGNMENT(ptr, sizeof(size_t)));
  assert(HAS_ALIGNMENT(size, sizeof(size_t)));
  assert(size >= sizeof(Region));
  Region *freeRegion = (Region*)ptr;
  freeRegion->size = size;
  ((size_t*)ptr)[(size/sizeof(size_t))-1] = size | FREE_REGION_FLAG;
}
static void prepend_to_free_list(Region *region, Region *prependTo)
{
  assert(region);
  assert(prependTo);
  // N.b. the region we are prepending to is always the sentinel node,
  // which represents a dummy node that is technically not a free node, so
  // region_is_free(prependTo) does not hold.
  assert(region_is_free((Region*)region));
  region->next = prependTo;
  region->prev = prependTo->prev;
  assert(region->prev);
  prependTo->prev = region;
  region->prev->next = region;
}
static void unlink_from_free_list(Region *region)
{
  assert(region);
  assert(region_is_free((Region*)region));
  assert(region->prev);
  assert(region->next);
  region->prev->next = region->next;
  region->next->prev = region->prev;
}
static void link_to_free_list(Region *freeRegion)
{
  assert(freeRegion);
  assert(freeRegion->size >= sizeof(Region));
  int bucketIndex = compute_free_list_bucket(freeRegion->size-REGION_HEADER_SIZE);
  Region *freeListHead = freeRegionBuckets + bucketIndex;
  freeRegion->prev = freeListHead;
  freeRegion->next = freeListHead->next;
  assert(freeRegion->next);
  freeListHead->next = freeRegion;
  freeRegion->next->prev = freeRegion;
  freeRegionBucketsUsed |= ((BUCKET_BITMASK_T)1) << bucketIndex;
}
static void dump_memory_regions()
{
  ASSERT_MALLOC_IS_ACQUIRED();
  RootRegion *root = listOfAllRegions;
  MAIN_THREAD_ASYNC_EM_ASM(console.log('All memory regions:'));
  while(root)
  {
    Region *r = (Region*)root;
    assert(debug_region_is_consistent(r));
    uint8_t *lastRegionEnd = root->endPtr;
    MAIN_THREAD_ASYNC_EM_ASM(console.log('Region block 0x'+($0>>>0).toString(16)+' - 0x'+($1>>>0).toString(16)+ ' ('+($2>>>0)+' bytes):'),
      r, lastRegionEnd, lastRegionEnd-(uint8_t*)r);
    while((uint8_t*)r < lastRegionEnd)
    {
      MAIN_THREAD_ASYNC_EM_ASM(console.log('Region 0x'+($0>>>0).toString(16)+', size: '+($1>>>0)+' ('+($2?"used":"--FREE--")+')'),
        r, r->size, region_ceiling_size(r) == r->size);
      assert(debug_region_is_consistent(r));
      size_t sizeFromCeiling = size_of_region_from_ceiling(r);
      if (sizeFromCeiling != r->size)
        MAIN_THREAD_ASYNC_EM_ASM(console.log('Corrupt region! Size marker at the end of the region does not match: '+($0>>>0)), sizeFromCeiling);
      r = next_region(r);
    }
    root = root->next;
    MAIN_THREAD_ASYNC_EM_ASM(console.log(""));
  }
  MAIN_THREAD_ASYNC_EM_ASM(console.log('Free regions:'));
  for(int i = 0; i < NUM_FREE_BUCKETS; ++i)
  {
    Region *prev = &freeRegionBuckets[i];
    Region *fr = freeRegionBuckets[i].next;
    while(fr != &freeRegionBuckets[i])
    {
      MAIN_THREAD_ASYNC_EM_ASM(console.log('In bucket '+$0+', free region 0x'+($1>>>0).toString(16)+', size: ' + ($2>>>0) + ' (size at ceiling: '+($3>>>0)+'), prev: 0x' + ($4>>>0).toString(16) + ', next: 0x' + ($5>>>0).toString(16)),
        i, fr, fr->size, size_of_region_from_ceiling(fr), fr->prev, fr->next);
      assert(debug_region_is_consistent(fr));
      assert(region_is_free(fr));
      assert(fr->prev == prev);
      prev = fr;
      assert(fr->next != fr);
      assert(fr->prev != fr);
      fr = fr->next;
    }
  }
  MAIN_THREAD_ASYNC_EM_ASM(console.log('Free bucket index map: ' + ($0>>>0).toString(2) + ' ' + ($1>>>0).toString(2)), (uint32_t)(freeRegionBucketsUsed >> 32), (uint32_t)freeRegionBucketsUsed);
  MAIN_THREAD_ASYNC_EM_ASM(console.log(""));
}

void emmalloc_dump_memory_regions()
{
  MALLOC_ACQUIRE();
  dump_memory_regions();
  MALLOC_RELEASE();
}
static int validate_memory_regions()
{
  ASSERT_MALLOC_IS_ACQUIRED();
  RootRegion *root = listOfAllRegions;
  while(root)
  {
    Region *r = (Region*)root;
    if (!debug_region_is_consistent(r))
    {
      MAIN_THREAD_ASYNC_EM_ASM(console.error('Used region 0x'+($0>>>0).toString(16)+', size: '+($1>>>0)+' ('+($2?"used":"--FREE--")+') is corrupt (size markers in the beginning and at the end of the region do not match!)'),
        r, r->size, region_ceiling_size(r) == r->size);
      return 1;
    }
    uint8_t *lastRegionEnd = root->endPtr;
    while((uint8_t*)r < lastRegionEnd)
    {
      if (!debug_region_is_consistent(r))
      {
        MAIN_THREAD_ASYNC_EM_ASM(console.error('Used region 0x'+($0>>>0).toString(16)+', size: '+($1>>>0)+' ('+($2?"used":"--FREE--")+') is corrupt (size markers in the beginning and at the end of the region do not match!)'),
          r, r->size, region_ceiling_size(r) == r->size);
        return 1;
      }
      r = next_region(r);
    }
    root = root->next;
  }
  for(int i = 0; i < NUM_FREE_BUCKETS; ++i)
  {
    Region *prev = &freeRegionBuckets[i];
    Region *fr = freeRegionBuckets[i].next;
    while(fr != &freeRegionBuckets[i])
    {
      if (!debug_region_is_consistent(fr) || !region_is_free(fr) || fr->prev != prev || fr->next == fr || fr->prev == fr)
      {
        MAIN_THREAD_ASYNC_EM_ASM(console.log('In bucket '+$0+', free region 0x'+($1>>>0).toString(16)+', size: ' + ($2>>>0) + ' (size at ceiling: '+($3>>>0)+'), prev: 0x' + ($4>>>0).toString(16) + ', next: 0x' + ($5>>>0).toString(16) + ' is corrupt!'),
          i, fr, fr->size, size_of_region_from_ceiling(fr), fr->prev, fr->next);
        return 1;
      }
      prev = fr;
      fr = fr->next;
    }
  }
  return 0;
}

int emmalloc_validate_memory_regions()
{
  MALLOC_ACQUIRE();
  int memoryError = validate_memory_regions();
  MALLOC_RELEASE();
  return memoryError;
}
// Wasm memory grows in 64KiB pages; if the libc headers do not provide
// PAGE_SIZE, define it here.
#ifndef PAGE_SIZE
#define PAGE_SIZE 65536
#endif

static bool claim_more_memory(size_t numBytes)
{
#ifdef EMMALLOC_VERBOSE
  MAIN_THREAD_ASYNC_EM_ASM(console.log('claim_more_memory(numBytes='+($0>>>0)+ ')'), numBytes);
#endif

#ifdef EMMALLOC_MEMVALIDATE
  validate_memory_regions();
#endif

  uint8_t *startPtr;
  uint8_t *endPtr;
  do {
    // If this is the first time we're called, see if we can use
    // the initial heap memory set up by wasm-ld.
    if (!listOfAllRegions) {
      unsigned char *heap_end = sbrk(0);
      if (numBytes <= (size_t)(heap_end - &__heap_base)) {
        startPtr = &__heap_base;
        endPtr = heap_end;
        break;
      }
    }

    // Round numBytes up to the nearest page size.
    numBytes = (numBytes + (PAGE_SIZE-1)) & -PAGE_SIZE;

    // Claim memory via sbrk
    startPtr = (uint8_t*)sbrk(numBytes);
    if ((intptr_t)startPtr == -1)
    {
#ifdef EMMALLOC_VERBOSE
      MAIN_THREAD_ASYNC_EM_ASM(console.error('claim_more_memory: sbrk failed!'));
#endif
      return false;
    }
#ifdef EMMALLOC_VERBOSE
    MAIN_THREAD_ASYNC_EM_ASM(console.log('claim_more_memory: claimed 0x' + ($0>>>0).toString(16) + ' - 0x' + ($1>>>0).toString(16) + ' (' + ($2>>>0) + ' bytes) via sbrk()'), startPtr, startPtr + numBytes, numBytes);
#endif
    assert(HAS_ALIGNMENT(startPtr, alignof(size_t)));
    endPtr = startPtr + numBytes;
  } while(0);

  // Create a sentinel region at the end of the new heap block
  Region *endSentinelRegion = (Region*)(endPtr - sizeof(Region));
  create_used_region(endSentinelRegion, sizeof(Region));

  // If we are the sole user of sbrk(), it will feed us continuous/consecutive memory addresses - take advantage
  // of that if so: instead of creating two disjoint memory region blocks, expand the previous one to a larger size.
  uint8_t *previousSbrkEndAddress = listOfAllRegions ? listOfAllRegions->endPtr : 0;
  if (startPtr == previousSbrkEndAddress)
  {
    Region *prevEndSentinel = prev_region((Region*)startPtr);
    assert(debug_region_is_consistent(prevEndSentinel));
    assert(region_is_in_use(prevEndSentinel));
    Region *prevRegion = prev_region(prevEndSentinel);
    assert(debug_region_is_consistent(prevRegion));

    listOfAllRegions->endPtr = endPtr;

    // Two scenarios, either the last region of the previous block was in use, in which case we need to create
    // a new free region in the newly allocated space; or it was free, in which case we can extend that region
    // to cover a larger size.
    if (region_is_free(prevRegion))
    {
      size_t newFreeRegionSize = (uint8_t*)endSentinelRegion - (uint8_t*)prevRegion;
      unlink_from_free_list(prevRegion);
      create_free_region(prevRegion, newFreeRegionSize);
      link_to_free_list(prevRegion);
      return true;
    }
    // else: last region of the previous block was in use. Since we are joining two consecutive sbrk() blocks,
    // we can swallow the end sentinel of the previous block away.
    startPtr -= sizeof(Region);
  }
  else
  {
    // Create a root region at the start of the heap block
    create_used_region(startPtr, sizeof(Region));

    // Dynamic heap start region:
    RootRegion *newRegionBlock = (RootRegion*)startPtr;
    newRegionBlock->next = listOfAllRegions; // Pointer to next region block head
    newRegionBlock->endPtr = endPtr; // Pointer to the end address of this region block
    listOfAllRegions = newRegionBlock;
    startPtr += sizeof(Region);
  }

  // Create a new memory region for the newly claimed free space.
  create_free_region(startPtr, (uint8_t*)endSentinelRegion - startPtr);
  link_to_free_list((Region*)startPtr);
  return true;
}
// Initialize emmalloc during static initialization.
// See system/lib/README.md for static constructor ordering.
__attribute__((constructor(47)))
static void initialize_emmalloc_heap()
{
  // Initialize circular doubly linked lists representing free space.
  // Never useful to unroll this for loop, it just takes up code size.
#pragma clang loop unroll(disable)
  for(int i = 0; i < NUM_FREE_BUCKETS; ++i)
    freeRegionBuckets[i].prev = freeRegionBuckets[i].next = &freeRegionBuckets[i];

#ifdef EMMALLOC_VERBOSE
  MAIN_THREAD_ASYNC_EM_ASM(console.log('initialize_emmalloc_heap()'));
#endif

  // Start with a tiny dynamic region.
  claim_more_memory(3*sizeof(Region));
}
void emmalloc_blank_slate_from_orbit()
{
  MALLOC_ACQUIRE();
  listOfAllRegions = NULL;
  freeRegionBucketsUsed = 0;
  initialize_emmalloc_heap();
  MALLOC_RELEASE();
}
static void *attempt_allocate(Region *freeRegion, size_t alignment, size_t size)
{
  ASSERT_MALLOC_IS_ACQUIRED();

  // Look at the next potential free region to allocate into.
  // First, we should check if the free region has enough of payload bytes contained
  // in it to accommodate the new allocation. This check needs to take account the
  // requested allocation alignment, so the payload memory area needs to be rounded
  // upwards to the desired alignment.
  uint8_t *payloadStartPtr = region_payload_start_ptr(freeRegion);
  uint8_t *payloadStartPtrAligned = ALIGN_UP(payloadStartPtr, alignment);
  uint8_t *payloadEndPtr = region_payload_end_ptr(freeRegion);

  // Do we have enough free space, taking into account alignment?
  if (payloadStartPtrAligned + size > payloadEndPtr)
    return NULL;

  // We have enough free space, so the memory allocation will be made into this region. Remove this free region
  // from the list of free regions: whatever slop remains will be later added back to the free region pool.
  unlink_from_free_list(freeRegion);

  // Before we proceed further, fix up the boundary of this region and the region that precedes this one,
  // so that the boundary between the two regions happens at a right spot for the payload to be aligned.
  if (payloadStartPtr != payloadStartPtrAligned)
  {
    Region *prevRegion = prev_region((Region*)freeRegion);
    // We never have two free regions adjacent to each other, so the region before this free
    // region should be in use.
    assert(region_is_in_use(prevRegion));
    size_t regionBoundaryBumpAmount = payloadStartPtrAligned - payloadStartPtr;
    size_t newThisRegionSize = freeRegion->size - regionBoundaryBumpAmount;
    create_used_region(prevRegion, prevRegion->size + regionBoundaryBumpAmount);
    freeRegion = (Region*)((uint8_t*)freeRegion + regionBoundaryBumpAmount);
    freeRegion->size = newThisRegionSize;
  }

  // Next, we need to decide whether this region is so large that it should be split into two regions,
  // one representing the newly used memory area, and at the high end a remaining leftover free area.
  // This splitting to two is done always if there is enough space for the high end to fit a region.
  // Carve 'size' bytes of payload off this region. So,
  // [sz prev next sz]
  // becomes
  // [sz payload sz] [sz prev next sz]
  if (sizeof(Region) + REGION_HEADER_SIZE + size <= freeRegion->size)
  {
    // There is enough space to keep a free region at the end of the carved out block
    // -> construct the new block
    Region *newFreeRegion = (Region*)((uint8_t*)freeRegion + REGION_HEADER_SIZE + size);
    create_free_region(newFreeRegion, freeRegion->size - size - REGION_HEADER_SIZE);
    link_to_free_list(newFreeRegion);

    // Recreate the resized Region under its new size.
    create_used_region(freeRegion, size + REGION_HEADER_SIZE);
  }
  else
  {
    // There is not enough space to split the free memory region into used+free parts, so consume the whole
    // region as used memory, not leaving a free memory region behind.
    // Initialize the free region as used by resetting the ceiling size to the same value as the size at bottom.
    ((size_t*)((uint8_t*)freeRegion + freeRegion->size))[-1] = freeRegion->size;
  }

#ifdef __EMSCRIPTEN_TRACING__
  emscripten_trace_record_allocation(freeRegion, freeRegion->size);
#endif

#ifdef EMMALLOC_VERBOSE
  MAIN_THREAD_ASYNC_EM_ASM(console.log('attempt_allocate - succeeded allocating memory, region ptr=0x' + ($0>>>0).toString(16) + ', align=' + $1 + ', payload size=' + ($2>>>0) + ' bytes)'), freeRegion, alignment, size);
#endif

  return (uint8_t*)freeRegion + sizeof(size_t);
}
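// Illustration of the returned pointer (editor's sketch): the caller receives
// the payload, which starts one size_t past the start of the Region; free()
// and realloc() below invert this with the same constant:
//
//   void *payload = (uint8_t*)freeRegion + sizeof(size_t);          // what attempt_allocate() returns
//   Region *region = (Region*)((uint8_t*)payload - sizeof(size_t)); // what free()/realloc() recompute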
static size_t validate_alloc_alignment(size_t alignment)
{
  // Cannot perform allocations that are less than 4 byte aligned, because the Region
  // control structures need to be aligned. Also round up to minimum outputted alignment.
  alignment = MAX(alignment, MALLOC_ALIGNMENT);
  // Arbitrary upper limit on alignment - very likely a programming bug if alignment is higher than this.
  assert(alignment <= 1024*1024);
  return alignment;
}

static size_t validate_alloc_size(size_t size)
{
  assert(size + REGION_HEADER_SIZE > size);

  // Allocation sizes must be a multiple of pointer sizes, and at least 2*sizeof(pointer).
  size_t validatedSize = size > SMALLEST_ALLOCATION_SIZE ? (size_t)ALIGN_UP(size, sizeof(Region*)) : SMALLEST_ALLOCATION_SIZE;
  assert(validatedSize >= size); // 32-bit wraparound should not occur, too large sizes should be stopped before

  return validatedSize;
}
static void *allocate_memory(size_t alignment, size_t size)
{
  ASSERT_MALLOC_IS_ACQUIRED();

#ifdef EMMALLOC_VERBOSE
  MAIN_THREAD_ASYNC_EM_ASM(console.log('allocate_memory(align=' + $0 + ', size=' + ($1>>>0) + ' bytes)'), alignment, size);
#endif

#ifdef EMMALLOC_MEMVALIDATE
  validate_memory_regions();
#endif

  if (!IS_POWER_OF_2(alignment))
  {
#ifdef EMMALLOC_VERBOSE
    MAIN_THREAD_ASYNC_EM_ASM(console.log('Allocation failed: alignment not power of 2!'));
#endif
    return 0;
  }

  if (size > MAX_ALLOC_SIZE)
  {
#ifdef EMMALLOC_VERBOSE
    MAIN_THREAD_ASYNC_EM_ASM(console.log('Allocation failed: attempted allocation size is too large: ' + ($0 >>> 0) + ' bytes! (negative integer wraparound?)'), size);
#endif
    return 0;
  }

  alignment = validate_alloc_alignment(alignment);
  size = validate_alloc_size(size);

  // Attempt to allocate memory starting from smallest bucket that can contain the required amount of memory.
  // Under normal alignment conditions this should always be the first or second bucket we look at, but if
  // performing an allocation with complex alignment, we may need to look at multiple buckets.
  int bucketIndex = compute_free_list_bucket(size);
  BUCKET_BITMASK_T bucketMask = freeRegionBucketsUsed >> bucketIndex;

  // Loop through each bucket that has free regions in it, based on bits set in freeRegionBucketsUsed bitmap.
  while(bucketMask)
  {
    BUCKET_BITMASK_T indexAdd = __builtin_ctzll(bucketMask);
    bucketIndex += indexAdd;
    bucketMask >>= indexAdd;
    assert(bucketIndex >= 0);
    assert(bucketIndex <= NUM_FREE_BUCKETS-1);
    assert(freeRegionBucketsUsed & (((BUCKET_BITMASK_T)1) << bucketIndex));

    Region *freeRegion = freeRegionBuckets[bucketIndex].next;
    if (freeRegion != &freeRegionBuckets[bucketIndex])
    {
      void *ptr = attempt_allocate(freeRegion, alignment, size);
      if (ptr)
        return ptr;

      // We were not able to allocate from the first region found in this bucket, so penalize
      // the region by cycling it to the end of the doubly circular linked list. (constant time)
      // This provides a randomized guarantee that when performing allocations of size k to a
      // bucket of [k-something, k+something] range, we will not always attempt to satisfy the
      // allocation from the same available region at the front of the list, but we try each
      // region in turn.
      unlink_from_free_list(freeRegion);
      prepend_to_free_list(freeRegion, &freeRegionBuckets[bucketIndex]);
      // But do not stick around to attempt to look at other regions in this bucket - move
      // to search the next populated bucket index if this did not fit. This gives a practical
      // "allocation in constant time" guarantee, since the next higher bucket will only have
      // regions that are all of strictly larger size than the requested allocation. Only if
      // there is a difficult alignment requirement we may fail to perform the allocation from
      // a region in the next bucket, and if so, we keep trying higher buckets until one of them
      // succeeds.
      ++bucketIndex;
      bucketMask >>= 1;
    }
    else
    {
      // This bucket was not populated after all with any regions,
      // but we just had a stale bit set to mark a populated bucket.
      // Reset the bit to update latest status so that we do not
      // redundantly look at this bucket again.
      freeRegionBucketsUsed &= ~(((BUCKET_BITMASK_T)1) << bucketIndex);
      bucketMask ^= 1;
    }
    // Instead of recomputing bucketMask from scratch at the end of each loop, it is updated as we go,
    // to avoid undefined behavior with (x >> 32)/(x >> 64) when bucketIndex reaches 32/64, (the shift would come out as a no-op instead of 0).
    assert((bucketIndex == NUM_FREE_BUCKETS && bucketMask == 0) || (bucketMask == freeRegionBucketsUsed >> bucketIndex));
  }

  // None of the buckets were able to accommodate an allocation. If this happens we are almost out of memory.
  // The largest bucket might contain some suitable regions, but we only looked at one region in that bucket, so
  // as a last resort, loop through more free regions in the bucket that represents the largest allocations available.
  // But only if the bucket representing largest allocations available is not any of the first thirty buckets,
  // these represent allocatable areas less than <1024 bytes - which could be a lot of scrap.
  // In such case, prefer to sbrk() in more memory right away.
  int largestBucketIndex = NUM_FREE_BUCKETS - 1 - __builtin_clzll(freeRegionBucketsUsed);
  // freeRegion will be null if there is absolutely no memory left. (all buckets are 100% used)
  Region *freeRegion = freeRegionBucketsUsed ? freeRegionBuckets[largestBucketIndex].next : 0;
  if (freeRegionBucketsUsed >> 30)
  {
    // Look only at a constant number of regions in this bucket max, to avoid bad worst case behavior.
    // If this many regions cannot find free space, we give up and prefer to sbrk() more instead.
    const int maxRegionsToTryBeforeGivingUp = 99;
    int numTriesLeft = maxRegionsToTryBeforeGivingUp;
    while(freeRegion != &freeRegionBuckets[largestBucketIndex] && numTriesLeft-- > 0)
    {
      void *ptr = attempt_allocate(freeRegion, alignment, size);
      if (ptr)
        return ptr;
      freeRegion = freeRegion->next;
    }
  }

  // We were unable to find a free memory region. Must sbrk() in more memory!
  size_t numBytesToClaim = size+sizeof(Region)*3;
  assert(numBytesToClaim > size); // 32-bit wraparound should not happen here, allocation size has been validated above!
  bool success = claim_more_memory(numBytesToClaim);
  if (success)
    return allocate_memory(alignment, size); // Recurse back to itself to try again

  // also sbrk() failed, we are really really constrained :( As a last resort, go back to looking at the
  // bucket we already looked at above, continuing where the above search left off - perhaps there are
  // regions we overlooked the first time that might be able to satisfy the allocation.
  if (freeRegion)
  {
    while(freeRegion != &freeRegionBuckets[largestBucketIndex])
    {
      void *ptr = attempt_allocate(freeRegion, alignment, size);
      if (ptr)
        return ptr;
      freeRegion = freeRegion->next;
    }
  }

#ifdef EMMALLOC_VERBOSE
  MAIN_THREAD_ASYNC_EM_ASM(console.log('Could not find a free memory block!'));
#endif

  return 0;
}
void *emmalloc_memalign(size_t alignment, size_t size)
{
  MALLOC_ACQUIRE();
  void *ptr = allocate_memory(alignment, size);
  MALLOC_RELEASE();
  return ptr;
}

void * EMMALLOC_EXPORT memalign(size_t alignment, size_t size)
{
  return emmalloc_memalign(alignment, size);
}

void * EMMALLOC_EXPORT aligned_alloc(size_t alignment, size_t size)
{
  if ((alignment % sizeof(void *) != 0) || (size % alignment) != 0)
    return 0;
  return emmalloc_memalign(alignment, size);
}

void *emmalloc_malloc(size_t size)
{
  return emmalloc_memalign(MALLOC_ALIGNMENT, size);
}

void * EMMALLOC_EXPORT malloc(size_t size)
{
  return emmalloc_malloc(size);
}
size_t emmalloc_usable_size(void *ptr)
{
  if (!ptr)
    return 0;

  uint8_t *regionStartPtr = (uint8_t*)ptr - sizeof(size_t);
  Region *region = (Region*)(regionStartPtr);
  assert(HAS_ALIGNMENT(region, sizeof(size_t)));

  MALLOC_ACQUIRE();

  size_t size = region->size;
  assert(size >= sizeof(Region));
  assert(region_is_in_use(region));

  MALLOC_RELEASE();

  return size - REGION_HEADER_SIZE;
}

size_t EMMALLOC_EXPORT malloc_usable_size(void *ptr)
{
  return emmalloc_usable_size(ptr);
}
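// Worked example (editor's illustration): malloc(10) validates the size up to
// ALIGN_UP(10, 4) == 12 payload bytes, so the used region spans 12+8 == 20
// bytes and malloc_usable_size() reports at least 12 - possibly more if
// attempt_allocate() could not split the tail of the free region off into a
// free region of its own.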
void emmalloc_free(void *ptr)
{
#ifdef EMMALLOC_MEMVALIDATE
  emmalloc_validate_memory_regions();
#endif

  if (!ptr)
    return;

#ifdef EMMALLOC_VERBOSE
  MAIN_THREAD_ASYNC_EM_ASM(console.log('free(ptr=0x'+($0>>>0).toString(16)+')'), ptr);
#endif

  uint8_t *regionStartPtr = (uint8_t*)ptr - sizeof(size_t);
  Region *region = (Region*)(regionStartPtr);
  assert(HAS_ALIGNMENT(region, sizeof(size_t)));

  MALLOC_ACQUIRE();

  size_t size = region->size;
#ifdef EMMALLOC_VERBOSE
  if (size < sizeof(Region) || !region_is_in_use(region))
  {
    if (debug_region_is_consistent(region))
      // LLVM wasm backend bug: cannot use MAIN_THREAD_ASYNC_EM_ASM() here, that generates internal compiler error
      // Reproducible by running e.g. other.test_alloc_3GB
      EM_ASM(console.error('Double free at region ptr 0x' + ($0>>>0).toString(16) + ', region->size: 0x' + ($1>>>0).toString(16) + ', region->sizeAtCeiling: 0x' + ($2>>>0).toString(16) + ')'), region, size, region_ceiling_size(region));
    else
      MAIN_THREAD_ASYNC_EM_ASM(console.error('Corrupt region at region ptr 0x' + ($0>>>0).toString(16) + ' region->size: 0x' + ($1>>>0).toString(16) + ', region->sizeAtCeiling: 0x' + ($2>>>0).toString(16) + ')'), region, size, region_ceiling_size(region));
  }
#endif
  assert(size >= sizeof(Region));
  assert(region_is_in_use(region));

#ifdef __EMSCRIPTEN_TRACING__
  emscripten_trace_record_free(region);
#endif

  // Check merging with left side
  size_t prevRegionSizeField = ((size_t*)region)[-1];
  size_t prevRegionSize = prevRegionSizeField & ~FREE_REGION_FLAG;
  if (prevRegionSizeField != prevRegionSize) // Previous region is free?
  {
    Region *prevRegion = (Region*)((uint8_t*)region - prevRegionSize);
    assert(debug_region_is_consistent(prevRegion));
    unlink_from_free_list(prevRegion);
    regionStartPtr = (uint8_t*)prevRegion;
    size += prevRegionSize;
  }

  // Check merging with right side
  Region *nextRegion = next_region(region);
  assert(debug_region_is_consistent(nextRegion));
  size_t sizeAtEnd = *(size_t*)region_payload_end_ptr(nextRegion);
  if (nextRegion->size != sizeAtEnd) // Next region is free?
  {
    unlink_from_free_list(nextRegion);
    size += nextRegion->size;
  }

  create_free_region(regionStartPtr, size);
  link_to_free_list((Region*)regionStartPtr);

  MALLOC_RELEASE();

#ifdef EMMALLOC_MEMVALIDATE
  emmalloc_validate_memory_regions();
#endif
}

void EMMALLOC_EXPORT free(void *ptr)
{
  return emmalloc_free(ptr);
}
// Can be called to attempt to increase or decrease the size of the given region
// to a new size (in-place). Returns 1 if resize succeeds, and 0 on failure.
static int attempt_region_resize(Region *region, size_t size)
{
  ASSERT_MALLOC_IS_ACQUIRED();
  assert(HAS_ALIGNMENT(size, sizeof(size_t)));

#ifdef EMMALLOC_VERBOSE
  MAIN_THREAD_ASYNC_EM_ASM(console.log('attempt_region_resize(region=0x' + ($0>>>0).toString(16) + ', size=' + ($1>>>0) + ' bytes)'), region, size);
#endif

  // First attempt to resize this region, if the next region that follows this one
  // is a free region.
  Region *nextRegion = next_region(region);
  uint8_t *nextRegionEndPtr = (uint8_t*)nextRegion + nextRegion->size;
  size_t sizeAtCeiling = ((size_t*)nextRegionEndPtr)[-1];
  if (nextRegion->size != sizeAtCeiling) // Next region is free?
  {
    assert(region_is_free(nextRegion));
    uint8_t *newNextRegionStartPtr = (uint8_t*)region + size;
    assert(HAS_ALIGNMENT(newNextRegionStartPtr, sizeof(size_t)));
    // Next region does not shrink to too small size?
    if (newNextRegionStartPtr + sizeof(Region) <= nextRegionEndPtr)
    {
      unlink_from_free_list(nextRegion);
      create_free_region(newNextRegionStartPtr, nextRegionEndPtr - newNextRegionStartPtr);
      link_to_free_list((Region*)newNextRegionStartPtr);
      create_used_region(region, newNextRegionStartPtr - (uint8_t*)region);
      return 1;
    }

    // If we remove the next region altogether, allocation is satisfied?
    if (newNextRegionStartPtr <= nextRegionEndPtr)
    {
      unlink_from_free_list(nextRegion);
      create_used_region(region, region->size + nextRegion->size);
      return 1;
    }
  }
  else
  {
    // Next region is a used region - we cannot change its starting address. However if we are shrinking the
    // size of this region, we can create a new free region between this and the next used region.
    if (size + sizeof(Region) <= region->size)
    {
      size_t freeRegionSize = region->size - size;
      create_used_region(region, size);
      Region *freeRegion = (Region *)((uint8_t*)region + size);
      create_free_region(freeRegion, freeRegionSize);
      link_to_free_list(freeRegion);
      return 1;
    }
    else if (size <= region->size)
    {
      // Caller was asking to shrink the size, but due to not being able to fit a full Region in the shrunk
      // area, we cannot actually do anything. This occurs if the shrink amount is really small. In such case,
      // just call it success without doing any work.
      return 1;
    }
  }
#ifdef EMMALLOC_VERBOSE
  MAIN_THREAD_ASYNC_EM_ASM(console.log('attempt_region_resize failed.'));
#endif
  return 0;
}

static int acquire_and_attempt_region_resize(Region *region, size_t size)
{
  MALLOC_ACQUIRE();
  int success = attempt_region_resize(region, size);
  MALLOC_RELEASE();
  return success;
}
void *emmalloc_aligned_realloc(void *ptr, size_t alignment, size_t size)
{
#ifdef EMMALLOC_VERBOSE
  MAIN_THREAD_ASYNC_EM_ASM(console.log('aligned_realloc(ptr=0x' + ($0>>>0).toString(16) + ', alignment=' + $1 + ', size=' + ($2>>>0) + ')'), ptr, alignment, size);
#endif

  if (!ptr)
    return emmalloc_memalign(alignment, size);

  if (size == 0)
  {
    free(ptr);
    return 0;
  }

  if (size > MAX_ALLOC_SIZE)
  {
#ifdef EMMALLOC_VERBOSE
    MAIN_THREAD_ASYNC_EM_ASM(console.log('Allocation failed: attempted allocation size is too large: ' + ($0 >>> 0) + ' bytes! (negative integer wraparound?)'), size);
#endif
    return 0;
  }

  assert(IS_POWER_OF_2(alignment));
  // aligned_realloc() cannot be used to ask to change the alignment of a pointer.
  assert(HAS_ALIGNMENT(ptr, alignment));
  size = validate_alloc_size(size);

  // Calculate the region start address of the original allocation
  Region *region = (Region*)((uint8_t*)ptr - sizeof(size_t));

  // First attempt to resize the given region to avoid having to copy memory around
  if (acquire_and_attempt_region_resize(region, size + REGION_HEADER_SIZE))
  {
#ifdef __EMSCRIPTEN_TRACING__
    emscripten_trace_record_reallocation(ptr, ptr, size);
#endif
    return ptr;
  }

  // If resize failed, we must allocate a new region, copy the data over, and then
  // free the old region.
  void *newptr = emmalloc_memalign(alignment, size);
  if (newptr)
  {
    memcpy(newptr, ptr, MIN(size, region->size - REGION_HEADER_SIZE));
    free(ptr);
  }
  // N.B. If there is not enough memory, the old memory block should not be freed and
  // null pointer is returned.
  return newptr;
}

void * EMMALLOC_EXPORT aligned_realloc(void *ptr, size_t alignment, size_t size)
{
  return emmalloc_aligned_realloc(ptr, alignment, size);
}
// realloc_try() is like realloc(), but only attempts to try to resize the existing memory
// area. If resizing the existing memory area fails, then realloc_try() will return 0
// (the original memory block is not freed or modified). If resizing succeeds, previous
// memory contents will be valid up to min(old length, new length) bytes.
void *emmalloc_realloc_try(void *ptr, size_t size)
{
  if (!ptr)
    return 0;

  if (size == 0)
  {
    free(ptr);
    return 0;
  }

  if (size > MAX_ALLOC_SIZE)
  {
#ifdef EMMALLOC_VERBOSE
    MAIN_THREAD_ASYNC_EM_ASM(console.log('Allocation failed: attempted allocation size is too large: ' + ($0 >>> 0) + ' bytes! (negative integer wraparound?)'), size);
#endif
    return 0;
  }

  size = validate_alloc_size(size);

  // Calculate the region start address of the original allocation
  Region *region = (Region*)((uint8_t*)ptr - sizeof(size_t));

  // Attempt to resize the given region to avoid having to copy memory around
  int success = acquire_and_attempt_region_resize(region, size + REGION_HEADER_SIZE);
#ifdef __EMSCRIPTEN_TRACING__
  if (success)
    emscripten_trace_record_reallocation(ptr, ptr, size);
#endif
  return success ? ptr : 0;
}
// emmalloc_aligned_realloc_uninitialized() is like aligned_realloc(), but old memory contents
// will be undefined after reallocation. (old memory is not preserved in any case)
void *emmalloc_aligned_realloc_uninitialized(void *ptr, size_t alignment, size_t size)
{
  if (!ptr)
    return emmalloc_memalign(alignment, size);

  if (size == 0)
  {
    free(ptr);
    return 0;
  }

  if (size > MAX_ALLOC_SIZE)
  {
#ifdef EMMALLOC_VERBOSE
    MAIN_THREAD_ASYNC_EM_ASM(console.log('Allocation failed: attempted allocation size is too large: ' + ($0 >>> 0) + ' bytes! (negative integer wraparound?)'), size);
#endif
    return 0;
  }

  size = validate_alloc_size(size);

  // Calculate the region start address of the original allocation
  Region *region = (Region*)((uint8_t*)ptr - sizeof(size_t));

  // First attempt to resize the given region to avoid having to copy memory around
  if (acquire_and_attempt_region_resize(region, size + REGION_HEADER_SIZE))
  {
#ifdef __EMSCRIPTEN_TRACING__
    emscripten_trace_record_reallocation(ptr, ptr, size);
#endif
    return ptr;
  }

  // If resize failed, drop the old region and allocate a new region. Memory is not
  // copied over, so the old contents are lost.
  free(ptr);
  return emmalloc_memalign(alignment, size);
}
void *emmalloc_realloc(void *ptr, size_t size)
{
  return emmalloc_aligned_realloc(ptr, MALLOC_ALIGNMENT, size);
}

void * EMMALLOC_EXPORT realloc(void *ptr, size_t size)
{
  return emmalloc_realloc(ptr, size);
}

// realloc_uninitialized() is like realloc(), but old memory contents
// will be undefined after reallocation. (old memory is not preserved in any case)
void *emmalloc_realloc_uninitialized(void *ptr, size_t size)
{
  return emmalloc_aligned_realloc_uninitialized(ptr, MALLOC_ALIGNMENT, size);
}
int emmalloc_posix_memalign(void **memptr, size_t alignment, size_t size)
{
  assert(memptr);
  if (alignment % sizeof(void *) != 0)
    return 22 /*EINVAL*/;
  *memptr = emmalloc_memalign(alignment, size);
  return *memptr ? 0 : 12 /*ENOMEM*/;
}

int EMMALLOC_EXPORT posix_memalign(void **memptr, size_t alignment, size_t size)
{
  return emmalloc_posix_memalign(memptr, alignment, size);
}
void *emmalloc_calloc(size_t num, size_t size)
{
  // Guard against num*size wrapping around: a wrapped product would allocate
  // a buffer far smaller than the caller expects.
  size_t bytes;
  if (__builtin_mul_overflow(num, size, &bytes))
    return 0;
  void *ptr = emmalloc_memalign(MALLOC_ALIGNMENT, bytes);
  if (ptr)
    memset(ptr, 0, bytes);
  return ptr;
}

void * EMMALLOC_EXPORT calloc(size_t num, size_t size)
{
  return emmalloc_calloc(num, size);
}
static int count_linked_list_size(Region *list)
{
  int size = 1;
  for(Region *i = list->next; i != list; i = i->next)
    ++size;
  return size;
}

static size_t count_linked_list_space(Region *list)
{
  size_t space = 0;
  for(Region *i = list->next; i != list; i = i->next)
    space += region_payload_end_ptr(i) - region_payload_start_ptr(i);
  return space;
}
struct mallinfo emmalloc_mallinfo()
{
  MALLOC_ACQUIRE();

  struct mallinfo info;
  // Non-mmapped space allocated (bytes): For emmalloc,
  // let's define this as the difference between heap size and dynamic top end.
  info.arena = emscripten_get_heap_size() - (size_t)sbrk(0);
  // Number of "ordinary" blocks. Let's define this as the number of highest
  // size blocks. (subtract one from each, since there is a sentinel node in each list)
  info.ordblks = count_linked_list_size(&freeRegionBuckets[NUM_FREE_BUCKETS-1])-1;
  // Number of free "fastbin" blocks. For emmalloc, define this as the number
  // of blocks that are not in the largest pristine block.
  info.smblks = 0;
  // The total number of bytes in free "fastbin" blocks.
  info.fsmblks = 0;
  for(int i = 0; i < NUM_FREE_BUCKETS-1; ++i)
  {
    info.smblks += count_linked_list_size(&freeRegionBuckets[i])-1;
    info.fsmblks += count_linked_list_space(&freeRegionBuckets[i]);
  }

  info.hblks = 0; // Number of mmapped regions: always 0. (no mmap support)
  info.hblkhd = 0; // Amount of bytes in mmapped regions: always 0. (no mmap support)

  // Walk through all the heap blocks to report the following data:
  // The "highwater mark" for allocated space, that is, the maximum amount of
  // space that was ever allocated. Emmalloc does not want to pay code to
  // track this, so this is only reported from current allocation data, and
  // may not be accurate.
  info.usmblks = 0;
  info.uordblks = 0; // The total number of bytes used by in-use allocations.
  info.fordblks = 0; // The total number of bytes in free blocks.
  // The total amount of releasable free space at the top of the heap.
  // This is the maximum number of bytes that could ideally be released by malloc_trim(3).
  Region *lastActualRegion = prev_region((Region*)(listOfAllRegions->endPtr - sizeof(Region)));
  info.keepcost = region_is_free(lastActualRegion) ? lastActualRegion->size : 0;

  RootRegion *root = listOfAllRegions;
  while(root)
  {
    Region *r = (Region*)root;
    assert(debug_region_is_consistent(r));
    uint8_t *lastRegionEnd = root->endPtr;
    while((uint8_t*)r < lastRegionEnd)
    {
      assert(debug_region_is_consistent(r));

      if (region_is_free(r))
      {
        // Count only the payload of the free block towards free memory.
        info.fordblks += region_payload_end_ptr(r) - region_payload_start_ptr(r);
        // But the header data of the free block goes towards used memory.
        info.uordblks += REGION_HEADER_SIZE;
      }
      else
      {
        info.uordblks += r->size;
      }
      // Update approximate watermark data
      info.usmblks = MAX(info.usmblks, (intptr_t)r + r->size);
      r = next_region(r);
    }
    root = root->next;
  }

  MALLOC_RELEASE();
  return info;
}

struct mallinfo EMMALLOC_EXPORT mallinfo()
{
  return emmalloc_mallinfo();
}
// Note! This function is not fully multithread-safe: while this function is running, other threads should not be
// allowed to call sbrk()!
static int trim_dynamic_heap_reservation(size_t pad)
{
  ASSERT_MALLOC_IS_ACQUIRED();

  if (!listOfAllRegions)
    return 0; // emmalloc is not controlling any dynamic memory at all - cannot release memory.
  uint8_t *previousSbrkEndAddress = listOfAllRegions->endPtr;
  assert(sbrk(0) == previousSbrkEndAddress);
  size_t lastMemoryRegionSize = ((size_t*)previousSbrkEndAddress)[-1];
  assert(lastMemoryRegionSize == 16); // The last memory region should be a sentinel node of exactly 16 bytes in size.
  Region *endSentinelRegion = (Region*)(previousSbrkEndAddress - sizeof(Region));
  Region *lastActualRegion = prev_region(endSentinelRegion);

  // Round padding up to multiple of 4 bytes to keep sbrk() and memory region alignment intact.
  // Also have at least 8 bytes of payload so that we can form a full free region.
  size_t newRegionSize = (size_t)ALIGN_UP(pad, 4);
  if (pad > 0)
    newRegionSize += sizeof(Region) - (newRegionSize - pad);

  if (!region_is_free(lastActualRegion) || lastActualRegion->size <= newRegionSize)
    return 0; // Last actual region is in use, or caller desired to leave more free memory intact than there is.

  // This many bytes will be shrunk away.
  size_t shrinkAmount = lastActualRegion->size - newRegionSize;
  assert(HAS_ALIGNMENT(shrinkAmount, 4));

  unlink_from_free_list(lastActualRegion);
  // If pad == 0, we should delete the last free region altogether. If pad > 0,
  // shrink the last free region to the desired size.
  if (newRegionSize > 0)
  {
    create_free_region(lastActualRegion, newRegionSize);
    link_to_free_list(lastActualRegion);
  }

  // Recreate the sentinel region at the end of the last free region
  endSentinelRegion = (Region*)((uint8_t*)lastActualRegion + newRegionSize);
  create_used_region(endSentinelRegion, sizeof(Region));

  // And update the size field of the whole region block.
  listOfAllRegions->endPtr = (uint8_t*)endSentinelRegion + sizeof(Region);

  // Finally call sbrk() to shrink the memory area.
  void *oldSbrk = sbrk(-(intptr_t)shrinkAmount);
  assert((intptr_t)oldSbrk != -1); // Shrinking with sbrk() should never fail.
  assert(oldSbrk == previousSbrkEndAddress); // Another thread should not have raced to increase sbrk() on us!

  // All successful, and we actually trimmed memory!
  return 1;
}

int emmalloc_trim(size_t pad)
{
  MALLOC_ACQUIRE();
  int success = trim_dynamic_heap_reservation(pad);
  MALLOC_RELEASE();
  return success;
}

int EMMALLOC_EXPORT malloc_trim(size_t pad)
{
  return emmalloc_trim(pad);
}
size_t emmalloc_dynamic_heap_size()
{
  size_t dynamicHeapSize = 0;

  MALLOC_ACQUIRE();
  RootRegion *root = listOfAllRegions;
  while(root)
  {
    dynamicHeapSize += root->endPtr - (uint8_t*)root;
    root = root->next;
  }
  MALLOC_RELEASE();
  return dynamicHeapSize;
}
size_t emmalloc_free_dynamic_memory()
{
  size_t freeDynamicMemory = 0;

  int bucketIndex = 0;

  MALLOC_ACQUIRE();
  BUCKET_BITMASK_T bucketMask = freeRegionBucketsUsed;

  // Loop through each bucket that has free regions in it, based on bits set in freeRegionBucketsUsed bitmap.
  while(bucketMask)
  {
    BUCKET_BITMASK_T indexAdd = __builtin_ctzll(bucketMask);
    bucketIndex += indexAdd;
    bucketMask >>= indexAdd;
    for(Region *freeRegion = freeRegionBuckets[bucketIndex].next;
      freeRegion != &freeRegionBuckets[bucketIndex];
      freeRegion = freeRegion->next)
    {
      freeDynamicMemory += freeRegion->size - REGION_HEADER_SIZE;
    }
    ++bucketIndex;
    bucketMask >>= 1;
  }
  MALLOC_RELEASE();
  return freeDynamicMemory;
}
size_t emmalloc_compute_free_dynamic_memory_fragmentation_map(size_t freeMemorySizeMap[32])
{
  memset((void*)freeMemorySizeMap, 0, sizeof(freeMemorySizeMap[0])*32);

  size_t numFreeMemoryRegions = 0;
  int bucketIndex = 0;

  MALLOC_ACQUIRE();
  BUCKET_BITMASK_T bucketMask = freeRegionBucketsUsed;

  // Loop through each bucket that has free regions in it, based on bits set in freeRegionBucketsUsed bitmap.
  while(bucketMask)
  {
    BUCKET_BITMASK_T indexAdd = __builtin_ctzll(bucketMask);
    bucketIndex += indexAdd;
    bucketMask >>= indexAdd;
    for(Region *freeRegion = freeRegionBuckets[bucketIndex].next;
      freeRegion != &freeRegionBuckets[bucketIndex];
      freeRegion = freeRegion->next)
    {
      ++numFreeMemoryRegions;
      size_t freeDynamicMemory = freeRegion->size - REGION_HEADER_SIZE;
      if (freeDynamicMemory > 0)
        ++freeMemorySizeMap[31-__builtin_clz(freeDynamicMemory)];
      else
        ++freeMemorySizeMap[0];
    }
    ++bucketIndex;
    bucketMask >>= 1;
  }
  MALLOC_RELEASE();
  return numFreeMemoryRegions;
}
size_t emmalloc_unclaimed_heap_memory(void) {
  return emscripten_get_heap_max() - (size_t)sbrk(0);
}
// Define these to satisfy musl references.
void *__libc_malloc(size_t) __attribute__((alias("malloc")));
void __libc_free(void *) __attribute__((alias("free")));
void *__libc_calloc(size_t nmemb, size_t size) __attribute__((alias("calloc")));