// This is a version of dlmalloc.c ported to Rust. You can find the original
// source at ftp://g.oswego.edu/pub/misc/malloc.c
// The original source was written by Doug Lea and released to the public domain
    smallbins: [*mut Chunk; (NSMALLBINS + 1) * 2],
    treebins: [*mut TreeChunk; NTREEBINS],
    release_checks: usize,
pub const DLMALLOC_INIT: Dlmalloc = Dlmalloc {
    smallbins: [0 as *mut _; (NSMALLBINS + 1) * 2],
    treebins: [0 as *mut _; NTREEBINS],
    least_addr: 0 as *mut _,
// TODO: document this
const NSMALLBINS: usize = 32;
const NTREEBINS: usize = 32;
const SMALLBIN_SHIFT: usize = 3;
const TREEBIN_SHIFT: usize = 8;

// TODO: runtime configurable? documentation?
const DEFAULT_GRANULARITY: usize = 64 * 1024;
const DEFAULT_TRIM_THRESHOLD: usize = 2 * 1024 * 1024;
const MAX_RELEASE_CHECK_RATE: usize = 4095;
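// Illustrative note (not part of the original source): with SMALLBIN_SHIFT = 3
// and NSMALLBINS = 32, the small bins are spaced 8 bytes apart and cover chunk
// sizes below 32 << 3 == 256 bytes (see `is_small` below); larger chunks are
// kept in the tree bins instead.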
    child: [*mut TreeChunk; 2],
    parent: *mut TreeChunk,

#[derive(Clone, Copy)]
fn align_up(a: usize, alignment: usize) -> usize {
    debug_assert!(alignment.is_power_of_two());
    (a + (alignment - 1)) & !(alignment - 1)
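// Illustrative example (not part of the original source):
//     align_up(13, 8) == 16 and align_up(16, 8) == 16,
// since (13 + 7) & !7 == 16 and (16 + 7) & !7 == 16.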
fn left_bits(x: u32) -> u32 {
    (x << 1) | (!(x << 1) + 1)
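// Illustrative example (not part of the original source): `!(x << 1) + 1` is the
// two's-complement negation of `x << 1`, so the result keeps every bit at or
// above the lowest set bit of `x << 1`. For instance left_bits(0x20) == 0xFFFF_FFC0.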
fn least_bit(x: u32) -> u32 {
fn leftshift_for_tree_index(x: u32) -> u32 {
    if x == NTREEBINS - 1 {
        (mem::size_of::<usize>() * 8 - 1 - ((x >> 1) + TREEBIN_SHIFT - 2)) as u32
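// Illustrative example (not part of the original source, assuming a 64-bit
// target): for a tree index x = 4 this evaluates to 63 - ((4 >> 1) + 8 - 2) = 55,
// i.e. the shift that moves the bit used to pick the left/right child into the
// most significant bit (see the descent loops in `tmalloc_large` and
// `insert_large_chunk`).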
    pub fn new() -> Dlmalloc {

    // TODO: can we get rid of this?
    pub fn malloc_alignment(&self) -> usize {
        mem::size_of::<usize>() * 2

    fn chunk_overhead(&self) -> usize {
        mem::size_of::<usize>()

    fn mmap_chunk_overhead(&self) -> usize {
        2 * mem::size_of::<usize>()

    fn min_large_size(&self) -> usize {

    fn max_small_size(&self) -> usize {
        self.min_large_size() - 1

    fn max_small_request(&self) -> usize {
        self.max_small_size() - (self.malloc_alignment() - 1) - self.chunk_overhead()

    fn min_chunk_size(&self) -> usize {
        align_up(mem::size_of::<Chunk>(), self.malloc_alignment())

    fn min_request(&self) -> usize {
        self.min_chunk_size() - self.chunk_overhead() - 1

    fn max_request(&self) -> usize {
        (!self.min_chunk_size() + 1) << 2

    fn pad_request(&self, amt: usize) -> usize {
        align_up(amt + self.chunk_overhead(), self.malloc_alignment())
    fn small_index(&self, size: usize) -> u32 {
        (size >> SMALLBIN_SHIFT) as u32

    fn small_index2size(&self, idx: u32) -> usize {
        (idx as usize) << SMALLBIN_SHIFT
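    // Illustrative example (not part of the original source): a 72-byte chunk
    // maps to small bin small_index(72) == 72 >> 3 == 9, and
    // small_index2size(9) == 9 << 3 == 72, so each small bin holds chunks of
    // exactly one size.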
    fn is_small(&self, s: usize) -> bool {
        s >> SMALLBIN_SHIFT < NSMALLBINS

    fn is_aligned(&self, a: usize) -> bool {
        a & (self.malloc_alignment() - 1) == 0

    fn align_offset(&self, addr: *mut u8) -> usize {
        align_up(addr as usize, self.malloc_alignment()) - (addr as usize)

    fn top_foot_size(&self) -> usize {
        self.align_offset(unsafe { Chunk::to_mem(ptr::null_mut()) }) +
            self.pad_request(mem::size_of::<Segment>()) +
            self.min_chunk_size()

    fn mmap_foot_pad(&self) -> usize {
        4 * mem::size_of::<usize>()

    unsafe fn align_as_chunk(&self, ptr: *mut u8) -> *mut Chunk {
        let chunk = Chunk::to_mem(ptr as *mut Chunk);
        ptr.offset(self.align_offset(chunk) as isize) as *mut Chunk
    fn request2size(&self, req: usize) -> usize {
        if req < self.min_request() {
            self.min_chunk_size()
        } else {
            self.pad_request(req)
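    // Illustrative example (not part of the original source, assuming a 64-bit
    // target where malloc_alignment() == 16, chunk_overhead() == 8 and
    // min_chunk_size() == 32): request2size(10) == 32 because 10 < min_request(),
    // while request2size(40) == align_up(40 + 8, 16) == 48.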
    unsafe fn overhead_for(&self, p: *mut Chunk) -> usize {
        if Chunk::mmapped(p) {
            self.mmap_chunk_overhead()
        } else {
            self.chunk_overhead()

    pub unsafe fn calloc_must_clear(&self, ptr: *mut u8) -> bool {
        !sys::allocates_zeros() || !Chunk::mmapped(Chunk::from_mem(ptr))
    pub unsafe fn malloc(&mut self, size: usize) -> *mut u8 {
        self.check_malloc_state();

        if size <= self.max_small_request() {
            nb = self.request2size(size);
            let mut idx = self.small_index(nb);
            let smallbits = self.smallmap >> idx;

            // Check the bin for `idx` (the lowest bit) but also check the next
            // bin up to use that to satisfy our request, if needed.
            if smallbits & 0b11 != 0 {
                // If the lowest bit, our `idx`, is unset then bump up the
                // index as we'll be using the next bucket up.
                idx += !smallbits & 1;
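                // Illustrative example (not part of the original source): if bin
                // `idx` is empty but bin `idx + 1` is not, then `smallbits` ends
                // in 0b10, so `smallbits & 0b11 != 0` still succeeds and
                // `!smallbits & 1 == 1` bumps `idx` to the next bin up.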
                let b = self.smallbin_at(idx);
                self.unlink_first_small_chunk(b, p, idx);
                let smallsize = self.small_index2size(idx);
                Chunk::set_inuse_and_pinuse(p, smallsize);
                let ret = Chunk::to_mem(p);
                self.check_malloced_chunk(ret, nb);

            if nb > self.dvsize {
                // If there's some other bin with some memory, then we just use
                // the next smallest bin
                    let leftbits = (smallbits << idx) & left_bits(1 << idx);
                    let leastbit = least_bit(leftbits);
                    let i = leastbit.trailing_zeros();
                    let b = self.smallbin_at(i);
                    debug_assert_eq!(Chunk::size(p), self.small_index2size(i));
                    self.unlink_first_small_chunk(b, p, i);
                    let smallsize = self.small_index2size(i);
                    let rsize = smallsize - nb;
                    if mem::size_of::<usize>() != 4 && rsize < self.min_chunk_size() {
                        Chunk::set_inuse_and_pinuse(p, smallsize);
                        Chunk::set_size_and_pinuse_of_inuse_chunk(p, nb);
                        let r = Chunk::plus_offset(p, nb);
                        Chunk::set_size_and_pinuse_of_free_chunk(r, rsize);
                        self.replace_dv(r, rsize);
                    let ret = Chunk::to_mem(p);
                    self.check_malloced_chunk(ret, nb);
                } else if self.treemap != 0 {
                    let mem = self.tmalloc_small(nb);
                    self.check_malloced_chunk(mem, nb);
                    self.check_malloc_state();
        } else if size >= self.max_request() {
            // TODO: translate this to unsupported
            return ptr::null_mut()

            nb = self.pad_request(size);
            if self.treemap != 0 {
                let mem = self.tmalloc_large(nb);
                self.check_malloced_chunk(mem, nb);
                self.check_malloc_state();

        // use the `dv` node if we can, splitting it if necessary or otherwise
        // exhausting the entire chunk
        if nb <= self.dvsize {
            let rsize = self.dvsize - nb;
            if rsize >= self.min_chunk_size() {
                self.dv = Chunk::plus_offset(p, nb);
                Chunk::set_size_and_pinuse_of_free_chunk(r, rsize);
                Chunk::set_size_and_pinuse_of_inuse_chunk(p, nb);

                let dvs = self.dvsize;
                self.dv = ptr::null_mut();
                Chunk::set_inuse_and_pinuse(p, dvs);

            let ret = Chunk::to_mem(p);
            self.check_malloced_chunk(ret, nb);
            self.check_malloc_state();

        // Split the top node if we can
        if nb < self.topsize {
            let rsize = self.topsize;
            self.top = Chunk::plus_offset(p, nb);
            (*r).head = rsize | PINUSE;
            Chunk::set_size_and_pinuse_of_inuse_chunk(p, nb);
            self.check_top_chunk(self.top);
            let ret = Chunk::to_mem(p);
            self.check_malloced_chunk(ret, nb);
            self.check_malloc_state();
    unsafe fn sys_alloc(&mut self, size: usize) -> *mut u8 {
        self.check_malloc_state();
        let asize = align_up(size + self.top_foot_size() + self.malloc_alignment(),
                             DEFAULT_GRANULARITY);

        let (tbase, tsize, flags) = sys::alloc(asize);

        self.footprint += tsize;
        self.max_footprint = cmp::max(self.max_footprint, self.footprint);

        if self.top.is_null() {
            if self.least_addr.is_null() || tbase < self.least_addr {
                self.least_addr = tbase;
            self.seg.base = tbase;
            self.seg.size = tsize;
            self.seg.flags = flags;
            self.release_checks = MAX_RELEASE_CHECK_RATE;

            let tsize = tsize - self.top_foot_size();
            self.init_top(tbase as *mut Chunk, tsize);
            // let mn = Chunk::next(Chunk::from_mem(self as *mut _ as *mut u8));
            // let top_foot_size = self.top_foot_size();
            // self.init_top(mn, tbase as usize + tsize - mn as usize - top_foot_size);

            let mut sp = &mut self.seg as *mut Segment;
            while !sp.is_null() && tbase != Segment::top(sp) {

                !Segment::is_extern(sp) &&
                Segment::sys_flags(sp) == flags &&
                Segment::holds(sp, self.top as *mut u8)

                let size = self.topsize + tsize;
                self.init_top(ptr, size);

                self.least_addr = cmp::min(tbase, self.least_addr);
                let mut sp = &mut self.seg as *mut Segment;
                while !sp.is_null() && (*sp).base != tbase.offset(tsize as isize) {

                    !Segment::is_extern(sp) &&
                    Segment::sys_flags(sp) == flags

                    let oldbase = (*sp).base;
                    return self.prepend_alloc(tbase, oldbase, size);

                self.add_segment(tbase, tsize, flags);

        if size < self.topsize {
            self.topsize -= size;
            let rsize = self.topsize;
            self.top = Chunk::plus_offset(p, size);
            (*r).head = rsize | PINUSE;
            Chunk::set_size_and_pinuse_of_inuse_chunk(p, size);
            let ret = Chunk::to_mem(p);
            self.check_top_chunk(self.top);
            self.check_malloced_chunk(ret, size);
            self.check_malloc_state();

        return ptr::null_mut()
    pub unsafe fn realloc(&mut self, oldmem: *mut u8, bytes: usize) -> *mut u8 {
        if bytes >= self.max_request() {
            return ptr::null_mut()

        let nb = self.request2size(bytes);
        let oldp = Chunk::from_mem(oldmem);
        let newp = self.try_realloc_chunk(oldp, nb, true);

            self.check_inuse_chunk(newp);
            return Chunk::to_mem(newp)

        let ptr = self.malloc(bytes);

            let oc = Chunk::size(oldp) - self.overhead_for(oldp);
            ptr::copy_nonoverlapping(oldmem, ptr, cmp::min(oc, bytes));
    unsafe fn try_realloc_chunk(&mut self, p: *mut Chunk, nb: usize, can_move: bool)

        let oldsize = Chunk::size(p);
        let next = Chunk::plus_offset(p, oldsize);

        if Chunk::mmapped(p) {
            self.mmap_resize(p, nb, can_move)
        } else if oldsize >= nb {
            let rsize = oldsize - nb;
            if rsize >= self.min_chunk_size() {
                let r = Chunk::plus_offset(p, nb);
                Chunk::set_inuse(p, nb);
                Chunk::set_inuse(r, rsize);
                self.dispose_chunk(r, rsize);
        } else if next == self.top { // extend into top
            if oldsize + self.topsize <= nb {
                return ptr::null_mut()

            let newsize = oldsize + self.topsize;
            let newtopsize = newsize - nb;
            let newtop = Chunk::plus_offset(p, nb);
            Chunk::set_inuse(p, nb);
            (*newtop).head = newtopsize | PINUSE;
            self.topsize = newtopsize;
        } else if next == self.dv { // extend into dv
            let dvs = self.dvsize;
            if oldsize + dvs < nb {
                return ptr::null_mut()

            let dsize = oldsize + dvs - nb;
            if dsize >= self.min_chunk_size() {
                let r = Chunk::plus_offset(p, nb);
                let n = Chunk::plus_offset(r, dsize);
                Chunk::set_inuse(p, nb);
                Chunk::set_size_and_pinuse_of_free_chunk(r, dsize);
                Chunk::clear_pinuse(n);
            } else { // exhaust dv
                let newsize = oldsize + dvs;
                Chunk::set_inuse(p, newsize);
                self.dv = ptr::null_mut();
        } else if !Chunk::cinuse(next) { // extend into the next free chunk
            let nextsize = Chunk::size(next);
            if oldsize + nextsize < nb {
                return ptr::null_mut()

            let rsize = oldsize + nextsize - nb;
            self.unlink_chunk(next, nextsize);
            if rsize < self.min_chunk_size() {
                let newsize = oldsize + nextsize;
                Chunk::set_inuse(p, newsize);

                let r = Chunk::plus_offset(p, nb);
                Chunk::set_inuse(p, nb);
                Chunk::set_inuse(r, rsize);
                self.dispose_chunk(r, rsize);
    unsafe fn mmap_resize(&mut self, oldp: *mut Chunk, nb: usize, can_move: bool)

        let oldsize = Chunk::size(oldp);
        // Can't shrink mmap regions below a small size
        if self.is_small(nb) {
            return ptr::null_mut()

        // Keep the old chunk if it's big enough but not too big
        if oldsize >= nb + mem::size_of::<usize>() &&
            (oldsize - nb) <= (DEFAULT_GRANULARITY << 1)

        let offset = (*oldp).prev_foot;
        let oldmmsize = oldsize + offset + self.mmap_foot_pad();
        let newmmsize = self.mmap_align(nb + 6 * mem::size_of::<usize>() +
                                        self.malloc_alignment() - 1);
        let ptr = sys::remap((oldp as *mut u8).offset(-(offset as isize)),

            return ptr::null_mut();

        let newp = ptr.offset(offset as isize) as *mut Chunk;
        let psize = newmmsize - offset - self.mmap_foot_pad();
        (*newp).head = psize;
        (*Chunk::plus_offset(newp, psize)).head = Chunk::fencepost_head();
        (*Chunk::plus_offset(newp, psize + mem::size_of::<usize>())).head = 0;
        self.least_addr = cmp::min(ptr, self.least_addr);
        self.footprint = self.footprint + newmmsize - oldmmsize;
        self.max_footprint = cmp::max(self.max_footprint, self.footprint);
        self.check_mmapped_chunk(newp);
    fn mmap_align(&self, a: usize) -> usize {
        align_up(a, sys::page_size())
    // Only call this with power-of-two alignment and alignment >
    // `self.malloc_alignment()`
    pub unsafe fn memalign(&mut self, mut alignment: usize, bytes: usize)

        if alignment < self.min_chunk_size() {
            alignment = self.min_chunk_size();

        if bytes >= self.max_request() - alignment {
            return ptr::null_mut()

        let nb = self.request2size(bytes);
        let req = nb + alignment + self.min_chunk_size() - self.chunk_overhead();
        let mem = self.malloc(req);

        let mut p = Chunk::from_mem(mem);
        if mem as usize & (alignment - 1) != 0 {
            // Here we find an aligned spot inside the chunk. Since we need to
            // give back leading space in a chunk of at least `min_chunk_size`,
            // if the first calculation places us at a spot with less than
            // `min_chunk_size` leader we can move to the next aligned spot.
            // We've allocated enough total room so that this is always possible.
            let br = Chunk::from_mem(((mem as usize + alignment - 1) & (!alignment + 1)) as *mut u8);
            let pos = if (br as usize - p as usize) > self.min_chunk_size() {
                (br as *mut u8).offset(alignment as isize)

            let newp = pos as *mut Chunk;
            let leadsize = pos as usize - p as usize;
            let newsize = Chunk::size(p) - leadsize;

            // for mmapped chunks just adjust the offset
            if Chunk::mmapped(p) {
                (*newp).prev_foot = (*p).prev_foot + leadsize;
                (*newp).head = newsize;
            } else {
                // give back the leader, use the rest
                Chunk::set_inuse(newp, newsize);
                Chunk::set_inuse(p, leadsize);
                self.dispose_chunk(p, leadsize);

        // give back spare room at the end
        if !Chunk::mmapped(p) {
            let size = Chunk::size(p);
            if size > nb + self.min_chunk_size() {
                let remainder_size = size - nb;
                let remainder = Chunk::plus_offset(p, nb);
                Chunk::set_inuse(p, nb);
                Chunk::set_inuse(remainder, remainder_size);
                self.dispose_chunk(remainder, remainder_size);

        let mem = Chunk::to_mem(p);
        debug_assert!(Chunk::size(p) >= nb);
        debug_assert_eq!(align_up(mem as usize, alignment), mem as usize);
        self.check_inuse_chunk(p);
    // consolidate and bin a chunk, differs from exported versions of free
    // mainly in that the chunk need not be marked as inuse
    unsafe fn dispose_chunk(&mut self, mut p: *mut Chunk, mut psize: usize) {
        let next = Chunk::plus_offset(p, psize);
        if !Chunk::pinuse(p) {
            let prevsize = (*p).prev_foot;
            if Chunk::mmapped(p) {
                psize += prevsize + self.mmap_foot_pad();
                if sys::free((p as *mut u8).offset(-(prevsize as isize)), psize) {
                    self.footprint -= psize;

            let prev = Chunk::minus_offset(p, prevsize);

                self.unlink_chunk(p, prevsize);
            } else if (*next).head & INUSE == INUSE {
                Chunk::set_free_with_pinuse(p, psize, next);

        if !Chunk::cinuse(next) { // consolidate forward
            if next == self.top {
                self.topsize += psize;
                let tsize = self.topsize;
                (*p).head = tsize | PINUSE;
                    self.dv = ptr::null_mut();
            } else if next == self.dv {
                self.dvsize += psize;
                let dsize = self.dvsize;
                Chunk::set_size_and_pinuse_of_free_chunk(p, dsize);

                let nsize = Chunk::size(next);
                self.unlink_chunk(next, nsize);
                Chunk::set_size_and_pinuse_of_free_chunk(p, psize);

            Chunk::set_free_with_pinuse(p, psize, next);

        self.insert_chunk(p, psize);
    unsafe fn init_top(&mut self, ptr: *mut Chunk, size: usize) {
        let offset = self.align_offset(Chunk::to_mem(ptr));
        let p = Chunk::plus_offset(ptr, offset);
        let size = size - offset;

        (*p).head = size | PINUSE;
        (*Chunk::plus_offset(p, size)).head = self.top_foot_size();
        self.trim_check = DEFAULT_TRIM_THRESHOLD;
    unsafe fn init_bins(&mut self) {
        for i in 0..NSMALLBINS as u32 {
            let bin = self.smallbin_at(i);
    unsafe fn prepend_alloc(&mut self,
                            size: usize) -> *mut u8 {
        let p = self.align_as_chunk(newbase);
        let mut oldfirst = self.align_as_chunk(oldbase);
        let psize = oldfirst as usize - p as usize;
        let q = Chunk::plus_offset(p, size);
        let mut qsize = psize - size;
        Chunk::set_size_and_pinuse_of_inuse_chunk(p, size);

        debug_assert!(oldfirst > q);
        debug_assert!(Chunk::pinuse(oldfirst));
        debug_assert!(qsize >= self.min_chunk_size());

        // consolidate the remainder with the first chunk of the old base
        if oldfirst == self.top {
            self.topsize += qsize;
            let tsize = self.topsize;
            (*q).head = tsize | PINUSE;
            self.check_top_chunk(q);
        } else if oldfirst == self.dv {
            self.dvsize += qsize;
            let dsize = self.dvsize;
            Chunk::set_size_and_pinuse_of_free_chunk(q, dsize);

            if !Chunk::inuse(oldfirst) {
                let nsize = Chunk::size(oldfirst);
                self.unlink_chunk(oldfirst, nsize);
                oldfirst = Chunk::plus_offset(oldfirst, nsize);
            Chunk::set_free_with_pinuse(q, qsize, oldfirst);
            self.insert_chunk(q, qsize);
            self.check_free_chunk(q);

        let ret = Chunk::to_mem(p);
        self.check_malloced_chunk(ret, size);
        self.check_malloc_state();
    // add a segment to hold a new noncontiguous region
    unsafe fn add_segment(&mut self, tbase: *mut u8, tsize: usize, flags: u32) {
        // TODO: what in the world is this function doing

        // Determine locations and sizes of segment, fenceposts, and the old top
        let old_top = self.top as *mut u8;
        let oldsp = self.segment_holding(old_top);
        let old_end = Segment::top(oldsp);
        let ssize = self.pad_request(mem::size_of::<Segment>());
        let offset = ssize + mem::size_of::<usize>() * 4 + self.malloc_alignment() - 1;
        let rawsp = old_end.offset(-(offset as isize));
        let offset = self.align_offset(Chunk::to_mem(rawsp as *mut Chunk));
        let asp = rawsp.offset(offset as isize);
        let csp = if asp < old_top.offset(self.min_chunk_size() as isize) {

        let sp = csp as *mut Chunk;
        let ss = Chunk::to_mem(sp) as *mut Segment;
        let tnext = Chunk::plus_offset(sp, ssize);

        // reset the top to our new space
        let size = tsize - self.top_foot_size();
        self.init_top(tbase as *mut Chunk, size);

        // set up our segment record
        debug_assert!(self.is_aligned(ss as usize));
        Chunk::set_size_and_pinuse_of_inuse_chunk(sp, ssize);
        *ss = self.seg; // push our current record
        self.seg.base = tbase;
        self.seg.size = tsize;
        self.seg.flags = flags;

        // insert trailing fences
        let nextp = Chunk::plus_offset(p, mem::size_of::<usize>());
        (*p).head = Chunk::fencepost_head();
        if (&(*nextp).head as *const usize as *mut u8) < old_end {

        debug_assert!(nfences >= 2);

        // insert the rest of the old top into a bin as an ordinary free chunk
        let q = old_top as *mut Chunk;
        let psize = csp as usize - old_top as usize;
        let tn = Chunk::plus_offset(q, psize);
        Chunk::set_free_with_pinuse(q, psize, tn);
        self.insert_chunk(q, psize);

        self.check_top_chunk(self.top);
        self.check_malloc_state();
    unsafe fn segment_holding(&self, ptr: *mut u8) -> *mut Segment {
        let mut sp = &self.seg as *const Segment as *mut Segment;
        while !sp.is_null() {
            if (*sp).base <= ptr && ptr < Segment::top(sp) {
    unsafe fn tmalloc_small(&mut self, size: usize) -> *mut u8 {
        let leastbit = least_bit(self.treemap);
        let i = leastbit.trailing_zeros();
        let mut v = *self.treebin_at(i);

        let mut rsize = Chunk::size(TreeChunk::chunk(t)) - size;

            t = TreeChunk::leftmost_child(t);

            let trem = Chunk::size(TreeChunk::chunk(t)) - size;

        let vc = TreeChunk::chunk(v);
        let r = Chunk::plus_offset(vc, size) as *mut TreeChunk;
        debug_assert_eq!(Chunk::size(vc), rsize + size);
        self.unlink_large_chunk(v);
        if rsize < self.min_chunk_size() {
            Chunk::set_inuse_and_pinuse(vc, rsize + size);

            let rc = TreeChunk::chunk(r);
            Chunk::set_size_and_pinuse_of_inuse_chunk(vc, size);
            Chunk::set_size_and_pinuse_of_free_chunk(rc, rsize);
            self.replace_dv(rc, rsize);
    unsafe fn tmalloc_large(&mut self, size: usize) -> *mut u8 {
        let mut v = ptr::null_mut();
        let mut rsize = !size + 1;
        let idx = self.compute_tree_index(size);
        let mut t = *self.treebin_at(idx);

        // Traverse the tree for this bin looking for a node with size
        // equal to the `size` above.
        let mut sizebits = size << leftshift_for_tree_index(idx);
        // Keep track of the deepest untaken right subtree
        let mut rst = ptr::null_mut();

            let csize = Chunk::size(TreeChunk::chunk(t));
            if csize >= size && csize - size < rsize {
                rsize = csize - size;

            let rt = (*t).child[1];
            t = (*t).child[(sizebits >> (mem::size_of::<usize>() * 8 - 1)) & 1];
            if !rt.is_null() && rt != t {

        // Reset `t` to the least subtree holding sizes greater than
        // the `size` above, breaking out

        // Set t to the root of the next non-empty treebin
        if t.is_null() && v.is_null() {
            let leftbits = left_bits(1 << idx) & self.treemap;

            let leastbit = least_bit(leftbits);
            let i = leastbit.trailing_zeros();
            t = *self.treebin_at(i);

        // Find the smallest of this tree or subtree
            let csize = Chunk::size(TreeChunk::chunk(t));
            if csize >= size && csize - size < rsize {
                rsize = csize - size;

            t = TreeChunk::leftmost_child(t);

        // If dv is a better fit, then return null so malloc will use it
        if v.is_null() || rsize + size >= self.dvsize {
            return ptr::null_mut()

        let vc = TreeChunk::chunk(v);
        let r = Chunk::plus_offset(vc, size);
        debug_assert_eq!(Chunk::size(vc), rsize + size);
        self.unlink_large_chunk(v);
        if rsize < self.min_chunk_size() {
            Chunk::set_inuse_and_pinuse(vc, rsize + size);

            Chunk::set_size_and_pinuse_of_inuse_chunk(vc, size);
            Chunk::set_size_and_pinuse_of_free_chunk(r, rsize);
            self.insert_chunk(r, rsize);
    unsafe fn smallbin_at(&mut self, idx: u32) -> *mut Chunk {
        debug_assert!(((idx * 2) as usize) < self.smallbins.len());
        &mut *self.smallbins.get_unchecked_mut((idx as usize) * 2)
            as *mut *mut Chunk as *mut Chunk

    unsafe fn treebin_at(&mut self, idx: u32) -> *mut *mut TreeChunk {
        debug_assert!((idx as usize) < self.treebins.len());
        &mut *self.treebins.get_unchecked_mut(idx as usize)
    fn compute_tree_index(&self, size: usize) -> u32 {
        let x = size >> TREEBIN_SHIFT;
        } else if x > 0xffff {

            let k = mem::size_of_val(&x) * 8 - 1 - (x.leading_zeros() as usize);
            ((k << 1) + (size >> (k + TREEBIN_SHIFT - 1) & 1)) as u32
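    // Illustrative example (not part of the original source, 64-bit): for
    // size == 1536 we get x = 1536 >> TREEBIN_SHIFT = 6,
    // k = 63 - leading_zeros(6) = 2, and the index is
    // (2 << 1) + ((1536 >> (2 + TREEBIN_SHIFT - 1)) & 1) = 4 + 1 = 5, the bin
    // covering sizes in [1536, 2048) (compare `min_size_for_tree_index` below).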
    unsafe fn unlink_first_small_chunk(&mut self,

        let ptr = (*next).prev;
        debug_assert!(next != head);
        debug_assert!(next != ptr);
        debug_assert_eq!(Chunk::size(next), self.small_index2size(idx));

            self.clear_smallmap(idx);
    unsafe fn replace_dv(&mut self, chunk: *mut Chunk, size: usize) {
        let dvs = self.dvsize;
        debug_assert!(self.is_small(dvs));

            self.insert_small_chunk(dv, dvs);
    unsafe fn insert_chunk(&mut self, chunk: *mut Chunk, size: usize) {
        if self.is_small(size) {
            self.insert_small_chunk(chunk, size);
        } else {
            self.insert_large_chunk(chunk as *mut TreeChunk, size);

    unsafe fn insert_small_chunk(&mut self, chunk: *mut Chunk, size: usize) {
        let idx = self.small_index(size);
        let head = self.smallbin_at(idx);

        debug_assert!(size >= self.min_chunk_size());
        if !self.smallmap_is_marked(idx) {
            self.mark_smallmap(idx);

        (*head).prev = chunk;
        (*chunk).next = head;
    unsafe fn insert_large_chunk(&mut self, chunk: *mut TreeChunk, size: usize) {
        let idx = self.compute_tree_index(size);
        let h = self.treebin_at(idx);
        (*chunk).index = idx;
        (*chunk).child[0] = ptr::null_mut();
        (*chunk).child[1] = ptr::null_mut();
        let chunkc = TreeChunk::chunk(chunk);
        if !self.treemap_is_marked(idx) {
            self.mark_treemap(idx);

            (*chunk).parent = h as *mut TreeChunk; // TODO: dubious?
            (*chunkc).next = chunkc;
            (*chunkc).prev = chunkc;

            let mut k = size << leftshift_for_tree_index(idx);

                if Chunk::size(TreeChunk::chunk(t)) != size {
                    let c = &mut (*t).child[(k >> mem::size_of::<usize>() * 8 - 1) & 1];

                    (*chunk).parent = t;
                    (*chunkc).next = chunkc;
                    (*chunkc).prev = chunkc;

                    let tc = TreeChunk::chunk(t);
                    (*tc).prev = chunkc;
                    (*chunkc).next = tc;
                    (*chunk).parent = ptr::null_mut();
    unsafe fn smallmap_is_marked(&self, idx: u32) -> bool {
        self.smallmap & (1 << idx) != 0

    unsafe fn mark_smallmap(&mut self, idx: u32) {
        self.smallmap |= 1 << idx;

    unsafe fn clear_smallmap(&mut self, idx: u32) {
        self.smallmap &= !(1 << idx);

    unsafe fn treemap_is_marked(&self, idx: u32) -> bool {
        self.treemap & (1 << idx) != 0

    unsafe fn mark_treemap(&mut self, idx: u32) {
        self.treemap |= 1 << idx;

    unsafe fn clear_treemap(&mut self, idx: u32) {
        self.treemap &= !(1 << idx);
    unsafe fn unlink_chunk(&mut self, chunk: *mut Chunk, size: usize) {
        if self.is_small(size) {
            self.unlink_small_chunk(chunk, size)
        } else {
            self.unlink_large_chunk(chunk as *mut TreeChunk);

    unsafe fn unlink_small_chunk(&mut self, chunk: *mut Chunk, size: usize) {
        let f = (*chunk).prev;
        let b = (*chunk).next;
        let idx = self.small_index(size);
        debug_assert!(chunk != b);
        debug_assert!(chunk != f);
        debug_assert_eq!(Chunk::size(chunk), self.small_index2size(idx));

            self.clear_smallmap(idx);
    unsafe fn unlink_large_chunk(&mut self, chunk: *mut TreeChunk) {
        let xp = (*chunk).parent;
        if TreeChunk::next(chunk) != chunk {
            let f = TreeChunk::prev(chunk);
            r = TreeChunk::next(chunk);
            (*f).chunk.next = TreeChunk::chunk(r);
            (*r).chunk.prev = TreeChunk::chunk(f);

            let mut rp = &mut (*chunk).child[1];

                rp = &mut (*chunk).child[0];

                let mut cp = &mut (**rp).child[1];

                    cp = &mut (**rp).child[0];

                *rp = ptr::null_mut();

            let h = self.treebin_at((*chunk).index);

                self.clear_treemap((*chunk).index);

            if (*xp).child[0] == chunk {

        let c0 = (*chunk).child[0];

        let c1 = (*chunk).child[1];
    pub unsafe fn free(&mut self, mem: *mut u8) {
        self.check_malloc_state();

        let mut p = Chunk::from_mem(mem);
        let mut psize = Chunk::size(p);
        let next = Chunk::plus_offset(p, psize);
        if !Chunk::pinuse(p) {
            let prevsize = (*p).prev_foot;

            if Chunk::mmapped(p) {
                psize += prevsize + self.mmap_foot_pad();
                if sys::free((p as *mut u8).offset(-(prevsize as isize)), psize) {
                    self.footprint -= psize;

            let prev = Chunk::minus_offset(p, prevsize);

                self.unlink_chunk(p, prevsize);
            } else if (*next).head & INUSE == INUSE {
                self.dvsize = psize;
                Chunk::set_free_with_pinuse(p, psize, next);

        // Consolidate forward if we can
        if !Chunk::cinuse(next) {
            if next == self.top {
                self.topsize += psize;
                let tsize = self.topsize;
                (*p).head = tsize | PINUSE;
                    self.dv = ptr::null_mut();
                if self.should_trim(tsize) {
            } else if next == self.dv {
                self.dvsize += psize;
                let dsize = self.dvsize;
                Chunk::set_size_and_pinuse_of_free_chunk(p, dsize);

                let nsize = Chunk::size(next);
                self.unlink_chunk(next, nsize);
                Chunk::set_size_and_pinuse_of_free_chunk(p, psize);
                    self.dvsize = psize;

            Chunk::set_free_with_pinuse(p, psize, next);

        if self.is_small(psize) {
            self.insert_small_chunk(p, psize);
            self.check_free_chunk(p);

            self.insert_large_chunk(p as *mut TreeChunk, psize);
            self.check_free_chunk(p);
            self.release_checks -= 1;
            if self.release_checks == 0 {
                self.release_unused_segments();
    fn should_trim(&self, size: usize) -> bool {
        size > self.trim_check
    unsafe fn sys_trim(&mut self, mut pad: usize) -> bool {
        let mut released = 0;
        if pad < self.max_request() && !self.top.is_null() {
            pad += self.top_foot_size();
            if self.topsize > pad {
                let unit = DEFAULT_GRANULARITY;
                let extra = ((self.topsize - pad + unit - 1) / unit - 1) * unit;
                let sp = self.segment_holding(self.top as *mut u8);
                debug_assert!(!sp.is_null());

                if !Segment::is_extern(sp) {
                    if Segment::can_release_part(sp) {
                        if (*sp).size >= extra && !self.has_segment_link(sp) {
                            let newsize = (*sp).size - extra;
                            if sys::free_part((*sp).base, (*sp).size, newsize) {

                    (*sp).size -= released;
                    self.footprint -= released;
                    let topsize = self.topsize - released;
                    self.init_top(top, topsize);
                    self.check_top_chunk(self.top);

            released += self.release_unused_segments();

            if released == 0 && self.topsize > self.trim_check {
                self.trim_check = usize::max_value();
    unsafe fn has_segment_link(&self, ptr: *mut Segment) -> bool {
        let mut sp = &self.seg as *const Segment as *mut Segment;
        while !sp.is_null() {
            if Segment::holds(ptr, sp as *mut u8) {
    /// Unmap and unlink any mapped segments that don't contain used chunks
    unsafe fn release_unused_segments(&mut self) -> usize {
        let mut released = 0;

        let mut pred = &mut self.seg as *mut Segment;
        let mut sp = (*pred).next;
        while !sp.is_null() {
            let base = (*sp).base;
            let size = (*sp).size;
            let next = (*sp).next;

            if Segment::can_release_part(sp) && !Segment::is_extern(sp) {
                let p = self.align_as_chunk(base);
                let psize = Chunk::size(p);
                // We can unmap if the first chunk holds the entire segment and
                let chunk_top = (p as *mut u8).offset(psize as isize);
                let top = base.offset((size - self.top_foot_size()) as isize);
                if !Chunk::inuse(p) && chunk_top >= top {
                    let tp = p as *mut TreeChunk;
                    debug_assert!(Segment::holds(sp, sp as *mut u8));

                        self.dv = ptr::null_mut();

                        self.unlink_large_chunk(tp);

                    if sys::free(base, size) {
                        self.footprint -= size;
                        // unlink our obsolete record

                        // back out if we can't unmap
                        self.insert_large_chunk(tp, psize);

        self.release_checks = if nsegs > MAX_RELEASE_CHECK_RATE {
            MAX_RELEASE_CHECK_RATE
    unsafe fn check_any_chunk(&self, p: *mut Chunk) {
        if !cfg!(debug_assertions) {

        debug_assert!(self.is_aligned(Chunk::to_mem(p) as usize) ||
                      (*p).head == Chunk::fencepost_head());
        debug_assert!(p as *mut u8 >= self.least_addr);
    unsafe fn check_top_chunk(&self, p: *mut Chunk) {
        if !cfg!(debug_assertions) {

        let sp = self.segment_holding(p as *mut u8);
        let sz = (*p).head & !INUSE;
        debug_assert!(!sp.is_null());
        debug_assert!(self.is_aligned(Chunk::to_mem(p) as usize) ||
                      (*p).head == Chunk::fencepost_head());
        debug_assert!(p as *mut u8 >= self.least_addr);
        debug_assert_eq!(sz, self.topsize);
        debug_assert!(sz > 0);
        debug_assert_eq!(sz, (*sp).base as usize + (*sp).size - p as usize - self.top_foot_size());
        debug_assert!(Chunk::pinuse(p));
        debug_assert!(!Chunk::pinuse(Chunk::plus_offset(p, sz)));
    unsafe fn check_malloced_chunk(&self, mem: *mut u8, s: usize) {
        if !cfg!(debug_assertions) {

        let p = Chunk::from_mem(mem);
        let sz = (*p).head & !INUSE;
        self.check_inuse_chunk(p);
        debug_assert_eq!(align_up(sz, self.malloc_alignment()), sz);
        debug_assert!(sz >= self.min_chunk_size());
        debug_assert!(sz >= s);
        debug_assert!(Chunk::mmapped(p) || sz < (s + self.min_chunk_size()));
    unsafe fn check_inuse_chunk(&self, p: *mut Chunk) {
        self.check_any_chunk(p);
        debug_assert!(Chunk::inuse(p));
        debug_assert!(Chunk::pinuse(Chunk::next(p)));
        debug_assert!(Chunk::mmapped(p) || Chunk::pinuse(p) ||
                      Chunk::next(Chunk::prev(p)) == p);
        if Chunk::mmapped(p) {
            self.check_mmapped_chunk(p);
    unsafe fn check_mmapped_chunk(&self, p: *mut Chunk) {
        if !cfg!(debug_assertions) {

        let sz = Chunk::size(p);
        let len = sz + (*p).prev_foot + self.mmap_foot_pad();
        debug_assert!(Chunk::mmapped(p));
        debug_assert!(self.is_aligned(Chunk::to_mem(p) as usize) ||
                      (*p).head == Chunk::fencepost_head());
        debug_assert!(p as *mut u8 >= self.least_addr);
        debug_assert!(!self.is_small(sz));
        debug_assert_eq!(align_up(len, sys::page_size()), len);
        debug_assert_eq!((*Chunk::plus_offset(p, sz)).head, Chunk::fencepost_head());
        debug_assert_eq!((*Chunk::plus_offset(p, sz + mem::size_of::<usize>())).head, 0);
    unsafe fn check_free_chunk(&self, p: *mut Chunk) {
        if !cfg!(debug_assertions) {

        let sz = Chunk::size(p);
        let next = Chunk::plus_offset(p, sz);
        self.check_any_chunk(p);
        debug_assert!(!Chunk::inuse(p));
        debug_assert!(!Chunk::pinuse(Chunk::next(p)));
        debug_assert!(!Chunk::mmapped(p));
        if p != self.dv && p != self.top {
            if sz >= self.min_chunk_size() {
                debug_assert_eq!(align_up(sz, self.malloc_alignment()), sz);
                debug_assert!(self.is_aligned(Chunk::to_mem(p) as usize));
                debug_assert_eq!((*next).prev_foot, sz);
                debug_assert!(Chunk::pinuse(p));
                debug_assert!(next == self.top || Chunk::inuse(next));
                debug_assert_eq!((*(*p).next).prev, p);
                debug_assert_eq!((*(*p).prev).next, p);
            } else {
                debug_assert_eq!(sz, mem::size_of::<usize>());
    unsafe fn check_malloc_state(&mut self) {
        if !cfg!(debug_assertions) {

        for i in 0..NSMALLBINS {
            self.check_smallbin(i as u32);
        for i in 0..NTREEBINS {
            self.check_treebin(i as u32);
        if self.dvsize != 0 {
            self.check_any_chunk(self.dv);
            debug_assert_eq!(self.dvsize, Chunk::size(self.dv));
            debug_assert!(self.dvsize >= self.min_chunk_size());
            debug_assert!(!self.bin_find(dv));
        if !self.top.is_null() {
            self.check_top_chunk(self.top);
            debug_assert!(self.topsize > 0);
            debug_assert!(!self.bin_find(top));

        let total = self.traverse_and_check();
        debug_assert!(total <= self.footprint);
        debug_assert!(self.footprint <= self.max_footprint);
    unsafe fn check_smallbin(&mut self, idx: u32) {
        if !cfg!(debug_assertions) {

        let b = self.smallbin_at(idx);
        let mut p = (*b).next;
        let empty = self.smallmap & (1 << idx) == 0;

            debug_assert!(empty)

            let size = Chunk::size(p);
            self.check_free_chunk(p);
            debug_assert_eq!(self.small_index(size), idx);
            debug_assert!((*p).next == b ||
                          Chunk::size((*p).next) == Chunk::size(p));
            let q = Chunk::next(p);
            if (*q).head != Chunk::fencepost_head() {
                self.check_inuse_chunk(q);
    unsafe fn check_treebin(&mut self, idx: u32) {
        if !cfg!(debug_assertions) {

        let tb = self.treebin_at(idx);
        let empty = self.treemap & (1 << idx) == 0;

            debug_assert!(empty);
    unsafe fn check_tree(&mut self, t: *mut TreeChunk) {
        if !cfg!(debug_assertions) {

        let tc = TreeChunk::chunk(t);
        let tindex = (*t).index;
        let tsize = Chunk::size(tc);
        let idx = self.compute_tree_index(tsize);
        debug_assert_eq!(tindex, idx);
        debug_assert!(tsize >= self.min_large_size());
        debug_assert!(tsize >= self.min_size_for_tree_index(idx));
        debug_assert!(idx == NTREEBINS as u32 - 1 ||
                      tsize < self.min_size_for_tree_index(idx + 1));

        let mut head = ptr::null_mut();

            let uc = TreeChunk::chunk(u);
            self.check_any_chunk(uc);
            debug_assert_eq!((*u).index, tindex);
            debug_assert_eq!(Chunk::size(uc), tsize);
            debug_assert!(!Chunk::inuse(uc));
            debug_assert!(!Chunk::pinuse(Chunk::next(uc)));
            debug_assert_eq!((*(*uc).next).prev, uc);
            debug_assert_eq!((*(*uc).prev).next, uc);
            let left = (*u).child[0];
            let right = (*u).child[1];
            if (*u).parent.is_null() {
                debug_assert!(left.is_null());
                debug_assert!(right.is_null());

                debug_assert!(head.is_null());

                debug_assert!((*u).parent != u);
                debug_assert!((*(*u).parent).child[0] == u ||
                              (*(*u).parent).child[1] == u ||
                              *((*u).parent as *mut *mut TreeChunk) == u);
                if !left.is_null() {
                    debug_assert_eq!((*left).parent, u);
                    debug_assert!(left != u);
                    self.check_tree(left);
                if !right.is_null() {
                    debug_assert_eq!((*right).parent, u);
                    debug_assert!(right != u);
                    self.check_tree(right);
                if !left.is_null() && !right.is_null() {
                    debug_assert!(Chunk::size(TreeChunk::chunk(left)) <
                                  Chunk::size(TreeChunk::chunk(right)));

            u = TreeChunk::prev(u);

        debug_assert!(!head.is_null());
    fn min_size_for_tree_index(&self, idx: u32) -> usize {
        let idx = idx as usize;
        (1 << ((idx >> 1) + TREEBIN_SHIFT)) |
            ((idx & 1) << ((idx >> 1) + TREEBIN_SHIFT - 1))
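    // Illustrative example (not part of the original source):
    // min_size_for_tree_index(5) == (1 << (2 + TREEBIN_SHIFT)) |
    // (1 << (2 + TREEBIN_SHIFT - 1)) == 1024 | 512 == 1536, matching the index
    // computed for size 1536 in `compute_tree_index` above.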
    unsafe fn bin_find(&mut self, chunk: *mut Chunk) -> bool {
        let size = Chunk::size(chunk);
        if self.is_small(size) {
            let sidx = self.small_index(size);
            let b = self.smallbin_at(sidx);
            if !self.smallmap_is_marked(sidx) {

            let tidx = self.compute_tree_index(size);
            if !self.treemap_is_marked(tidx) {

            let mut t = *self.treebin_at(tidx);
            let mut sizebits = size << leftshift_for_tree_index(tidx);
            while !t.is_null() && Chunk::size(TreeChunk::chunk(t)) != size {
                t = (*t).child[(sizebits >> (mem::size_of::<usize>() * 8 - 1)) & 1];

            let chunk = chunk as *mut TreeChunk;

                u = TreeChunk::prev(u);
    unsafe fn traverse_and_check(&self) -> usize {
const PINUSE: usize = 1 << 0;
const CINUSE: usize = 1 << 1;
const FLAG4: usize = 1 << 2;
const INUSE: usize = PINUSE | CINUSE;
const FLAG_BITS: usize = PINUSE | CINUSE | FLAG4;
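// Illustrative note (not part of the original source): a chunk's `head` word
// stores its size in the high bits and these flags in the low bits; because
// every chunk size is a multiple of the malloc alignment, the flag bits never
// collide with the size. For example, a 48-byte in-use chunk whose predecessor
// is also in use has head == 48 | PINUSE | CINUSE == 0x33.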
    unsafe fn fencepost_head() -> usize {
        INUSE | mem::size_of::<usize>()

    unsafe fn size(me: *mut Chunk) -> usize {
        (*me).head & !FLAG_BITS

    unsafe fn next(me: *mut Chunk) -> *mut Chunk {
        (me as *mut u8).offset(((*me).head & !FLAG_BITS) as isize) as *mut Chunk

    unsafe fn prev(me: *mut Chunk) -> *mut Chunk {
        (me as *mut u8).offset(-((*me).prev_foot as isize)) as *mut Chunk

    unsafe fn cinuse(me: *mut Chunk) -> bool {
        (*me).head & CINUSE != 0

    unsafe fn pinuse(me: *mut Chunk) -> bool {
        (*me).head & PINUSE != 0

    unsafe fn clear_pinuse(me: *mut Chunk) {
        (*me).head &= !PINUSE;

    unsafe fn inuse(me: *mut Chunk) -> bool {
        (*me).head & INUSE != PINUSE

    unsafe fn mmapped(me: *mut Chunk) -> bool {
        (*me).head & INUSE == 0

    unsafe fn set_inuse(me: *mut Chunk, size: usize) {
        (*me).head = ((*me).head & PINUSE) | size | CINUSE;
        let next = Chunk::plus_offset(me, size);
        (*next).head |= PINUSE;

    unsafe fn set_inuse_and_pinuse(me: *mut Chunk, size: usize) {
        (*me).head = PINUSE | size | CINUSE;
        let next = Chunk::plus_offset(me, size);
        (*next).head |= PINUSE;

    unsafe fn set_size_and_pinuse_of_inuse_chunk(me: *mut Chunk, size: usize) {
        (*me).head = size | PINUSE | CINUSE;

    unsafe fn set_size_and_pinuse_of_free_chunk(me: *mut Chunk, size: usize) {
        (*me).head = size | PINUSE;
        Chunk::set_foot(me, size);

    unsafe fn set_free_with_pinuse(p: *mut Chunk, size: usize, n: *mut Chunk) {
        Chunk::clear_pinuse(n);
        Chunk::set_size_and_pinuse_of_free_chunk(p, size);

    unsafe fn set_foot(me: *mut Chunk, size: usize) {
        let next = Chunk::plus_offset(me, size);
        (*next).prev_foot = size;

    unsafe fn plus_offset(me: *mut Chunk, offset: usize) -> *mut Chunk {
        (me as *mut u8).offset(offset as isize) as *mut Chunk

    unsafe fn minus_offset(me: *mut Chunk, offset: usize) -> *mut Chunk {
        (me as *mut u8).offset(-(offset as isize)) as *mut Chunk

    unsafe fn to_mem(me: *mut Chunk) -> *mut u8 {
        (me as *mut u8).offset(2 * (mem::size_of::<usize>() as isize))

    unsafe fn from_mem(mem: *mut u8) -> *mut Chunk {
        mem.offset(-2 * (mem::size_of::<usize>() as isize)) as *mut Chunk
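    // Illustrative note (not part of the original source): user memory starts
    // two words past the chunk header (past `prev_foot` and `head`), so on a
    // 64-bit target to_mem(p) == p as usize + 16 and from_mem undoes exactly
    // that offset.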
    unsafe fn leftmost_child(me: *mut TreeChunk) -> *mut TreeChunk {
        let left = (*me).child[0];

    unsafe fn chunk(me: *mut TreeChunk) -> *mut Chunk {

    unsafe fn next(me: *mut TreeChunk) -> *mut TreeChunk {
        (*TreeChunk::chunk(me)).next as *mut TreeChunk

    unsafe fn prev(me: *mut TreeChunk) -> *mut TreeChunk {
        (*TreeChunk::chunk(me)).prev as *mut TreeChunk
: u32 = 1 << 0;
1736 unsafe fn is_extern(seg
: *mut Segment
) -> bool
{
1737 (*seg
).flags
& EXTERN
!= 0
1740 unsafe fn can_release_part(seg
: *mut Segment
) -> bool
{
1741 sys
::can_release_part((*seg
).flags
>> 1)
1744 unsafe fn sys_flags(seg
: *mut Segment
) -> u32 {
1748 unsafe fn holds(seg
: *mut Segment
, addr
: *mut u8) -> bool
{
1749 (*seg
).base
<= addr
&& addr
< Segment
::top(seg
)
1752 unsafe fn top(seg
: *mut Segment
) -> *mut u8 {
1753 (*seg
).base
.offset((*seg
).size
as isize)