1 // This is a version of dlmalloc.c ported to Rust. You can find the original
2 // source at ftp://g.oswego.edu/pub/misc/malloc.c
3 //
4 // The original source was written by Doug Lea and released to the public domain
5
6 use core::cmp;
7 use core::mem;
8 use core::ptr;
9
10 use sys;
11
12 pub struct Dlmalloc {
13 smallmap: u32,
14 treemap: u32,
15 smallbins: [*mut Chunk; (NSMALLBINS + 1) * 2],
16 treebins: [*mut TreeChunk; NTREEBINS],
17 dvsize: usize,
18 topsize: usize,
19 dv: *mut Chunk,
20 top: *mut Chunk,
21 footprint: usize,
22 max_footprint: usize,
23 seg: Segment,
24 trim_check: usize,
25 least_addr: *mut u8,
26 release_checks: usize,
27 }
28
29 pub const DLMALLOC_INIT: Dlmalloc = Dlmalloc {
30 smallmap: 0,
31 treemap: 0,
32 smallbins: [0 as *mut _; (NSMALLBINS + 1) * 2],
33 treebins: [0 as *mut _; NTREEBINS],
34 dvsize: 0,
35 topsize: 0,
36 dv: 0 as *mut _,
37 top: 0 as *mut _,
38 footprint: 0,
39 max_footprint: 0,
40 seg: Segment {
41 base: 0 as *mut _,
42 size: 0,
43 next: 0 as *mut _,
44 flags: 0,
45 },
46 trim_check: 0,
47 least_addr: 0 as *mut _,
48 release_checks: 0,
49 };
50
51 // TODO: document this
52 const NSMALLBINS: usize = 32;
53 const NTREEBINS: usize = 32;
54 const SMALLBIN_SHIFT: usize = 3;
55 const TREEBIN_SHIFT: usize = 8;
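// Bin layout (mirroring the original dlmalloc.c defaults): small bins hold
// free chunks smaller than `1 << TREEBIN_SHIFT` (256) bytes, spaced
// `1 << SMALLBIN_SHIFT` (8) bytes apart; anything larger is kept in one of
// the NTREEBINS size-range trees.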
56
57 // TODO: runtime configurable? documentation?
58 const DEFAULT_GRANULARITY: usize = 64 * 1024;
59 const DEFAULT_TRIM_THRESHOLD: usize = 2 * 1024 * 1024;
60 const MAX_RELEASE_CHECK_RATE: usize = 4095;
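// DEFAULT_GRANULARITY is the unit in which `sys_alloc` requests memory from
// the system, DEFAULT_TRIM_THRESHOLD is the initial `trim_check` used to
// decide when `sys_trim` should hand memory back, and MAX_RELEASE_CHECK_RATE
// controls how often freeing large chunks triggers a scan for releasable
// segments.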
61
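// Header of every chunk. `head` holds the chunk size with the PINUSE/CINUSE
// flag bits in its low bits, `prev_foot` holds the size of the previous chunk
// while that chunk is free (or the mmap offset for mmapped chunks), and
// `prev`/`next` are only valid while this chunk is free and linked into a bin.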
62 #[repr(C)]
63 struct Chunk {
64 prev_foot: usize,
65 head: usize,
66 prev: *mut Chunk,
67 next: *mut Chunk,
68 }
69
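// A free chunk large enough to live in a tree bin. The embedded `chunk` links
// same-sized chunks into a ring via its `prev`/`next` fields, while
// `child`/`parent`/`index` place the node in the bitwise trie for its bin.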
70 #[repr(C)]
71 struct TreeChunk {
72 chunk: Chunk,
73 child: [*mut TreeChunk; 2],
74 parent: *mut TreeChunk,
75 index: u32,
76 }
77
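// Describes one region of memory obtained from `sys::alloc`. Segments form a
// singly linked list whose head is embedded in `Dlmalloc::seg`; `flags`
// records whether the region is external and whether parts of it can be
// released back to the system.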
78 #[repr(C)]
79 #[derive(Clone, Copy)]
80 struct Segment {
81 base: *mut u8,
82 size: usize,
83 next: *mut Segment,
84 flags: u32,
85 }
86
87 fn align_up(a: usize, alignment: usize) -> usize {
88 debug_assert!(alignment.is_power_of_two());
89 (a + (alignment - 1)) & !(alignment - 1)
90 }
91
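// Mask of all bits strictly above the lowest set bit of `x`, e.g.
// left_bits(0b0100) == 0b...1111000. Used to find candidate bins at larger
// indices than the one just checked.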
92 fn left_bits(x: u32) -> u32 {
93 (x << 1) | (!(x << 1) + 1)
94 }
95
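// Isolates the lowest set bit of `x` (the classic `x & -x`, written with
// `!x + 1` since `x` is unsigned).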
96 fn least_bit(x: u32) -> u32 {
97 x & (!x + 1)
98 }
99
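// How far to shift a size left so that the bit which picks between the two
// children of tree bin `x` lands in the most significant bit position (see
// `compute_tree_index` and `insert_large_chunk`).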
100 fn leftshift_for_tree_index(x: u32) -> u32 {
101 let x = x as usize;
102 if x == NTREEBINS - 1 {
103 0
104 } else {
105 (mem::size_of::<usize>() * 8 - 1 - ((x >> 1) + TREEBIN_SHIFT - 2)) as u32
106 }
107 }
108
109 impl Dlmalloc {
110 pub fn new() -> Dlmalloc {
111 DLMALLOC_INIT
112 }
113
114 // TODO: can we get rid of this?
115 pub fn malloc_alignment(&self) -> usize {
116 mem::size_of::<usize>() * 2
117 }
118
119 // TODO: dox
120 fn chunk_overhead(&self) -> usize {
121 mem::size_of::<usize>()
122 }
123
124 fn mmap_chunk_overhead(&self) -> usize {
125 2 * mem::size_of::<usize>()
126 }
127
128 // TODO: dox
129 fn min_large_size(&self) -> usize {
130 1 << TREEBIN_SHIFT
131 }
132
133 // TODO: dox
134 fn max_small_size(&self) -> usize {
135 self.min_large_size() - 1
136 }
137
138 // TODO: dox
139 fn max_small_request(&self) -> usize {
140 self.max_small_size() - (self.malloc_alignment() - 1) - self.chunk_overhead()
141 }
142
143 // TODO: dox
144 fn min_chunk_size(&self) -> usize {
145 align_up(mem::size_of::<Chunk>(), self.malloc_alignment())
146 }
147
148 // TODO: dox
149 fn min_request(&self) -> usize {
150 self.min_chunk_size() - self.chunk_overhead() - 1
151 }
152
153 // TODO: dox
154 fn max_request(&self) -> usize {
155 (!self.min_chunk_size() + 1) << 2
156 }
157
158 fn pad_request(&self, amt: usize) -> usize {
159 align_up(amt + self.chunk_overhead(), self.malloc_alignment())
160 }
161
162 fn small_index(&self, size: usize) -> u32 {
163 (size >> SMALLBIN_SHIFT) as u32
164 }
165
166 fn small_index2size(&self, idx: u32) -> usize {
167 (idx as usize) << SMALLBIN_SHIFT
168 }
169
170 fn is_small(&self, s: usize) -> bool {
171 s >> SMALLBIN_SHIFT < NSMALLBINS
172 }
173
174 fn is_aligned(&self, a: usize) -> bool {
175 a & (self.malloc_alignment() - 1) == 0
176 }
177
178 fn align_offset(&self, addr: *mut u8) -> usize {
179 align_up(addr as usize, self.malloc_alignment()) - (addr as usize)
180 }
181
182 fn top_foot_size(&self) -> usize {
183 self.align_offset(unsafe { Chunk::to_mem(ptr::null_mut()) }) +
184 self.pad_request(mem::size_of::<Segment>()) +
185 self.min_chunk_size()
186 }
187
188 fn mmap_foot_pad(&self) -> usize {
189 4 * mem::size_of::<usize>()
190 }
191
192 fn align_as_chunk(&self, ptr: *mut u8) -> *mut Chunk {
193 unsafe {
194 let chunk = Chunk::to_mem(ptr as *mut Chunk);
195 ptr.offset(self.align_offset(chunk) as isize)
196 as *mut Chunk
197 }
198 }
199
200 fn request2size(&self, req: usize) -> usize {
201 if req < self.min_request() {
202 self.min_chunk_size()
203 } else {
204 self.pad_request(req)
205 }
206 }
207
208 unsafe fn overhead_for(&self, p: *mut Chunk) -> usize {
209 if Chunk::mmapped(p) {
210 self.mmap_chunk_overhead()
211 } else {
212 self.chunk_overhead()
213 }
214 }
215
216 pub unsafe fn calloc_must_clear(&self, ptr: *mut u8) -> bool {
217 !sys::allocates_zeros() || !Chunk::mmapped(Chunk::from_mem(ptr))
218 }
219
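// Allocation strategy, in order: an exactly (or nearly) fitting small bin;
// for small requests that `dv` can't serve, the next non-empty small bin or a
// small tree chunk; for large requests, the tree bins; then the `dv` chunk,
// then the top chunk, and finally fresh memory from `sys_alloc`.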
220 pub unsafe fn malloc(&mut self, size: usize) -> *mut u8 {
221 self.check_malloc_state();
222
223 let nb;
224 if size <= self.max_small_request() {
225 nb = self.request2size(size);
226 let mut idx = self.small_index(nb);
227 let smallbits = self.smallmap >> idx;
228
229 // Check the bin for `idx` (the lowest bit) but also check the next
230 // bin up to use that to satisfy our request, if needed.
231 if smallbits & 0b11 != 0 {
232 // If the lowest bit, our `idx`, is unset then bump up the
233 // index as we'll be using the next bucket up.
234 idx += !smallbits & 1;
235
236 let b = self.smallbin_at(idx);
237 let p = (*b).prev;
238 self.unlink_first_small_chunk(b, p, idx);
239 let smallsize = self.small_index2size(idx);
240 Chunk::set_inuse_and_pinuse(p, smallsize);
241 let ret = Chunk::to_mem(p);
242 self.check_malloced_chunk(ret, nb);
243 return ret
244 }
245
246 if nb > self.dvsize {
247 // If there's some other bin with some memory, then we just use
248 // the smallest bin that's large enough
249 if smallbits != 0 {
250 let leftbits = (smallbits << idx) & left_bits(1 << idx);
251 let leastbit = least_bit(leftbits);
252 let i = leastbit.trailing_zeros();
253 let b = self.smallbin_at(i);
254 let p = (*b).prev;
255 debug_assert_eq!(Chunk::size(p), self.small_index2size(i));
256 self.unlink_first_small_chunk(b, p, i);
257 let smallsize = self.small_index2size(i);
258 let rsize = smallsize - nb;
259 if mem::size_of::<usize>() != 4 && rsize < self.min_chunk_size() {
260 Chunk::set_inuse_and_pinuse(p, smallsize);
261 } else {
262 Chunk::set_size_and_pinuse_of_inuse_chunk(p, nb);
263 let r = Chunk::plus_offset(p, nb);
264 Chunk::set_size_and_pinuse_of_free_chunk(r, rsize);
265 self.replace_dv(r, rsize);
266 }
267 let ret = Chunk::to_mem(p);
268 self.check_malloced_chunk(ret, nb);
269 return ret
270
271 } else if self.treemap != 0 {
272 let mem = self.tmalloc_small(nb);
273 if !mem.is_null() {
274 self.check_malloced_chunk(mem, nb);
275 self.check_malloc_state();
276 return mem
277 }
278 }
279 }
280 } else if size >= self.max_request() {
281 // TODO: translate this to unsupported
282 return ptr::null_mut()
283 } else {
284 nb = self.pad_request(size);
285 if self.treemap != 0 {
286 let mem = self.tmalloc_large(nb);
287 if !mem.is_null() {
288 self.check_malloced_chunk(mem, nb);
289 self.check_malloc_state();
290 return mem
291 }
292 }
293 }
294
295 // use the `dv` node if we can, splitting it if necessary or otherwise
296 // exhausting the entire chunk
297 if nb <= self.dvsize {
298 let rsize = self.dvsize - nb;
299 let p = self.dv;
300 if rsize >= self.min_chunk_size() {
301 self.dv = Chunk::plus_offset(p, nb);
302 self.dvsize = rsize;
303 let r = self.dv;
304 Chunk::set_size_and_pinuse_of_free_chunk(r, rsize);
305 Chunk::set_size_and_pinuse_of_inuse_chunk(p, nb);
306 } else {
307 let dvs = self.dvsize;
308 self.dvsize = 0;
309 self.dv = ptr::null_mut();
310 Chunk::set_inuse_and_pinuse(p, dvs);
311 }
312 let ret = Chunk::to_mem(p);
313 self.check_malloced_chunk(ret, nb);
314 self.check_malloc_state();
315 return ret
316 }
317
318 // Split the top node if we can
319 if nb < self.topsize {
320 self.topsize -= nb;
321 let rsize = self.topsize;
322 let p = self.top;
323 self.top = Chunk::plus_offset(p, nb);
324 let r = self.top;
325 (*r).head = rsize | PINUSE;
326 Chunk::set_size_and_pinuse_of_inuse_chunk(p, nb);
327 self.check_top_chunk(self.top);
328 let ret = Chunk::to_mem(p);
329 self.check_malloced_chunk(ret, nb);
330 self.check_malloc_state();
331 return ret
332 }
333
334 self.sys_alloc(nb)
335 }
336
337 unsafe fn sys_alloc(&mut self, size: usize) -> *mut u8 {
338 self.check_malloc_state();
339 let asize = align_up(size + self.top_foot_size() + self.malloc_alignment(),
340 DEFAULT_GRANULARITY);
341
342 let (tbase, tsize, flags) = sys::alloc(asize);
343 if tbase.is_null() {
344 return tbase
345 }
346
347 self.footprint += tsize;
348 self.max_footprint = cmp::max(self.max_footprint, self.footprint);
349
350 if self.top.is_null() {
351 if self.least_addr.is_null() || tbase < self.least_addr {
352 self.least_addr = tbase;
353 }
354 self.seg.base = tbase;
355 self.seg.size = tsize;
356 self.seg.flags = flags;
357 self.release_checks = MAX_RELEASE_CHECK_RATE;
358 self.init_bins();
359 let tsize = tsize - self.top_foot_size();
360 self.init_top(tbase as *mut Chunk, tsize);
361 // let mn = Chunk::next(Chunk::from_mem(self as *mut _ as *mut u8));
362 // let top_foot_size = self.top_foot_size();
363 // self.init_top(mn, tbase as usize + tsize - mn as usize - top_foot_size);
364 } else {
365 let mut sp = &mut self.seg as *mut Segment;
366 while !sp.is_null() && tbase != Segment::top(sp) {
367 sp = (*sp).next;
368 }
369 if !sp.is_null() &&
370 !Segment::is_extern(sp) &&
371 Segment::sys_flags(sp) == flags &&
372 Segment::holds(sp, self.top as *mut u8)
373 {
374 (*sp).size += tsize;
375 let ptr = self.top;
376 let size = self.topsize + tsize;
377 self.init_top(ptr, size);
378 } else {
379 self.least_addr = cmp::min(tbase, self.least_addr);
380 let mut sp = &mut self.seg as *mut Segment;
381 while !sp.is_null() && (*sp).base != tbase.offset(tsize as isize) {
382 sp = (*sp).next;
383 }
384 if !sp.is_null() &&
385 !Segment::is_extern(sp) &&
386 Segment::sys_flags(sp) == flags
387 {
388 let oldbase = (*sp).base;
389 (*sp).base = tbase;
390 (*sp).size += tsize;
391 return self.prepend_alloc(tbase, oldbase, size);
392 } else {
393 self.add_segment(tbase, tsize, flags);
394 }
395 }
396 }
397
398 if size < self.topsize {
399 self.topsize -= size;
400 let rsize = self.topsize;
401 let p = self.top;
402 self.top = Chunk::plus_offset(p, size);
403 let r = self.top;
404 (*r).head = rsize | PINUSE;
405 Chunk::set_size_and_pinuse_of_inuse_chunk(p, size);
406 let ret = Chunk::to_mem(p);
407 self.check_top_chunk(self.top);
408 self.check_malloced_chunk(ret, size);
409 self.check_malloc_state();
410 return ret
411 }
412
413 return ptr::null_mut()
414 }
415
416 pub unsafe fn realloc(&mut self, oldmem: *mut u8, bytes: usize) -> *mut u8 {
417 if bytes >= self.max_request() {
418 return ptr::null_mut()
419 }
420 let nb = self.request2size(bytes);
421 let oldp = Chunk::from_mem(oldmem);
422 let newp = self.try_realloc_chunk(oldp, nb, true);
423 if !newp.is_null() {
424 self.check_inuse_chunk(newp);
425 return Chunk::to_mem(newp)
426 }
427 let ptr = self.malloc(bytes);
428 if !ptr.is_null() {
429 let oc = Chunk::size(oldp) - self.overhead_for(oldp);
430 ptr::copy_nonoverlapping(oldmem, ptr, cmp::min(oc, bytes));
431 self.free(oldmem);
432 }
433 return ptr
434 }
435
436 unsafe fn try_realloc_chunk(&mut self, p: *mut Chunk, nb: usize, can_move: bool)
437 -> *mut Chunk
438 {
439 let oldsize = Chunk::size(p);
440 let next = Chunk::plus_offset(p, oldsize);
441
442 if Chunk::mmapped(p) {
443 self.mmap_resize(p, nb, can_move)
444 } else if oldsize >= nb {
445 let rsize = oldsize - nb;
446 if rsize >= self.min_chunk_size() {
447 let r = Chunk::plus_offset(p, nb);
448 Chunk::set_inuse(p, nb);
449 Chunk::set_inuse(r, rsize);
450 self.dispose_chunk(r, rsize);
451 }
452 p
453 } else if next == self.top { // extend into top
454 if oldsize + self.topsize <= nb {
455 return ptr::null_mut()
456 }
457 let newsize = oldsize + self.topsize;
458 let newtopsize = newsize - nb;
459 let newtop = Chunk::plus_offset(p, nb);
460 Chunk::set_inuse(p, nb);
461 (*newtop).head = newtopsize | PINUSE;
462 self.top = newtop;
463 self.topsize = newtopsize;
464 p
465 } else if next == self.dv { // extend into dv
466 let dvs = self.dvsize;
467 if oldsize + dvs < nb {
468 return ptr::null_mut()
469 }
470 let dsize = oldsize + dvs - nb;
471 if dsize >= self.min_chunk_size() {
472 let r = Chunk::plus_offset(p, nb);
473 let n = Chunk::plus_offset(r, dsize);
474 Chunk::set_inuse(p, nb);
475 Chunk::set_size_and_pinuse_of_free_chunk(r, dsize);
476 Chunk::clear_pinuse(n);
477 self.dvsize = dsize;
478 self.dv = r;
479 } else { // exhaust dv
480 let newsize = oldsize + dvs;
481 Chunk::set_inuse(p, newsize);
482 self.dvsize = 0;
483 self.dv = ptr::null_mut();
484 }
485 return p
486 } else if !Chunk::cinuse(next) { // extend into the next free chunk
487 let nextsize = Chunk::size(next);
488 if oldsize + nextsize < nb {
489 return ptr::null_mut()
490 }
491 let rsize = oldsize + nextsize - nb;
492 self.unlink_chunk(next, nextsize);
493 if rsize < self.min_chunk_size() {
494 let newsize = oldsize + nextsize;
495 Chunk::set_inuse(p, newsize);
496 } else {
497 let r = Chunk::plus_offset(p, nb);
498 Chunk::set_inuse(p, nb);
499 Chunk::set_inuse(r, rsize);
500 self.dispose_chunk(r, rsize);
501 }
502 p
503 } else {
504 ptr::null_mut()
505 }
506 }
507
508 unsafe fn mmap_resize(&mut self, oldp: *mut Chunk, nb: usize, can_move: bool)
509 -> *mut Chunk
510 {
511 let oldsize = Chunk::size(oldp);
512 // Can't shrink mmap regions below a small size
513 if self.is_small(nb) {
514 return ptr::null_mut()
515 }
516
517 // Keep the old chunk if it's big enough but not too big
518 if oldsize >= nb + mem::size_of::<usize>() &&
519 (oldsize - nb) <= (DEFAULT_GRANULARITY << 1)
520 {
521 return oldp
522 }
523
524 let offset = (*oldp).prev_foot;
525 let oldmmsize = oldsize + offset + self.mmap_foot_pad();
526 let newmmsize = self.mmap_align(nb + 6 * mem::size_of::<usize>() +
527 self.malloc_alignment() - 1);
528 let ptr = sys::remap((oldp as *mut u8).offset(-(offset as isize)),
529 oldmmsize,
530 newmmsize,
531 can_move);
532 if ptr.is_null() {
533 return ptr::null_mut();
534 }
535 let newp = ptr.offset(offset as isize) as *mut Chunk;
536 let psize = newmmsize - offset - self.mmap_foot_pad();
537 (*newp).head = psize;
538 (*Chunk::plus_offset(newp, psize)).head = Chunk::fencepost_head();
539 (*Chunk::plus_offset(newp, psize + mem::size_of::<usize>())).head = 0;
540 self.least_addr = cmp::min(ptr, self.least_addr);
541 self.footprint = self.footprint + newmmsize - oldmmsize;
542 self.max_footprint = cmp::max(self.max_footprint, self.footprint);
543 self.check_mmapped_chunk(newp);
544 return newp
545 }
546
547 fn mmap_align(&self, a: usize) -> usize {
548 align_up(a, sys::page_size())
549 }
550
551 // Only call this with power-of-two alignment and alignment >
552 // `self.malloc_alignment()`
553 pub unsafe fn memalign(&mut self, mut alignment: usize, bytes: usize)
554 -> *mut u8
555 {
556 if alignment < self.min_chunk_size() {
557 alignment = self.min_chunk_size();
558 }
559 if bytes >= self.max_request() - alignment {
560 return ptr::null_mut()
561 }
562 let nb = self.request2size(bytes);
563 let req = nb + alignment + self.min_chunk_size() - self.chunk_overhead();
564 let mem = self.malloc(req);
565 if mem.is_null() {
566 return mem
567 }
568 let mut p = Chunk::from_mem(mem);
569 if mem as usize & (alignment - 1) != 0 {
570 // Here we find an aligned spot inside the chunk. Since we need to
571 // give back leading space in a chunk of at least `min_chunk_size`,
572 // if the first calculation places us at a spot with less than a
573 // `min_chunk_size` leader we can move to the next aligned spot.
574 // We've allocated enough total room so that this is always possible.
575 let br = Chunk::from_mem(((mem as usize + alignment - 1) & (!alignment + 1)) as *mut u8);
576 let pos = if (br as usize - p as usize) > self.min_chunk_size() {
577 br as *mut u8
578 } else {
579 (br as *mut u8).offset(alignment as isize)
580 };
581 let newp = pos as *mut Chunk;
582 let leadsize = pos as usize - p as usize;
583 let newsize = Chunk::size(p) - leadsize;
584
585 // for mmapped chunks just adjust the offset
586 if Chunk::mmapped(p) {
587 (*newp).prev_foot = (*p).prev_foot + leadsize;
588 (*newp).head = newsize;
589 } else {
590 // give back the leader, use the rest
591 Chunk::set_inuse(newp, newsize);
592 Chunk::set_inuse(p, leadsize);
593 self.dispose_chunk(p, leadsize);
594 }
595 p = newp;
596 }
597
598 // give back spare room at the end
599 if !Chunk::mmapped(p) {
600 let size = Chunk::size(p);
601 if size > nb + self.min_chunk_size() {
602 let remainder_size = size - nb;
603 let remainder = Chunk::plus_offset(p, nb);
604 Chunk::set_inuse(p, nb);
605 Chunk::set_inuse(remainder, remainder_size);
606 self.dispose_chunk(remainder, remainder_size);
607 }
608 }
609
610 let mem = Chunk::to_mem(p);
611 debug_assert!(Chunk::size(p) >= nb);
612 debug_assert_eq!(align_up(mem as usize, alignment), mem as usize);
613 self.check_inuse_chunk(p);
614 return mem
615 }
616
617 // consolidate and bin a chunk, differs from exported versions of free
618 // mainly in that the chunk need not be marked as inuse
619 unsafe fn dispose_chunk(&mut self, mut p: *mut Chunk, mut psize: usize) {
620 let next = Chunk::plus_offset(p, psize);
621 if !Chunk::pinuse(p) {
622 let prevsize = (*p).prev_foot;
623 if Chunk::mmapped(p) {
624 psize += prevsize + self.mmap_foot_pad();
625 if sys::free((p as *mut u8).offset(-(prevsize as isize)), psize) {
626 self.footprint -= psize;
627 }
628 return
629 }
630 let prev = Chunk::minus_offset(p, prevsize);
631 psize += prevsize;
632 p = prev;
633 if p != self.dv {
634 self.unlink_chunk(p, prevsize);
635 } else if (*next).head & INUSE == INUSE {
636 self.dvsize = psize;
637 Chunk::set_free_with_pinuse(p, psize, next);
638 return
639 }
640 }
641
642 if !Chunk::cinuse(next) { // consolidate forward
643 if next == self.top {
644 self.topsize += psize;
645 let tsize = self.topsize;
646 self.top = p;
647 (*p).head = tsize | PINUSE;
648 if p == self.dv {
649 self.dv = ptr::null_mut();
650 self.dvsize = 0;
651 }
652 return
653 } else if next == self.dv {
654 self.dvsize += psize;
655 let dsize = self.dvsize;
656 self.dv = p;
657 Chunk::set_size_and_pinuse_of_free_chunk(p, dsize);
658 return
659 } else {
660 let nsize = Chunk::size(next);
661 psize += nsize;
662 self.unlink_chunk(next, nsize);
663 Chunk::set_size_and_pinuse_of_free_chunk(p, psize);
664 if p == self.dv {
665 self.dvsize = psize;
666 return
667 }
668 }
669 } else {
670 Chunk::set_free_with_pinuse(p, psize, next);
671 }
672 self.insert_chunk(p, psize);
673 }
674
675 unsafe fn init_top(&mut self, ptr: *mut Chunk, size: usize) {
676 let offset = self.align_offset(Chunk::to_mem(ptr));
677 let p = Chunk::plus_offset(ptr, offset);
678 let size = size - offset;
679
680 self.top = p;
681 self.topsize = size;
682 (*p).head = size | PINUSE;
683 (*Chunk::plus_offset(p, size)).head = self.top_foot_size();
684 self.trim_check = DEFAULT_TRIM_THRESHOLD;
685 }
686
687 unsafe fn init_bins(&mut self) {
688 for i in 0..NSMALLBINS as u32 {
689 let bin = self.smallbin_at(i);
690 (*bin).next = bin;
691 (*bin).prev = bin;
692 }
693 }
694
695 unsafe fn prepend_alloc(&mut self,
696 newbase: *mut u8,
697 oldbase: *mut u8,
698 size: usize) -> *mut u8 {
699 let p = self.align_as_chunk(newbase);
700 let mut oldfirst = self.align_as_chunk(oldbase);
701 let psize = oldfirst as usize - p as usize;
702 let q = Chunk::plus_offset(p, size);
703 let mut qsize = psize - size;
704 Chunk::set_size_and_pinuse_of_inuse_chunk(p, size);
705
706 debug_assert!(oldfirst > q);
707 debug_assert!(Chunk::pinuse(oldfirst));
708 debug_assert!(qsize >= self.min_chunk_size());
709
710
711 // consolidate the remainder with the first chunk of the old base
712 if oldfirst == self.top {
713 self.topsize += qsize;
714 let tsize = self.topsize;
715 self.top = q;
716 (*q).head = tsize | PINUSE;
717 self.check_top_chunk(q);
718 } else if oldfirst == self.dv {
719 self.dvsize += qsize;
720 let dsize = self.dvsize;
721 self.dv = q;
722 Chunk::set_size_and_pinuse_of_free_chunk(q, dsize);
723 } else {
724 if !Chunk::inuse(oldfirst) {
725 let nsize = Chunk::size(oldfirst);
726 self.unlink_chunk(oldfirst, nsize);
727 oldfirst = Chunk::plus_offset(oldfirst, nsize);
728 qsize += nsize;
729 }
730 Chunk::set_free_with_pinuse(q, qsize, oldfirst);
731 self.insert_chunk(q, qsize);
732 self.check_free_chunk(q);
733 }
734
735 let ret = Chunk::to_mem(p);
736 self.check_malloced_chunk(ret, size);
737 self.check_malloc_state();
738 return ret
739 }
740
741 // add a segment to hold a new noncontiguous region
742 unsafe fn add_segment(&mut self, tbase: *mut u8, tsize: usize, flags: u32) {
743 // TODO: what in the world is this function doing
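// Roughly: the tail of the segment that held the old top is carved into an
// in-use chunk holding a copy of the current segment-list head, `self.seg` is
// updated to describe the new region and to link to that copy, fencepost
// headers are written after it, any leftover prefix of the old top is freed,
// and the new region becomes the top chunk.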
744
745 // Determine locations and sizes of segment, fenceposts, and the old top
746 let old_top = self.top as *mut u8;
747 let oldsp = self.segment_holding(old_top);
748 let old_end = Segment::top(oldsp);
749 let ssize = self.pad_request(mem::size_of::<Segment>());
750 let offset = ssize + mem::size_of::<usize>() * 4 + self.malloc_alignment() - 1;
751 let rawsp = old_end.offset(-(offset as isize));
752 let offset = self.align_offset(Chunk::to_mem(rawsp as *mut Chunk));
753 let asp = rawsp.offset(offset as isize);
754 let csp = if asp < old_top.offset(self.min_chunk_size() as isize) {
755 old_top
756 } else {
757 asp
758 };
759 let sp = csp as *mut Chunk;
760 let ss = Chunk::to_mem(sp) as *mut Segment;
761 let tnext = Chunk::plus_offset(sp, ssize);
762 let mut p = tnext;
763 let mut nfences = 0;
764
765 // reset the top to our new space
766 let size = tsize - self.top_foot_size();
767 self.init_top(tbase as *mut Chunk, size);
768
769 // set up our segment record
770 debug_assert!(self.is_aligned(ss as usize));
771 Chunk::set_size_and_pinuse_of_inuse_chunk(sp, ssize);
772 *ss = self.seg; // push our current record
773 self.seg.base = tbase;
774 self.seg.size = tsize;
775 self.seg.flags = flags;
776 self.seg.next = ss;
777
778 // insert trailing fences
779 loop {
780 let nextp = Chunk::plus_offset(p, mem::size_of::<usize>());
781 (*p).head = Chunk::fencepost_head();
782 nfences += 1;
783 if (&(*nextp).head as *const usize as *mut u8) < old_end {
784 p = nextp;
785 } else {
786 break
787 }
788 }
789 debug_assert!(nfences >= 2);
790
791 // insert the rest of the old top into a bin as an ordinary free chunk
792 if csp != old_top {
793 let q = old_top as *mut Chunk;
794 let psize = csp as usize - old_top as usize;
795 let tn = Chunk::plus_offset(q, psize);
796 Chunk::set_free_with_pinuse(q, psize, tn);
797 self.insert_chunk(q, psize);
798 }
799
800 self.check_top_chunk(self.top);
801 self.check_malloc_state();
802 }
803
804 unsafe fn segment_holding(&self, ptr: *mut u8) -> *mut Segment {
805 let mut sp = &self.seg as *const Segment as *mut Segment;
806 while !sp.is_null() {
807 if (*sp).base <= ptr && ptr < Segment::top(sp) {
808 return sp
809 }
810 sp = (*sp).next;
811 }
812 ptr::null_mut()
813 }
814
815 unsafe fn tmalloc_small(&mut self, size: usize) -> *mut u8 {
816 let leastbit = least_bit(self.treemap);
817 let i = leastbit.trailing_zeros();
818 let mut v = *self.treebin_at(i);
819 let mut t = v;
820 let mut rsize = Chunk::size(TreeChunk::chunk(t)) - size;
821
822 loop {
823 t = TreeChunk::leftmost_child(t);
824 if t.is_null() {
825 break
826 }
827 let trem = Chunk::size(TreeChunk::chunk(t)) - size;
828 if trem < rsize {
829 rsize = trem;
830 v = t;
831 }
832 }
833
834 let vc = TreeChunk::chunk(v);
835 let r = Chunk::plus_offset(vc, size) as *mut TreeChunk;
836 debug_assert_eq!(Chunk::size(vc), rsize + size);
837 self.unlink_large_chunk(v);
838 if rsize < self.min_chunk_size() {
839 Chunk::set_inuse_and_pinuse(vc, rsize + size);
840 } else {
841 let rc = TreeChunk::chunk(r);
842 Chunk::set_size_and_pinuse_of_inuse_chunk(vc, size);
843 Chunk::set_size_and_pinuse_of_free_chunk(rc, rsize);
844 self.replace_dv(rc, rsize);
845 }
846 Chunk::to_mem(vc)
847 }
848
849 unsafe fn tmalloc_large(&mut self, size: usize) -> *mut u8 {
850 let mut v = ptr::null_mut();
851 let mut rsize = !size + 1;
852 let idx = self.compute_tree_index(size);
853 let mut t = *self.treebin_at(idx);
854 if !t.is_null() {
855 // Traverse the tree for this bin looking for a node with size
856 // equal to the `size` above.
857 let mut sizebits = size << leftshift_for_tree_index(idx);
858 // Keep track of the deepest untaken right subtree
859 let mut rst = ptr::null_mut();
860 loop {
861 let csize = Chunk::size(TreeChunk::chunk(t));
862 if csize >= size && csize - size < rsize {
863 v = t;
864 rsize = csize - size;
865 if rsize == 0 {
866 break
867 }
868 }
869 let rt = (*t).child[1];
870 t = (*t).child[(sizebits >> (mem::size_of::<usize>() * 8 - 1)) & 1];
871 if !rt.is_null() && rt != t {
872 rst = rt;
873 }
874 if t.is_null() {
875 // Reset `t` to the least subtree holding sizes greater than
876 // the `size` above, breaking out
877 t = rst;
878 break
879 }
880 sizebits <<= 1;
881 }
882 }
883
884 // Set t to the root of the next non-empty treebin
885 if t.is_null() && v.is_null() {
886 let leftbits = left_bits(1 << idx) & self.treemap;
887 if leftbits != 0 {
888 let leastbit = least_bit(leftbits);
889 let i = leastbit.trailing_zeros();
890 t = *self.treebin_at(i);
891 }
892 }
893
894 // Find the smallest of this tree or subtree
895 while !t.is_null() {
896 let csize = Chunk::size(TreeChunk::chunk(t));
897 if csize >= size && csize - size < rsize {
898 rsize = csize - size;
899 v = t;
900 }
901 t = TreeChunk::leftmost_child(t);
902 }
903
904 // If dv is a better fit, then return null so malloc will use it
905 if v.is_null() || rsize + size >= self.dvsize {
906 return ptr::null_mut()
907 }
908
909 let vc = TreeChunk::chunk(v);
910 let r = Chunk::plus_offset(vc, size);
911 debug_assert_eq!(Chunk::size(vc), rsize + size);
912 self.unlink_large_chunk(v);
913 if rsize < self.min_chunk_size() {
914 Chunk::set_inuse_and_pinuse(vc, rsize + size);
915 } else {
916 Chunk::set_size_and_pinuse_of_inuse_chunk(vc, size);
917 Chunk::set_size_and_pinuse_of_free_chunk(r, rsize);
918 self.insert_chunk(r, rsize);
919 }
920 Chunk::to_mem(vc)
921 }
922
923 unsafe fn smallbin_at(&mut self, idx: u32) -> *mut Chunk {
924 debug_assert!(((idx * 2) as usize) < self.smallbins.len());
925 &mut *self.smallbins.get_unchecked_mut((idx as usize) * 2)
926 as *mut *mut Chunk as *mut Chunk
927 }
928
929 unsafe fn treebin_at(&mut self, idx: u32) -> *mut *mut TreeChunk {
930 debug_assert!((idx as usize) < self.treebins.len());
931 &mut *self.treebins.get_unchecked_mut(idx as usize)
932 }
933
934 fn compute_tree_index(&self, size: usize) -> u32 {
935 let x = size >> TREEBIN_SHIFT;
936 if x == 0 {
937 0
938 } else if x > 0xffff {
939 NTREEBINS as u32 - 1
940 } else {
941 let k = mem::size_of_val(&x) * 8 - 1 - (x.leading_zeros() as usize);
942 ((k << 1) + (size >> (k + TREEBIN_SHIFT - 1) & 1)) as u32
943 }
944 }
945
946 unsafe fn unlink_first_small_chunk(&mut self,
947 head: *mut Chunk,
948 next: *mut Chunk,
949 idx: u32) {
950 let ptr = (*next).prev;
951 debug_assert!(next != head);
952 debug_assert!(next != ptr);
953 debug_assert_eq!(Chunk::size(next), self.small_index2size(idx));
954 if head == ptr {
955 self.clear_smallmap(idx);
956 } else {
957 (*ptr).next = head;
958 (*head).prev = ptr;
959 }
960 }
961
962 unsafe fn replace_dv(&mut self, chunk: *mut Chunk, size: usize) {
963 let dvs = self.dvsize;
964 debug_assert!(self.is_small(dvs));
965 if dvs != 0 {
966 let dv = self.dv;
967 self.insert_small_chunk(dv, dvs);
968 }
969 self.dvsize = size;
970 self.dv = chunk;
971 }
972
973 unsafe fn insert_chunk(&mut self, chunk: *mut Chunk, size: usize) {
974 if self.is_small(size) {
975 self.insert_small_chunk(chunk, size);
976 } else {
977 self.insert_large_chunk(chunk as *mut TreeChunk, size);
978 }
979 }
980
981 unsafe fn insert_small_chunk(&mut self, chunk: *mut Chunk, size: usize) {
982 let idx = self.small_index(size);
983 let head = self.smallbin_at(idx);
984 let mut f = head;
985 debug_assert!(size >= self.min_chunk_size());
986 if !self.smallmap_is_marked(idx) {
987 self.mark_smallmap(idx);
988 } else {
989 f = (*head).prev;
990 }
991
992 (*head).prev = chunk;
993 (*f).next = chunk;
994 (*chunk).prev = f;
995 (*chunk).next = head;
996 }
997
998 unsafe fn insert_large_chunk(&mut self, chunk: *mut TreeChunk, size: usize) {
999 let idx = self.compute_tree_index(size);
1000 let h = self.treebin_at(idx);
1001 (*chunk).index = idx;
1002 (*chunk).child[0] = ptr::null_mut();
1003 (*chunk).child[1] = ptr::null_mut();
1004 let chunkc = TreeChunk::chunk(chunk);
1005 if !self.treemap_is_marked(idx) {
1006 self.mark_treemap(idx);
1007 *h = chunk;
1008 (*chunk).parent = h as *mut TreeChunk; // TODO: dubious?
1009 (*chunkc).next = chunkc;
1010 (*chunkc).prev = chunkc;
1011 } else {
1012 let mut t = *h;
1013 let mut k = size << leftshift_for_tree_index(idx);
1014 loop {
1015 if Chunk::size(TreeChunk::chunk(t)) != size {
1016 let c = &mut (*t).child[(k >> mem::size_of::<usize>() * 8 - 1) & 1];
1017 k <<= 1;
1018 if !c.is_null() {
1019 t = *c;
1020 } else {
1021 *c = chunk;
1022 (*chunk).parent = t;
1023 (*chunkc).next = chunkc;
1024 (*chunkc).prev = chunkc;
1025 break
1026 }
1027 } else {
1028 let tc = TreeChunk::chunk(t);
1029 let f = (*tc).prev;
1030 (*f).next = chunkc;
1031 (*tc).prev = chunkc;
1032 (*chunkc).prev = f;
1033 (*chunkc).next = tc;
1034 (*chunk).parent = ptr::null_mut();
1035 break
1036 }
1037 }
1038 }
1039 }
1040
1041 unsafe fn smallmap_is_marked(&self, idx: u32) -> bool {
1042 self.smallmap & (1 << idx) != 0
1043 }
1044
1045 unsafe fn mark_smallmap(&mut self, idx: u32) {
1046 self.smallmap |= 1 << idx;
1047 }
1048
1049 unsafe fn clear_smallmap(&mut self, idx: u32) {
1050 self.smallmap &= !(1 << idx);
1051 }
1052
1053 unsafe fn treemap_is_marked(&self, idx: u32) -> bool {
1054 self.treemap & (1 << idx) != 0
1055 }
1056
1057 unsafe fn mark_treemap(&mut self, idx: u32) {
1058 self.treemap |= 1 << idx;
1059 }
1060
1061 unsafe fn clear_treemap(&mut self, idx: u32) {
1062 self.treemap &= !(1 << idx);
1063 }
1064
1065 unsafe fn unlink_chunk(&mut self, chunk: *mut Chunk, size: usize) {
1066 if self.is_small(size) {
1067 self.unlink_small_chunk(chunk, size)
1068 } else {
1069 self.unlink_large_chunk(chunk as *mut TreeChunk);
1070 }
1071 }
1072
1073 unsafe fn unlink_small_chunk(&mut self, chunk: *mut Chunk, size: usize) {
1074 let f = (*chunk).prev;
1075 let b = (*chunk).next;
1076 let idx = self.small_index(size);
1077 debug_assert!(chunk != b);
1078 debug_assert!(chunk != f);
1079 debug_assert_eq!(Chunk::size(chunk), self.small_index2size(idx));
1080 if b == f {
1081 self.clear_smallmap(idx);
1082 } else {
1083 (*f).next = b;
1084 (*b).prev = f;
1085 }
1086 }
1087
1088 unsafe fn unlink_large_chunk(&mut self, chunk: *mut TreeChunk) {
1089 let xp = (*chunk).parent;
1090 let mut r;
1091 if TreeChunk::next(chunk) != chunk {
1092 let f = TreeChunk::prev(chunk);
1093 r = TreeChunk::next(chunk);
1094 (*f).chunk.next = TreeChunk::chunk(r);
1095 (*r).chunk.prev = TreeChunk::chunk(f);
1096 } else {
1097 let mut rp = &mut (*chunk).child[1];
1098 if rp.is_null() {
1099 rp = &mut (*chunk).child[0];
1100 }
1101 r = *rp;
1102 if !rp.is_null() {
1103 loop {
1104 let mut cp = &mut (**rp).child[1];
1105 if cp.is_null() {
1106 cp = &mut (**rp).child[0];
1107 }
1108 if cp.is_null() {
1109 break
1110 }
1111 rp = cp;
1112 }
1113 r = *rp;
1114 *rp = ptr::null_mut();
1115 }
1116 }
1117
1118 if xp.is_null() {
1119 return
1120 }
1121
1122 let h = self.treebin_at((*chunk).index);
1123 if chunk == *h {
1124 *h = r;
1125 if r.is_null() {
1126 self.clear_treemap((*chunk).index);
1127 }
1128 } else {
1129 if (*xp).child[0] == chunk {
1130 (*xp).child[0] = r;
1131 } else {
1132 (*xp).child[1] = r;
1133 }
1134 }
1135
1136 if !r.is_null() {
1137 (*r).parent = xp;
1138 let c0 = (*chunk).child[0];
1139 if !c0.is_null() {
1140 (*r).child[0] = c0;
1141 (*c0).parent = r;
1142 }
1143 let c1 = (*chunk).child[1];
1144 if !c1.is_null() {
1145 (*r).child[1] = c1;
1146 (*c1).parent = r;
1147 }
1148 }
1149 }
1150
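// Free strategy: mmapped chunks go straight back to the system; otherwise the
// chunk is coalesced backwards with a free predecessor and forwards into the
// top chunk, the `dv` chunk, or a free successor, and whatever remains is
// placed into a small or tree bin (occasionally triggering a trim or a scan
// for releasable segments).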
1151 pub unsafe fn free(&mut self, mem: *mut u8) {
1152 self.check_malloc_state();
1153
1154 let mut p = Chunk::from_mem(mem);
1155 let mut psize = Chunk::size(p);
1156 let next = Chunk::plus_offset(p, psize);
1157 if !Chunk::pinuse(p) {
1158 let prevsize = (*p).prev_foot;
1159
1160 if Chunk::mmapped(p) {
1161 psize += prevsize + self.mmap_foot_pad();
1162 if sys::free((p as *mut u8).offset(-(prevsize as isize)), psize) {
1163 self.footprint -= psize;
1164 }
1165 return
1166 }
1167
1168 let prev = Chunk::minus_offset(p, prevsize);
1169 psize += prevsize;
1170 p = prev;
1171 if p != self.dv {
1172 self.unlink_chunk(p, prevsize);
1173 } else if (*next).head & INUSE == INUSE {
1174 self.dvsize = psize;
1175 Chunk::set_free_with_pinuse(p, psize, next);
1176 return
1177 }
1178 }
1179
1180 // Consolidate forward if we can
1181 if !Chunk::cinuse(next) {
1182 if next == self.top {
1183 self.topsize += psize;
1184 let tsize = self.topsize;
1185 self.top = p;
1186 (*p).head = tsize | PINUSE;
1187 if p == self.dv {
1188 self.dv = ptr::null_mut();
1189 self.dvsize = 0;
1190 }
1191 if self.should_trim(tsize) {
1192 self.sys_trim(0);
1193 }
1194 return
1195 } else if next == self.dv {
1196 self.dvsize += psize;
1197 let dsize = self.dvsize;
1198 self.dv = p;
1199 Chunk::set_size_and_pinuse_of_free_chunk(p, dsize);
1200 return
1201 } else {
1202 let nsize = Chunk::size(next);
1203 psize += nsize;
1204 self.unlink_chunk(next, nsize);
1205 Chunk::set_size_and_pinuse_of_free_chunk(p, psize);
1206 if p == self.dv {
1207 self.dvsize = psize;
1208 return
1209 }
1210 }
1211 } else {
1212 Chunk::set_free_with_pinuse(p, psize, next);
1213 }
1214
1215 if self.is_small(psize) {
1216 self.insert_small_chunk(p, psize);
1217 self.check_free_chunk(p);
1218 } else {
1219 self.insert_large_chunk(p as *mut TreeChunk, psize);
1220 self.check_free_chunk(p);
1221 self.release_checks -= 1;
1222 if self.release_checks == 0 {
1223 self.release_unused_segments();
1224 }
1225 }
1226 }
1227
1228 fn should_trim(&self, size: usize) -> bool {
1229 size > self.trim_check
1230 }
1231
1232 unsafe fn sys_trim(&mut self, mut pad: usize) -> bool {
1233 let mut released = 0;
1234 if pad < self.max_request() && !self.top.is_null() {
1235 pad += self.top_foot_size();
1236 if self.topsize > pad {
1237 let unit = DEFAULT_GRANULARITY;
1238 let extra = ((self.topsize - pad + unit - 1) / unit - 1) * unit;
1239 let sp = self.segment_holding(self.top as *mut u8);
1240 debug_assert!(!sp.is_null());
1241
1242 if !Segment::is_extern(sp) {
1243 if Segment::can_release_part(sp) {
1244 if (*sp).size >= extra && !self.has_segment_link(sp) {
1245 let newsize = (*sp).size - extra;
1246 if sys::free_part((*sp).base, (*sp).size, newsize) {
1247 released = extra;
1248 }
1249 }
1250 }
1251 }
1252
1253 if released != 0 {
1254 (*sp).size -= released;
1255 self.footprint -= released;
1256 let top = self.top;
1257 let topsize = self.topsize - released;
1258 self.init_top(top, topsize);
1259 self.check_top_chunk(self.top);
1260 }
1261 }
1262
1263 released += self.release_unused_segments();
1264
1265 if released == 0 && self.topsize > self.trim_check {
1266 self.trim_check = usize::max_value();
1267 }
1268 }
1269
1270 released != 0
1271 }
1272
1273 unsafe fn has_segment_link(&self, ptr: *mut Segment) -> bool {
1274 let mut sp = &self.seg as *const Segment as *mut Segment;
1275 while !sp.is_null() {
1276 if Segment::holds(ptr, sp as *mut u8) {
1277 return true
1278 }
1279 sp = (*sp).next;
1280 }
1281 false
1282 }
1283
1284 /// Unmap and unlink any mapped segments that don't contain used chunks
1285 unsafe fn release_unused_segments(&mut self) -> usize {
1286 let mut released = 0;
1287 let mut nsegs = 0;
1288 let mut pred = &mut self.seg as *mut Segment;
1289 let mut sp = (*pred).next;
1290 while !sp.is_null() {
1291 let base = (*sp).base;
1292 let size = (*sp).size;
1293 let next = (*sp).next;
1294 nsegs += 1;
1295
1296 if Segment::can_release_part(sp) && !Segment::is_extern(sp) {
1297 let p = self.align_as_chunk(base);
1298 let psize = Chunk::size(p);
1299 // We can unmap if the first chunk holds the entire segment and
1300 // isn't pinned.
1301 let chunk_top = (p as *mut u8).offset(psize as isize);
1302 let top = base.offset((size - self.top_foot_size()) as isize);
1303 if !Chunk::inuse(p) && chunk_top >= top {
1304 let tp = p as *mut TreeChunk;
1305 debug_assert!(Segment::holds(sp, sp as *mut u8));
1306 if p == self.dv {
1307 self.dv = ptr::null_mut();
1308 self.dvsize = 0;
1309 } else {
1310 self.unlink_large_chunk(tp);
1311 }
1312 if sys::free(base, size) {
1313 released += size;
1314 self.footprint -= size;
1315 // unlink our obsolete record
1316 sp = pred;
1317 (*sp).next = next;
1318 } else {
1319 // back out if we can't unmap
1320 self.insert_large_chunk(tp, psize);
1321 }
1322 }
1323 }
1324 pred = sp;
1325 sp = next;
1326 }
1327 self.release_checks = if nsegs > MAX_RELEASE_CHECK_RATE {
1328 nsegs
1329 } else {
1330 MAX_RELEASE_CHECK_RATE
1331 };
1332 return released
1333 }
1334
1335 // Sanity checks
1336
1337 unsafe fn check_any_chunk(&self, p: *mut Chunk) {
1338 if !cfg!(debug_assertions) {
1339 return
1340 }
1341 debug_assert!(self.is_aligned(Chunk::to_mem(p) as usize) ||
1342 (*p).head == Chunk::fencepost_head());
1343 debug_assert!(p as *mut u8 >= self.least_addr);
1344 }
1345
1346 unsafe fn check_top_chunk(&self, p: *mut Chunk) {
1347 if !cfg!(debug_assertions) {
1348 return
1349 }
1350 let sp = self.segment_holding(p as *mut u8);
1351 let sz = (*p).head & !INUSE;
1352 debug_assert!(!sp.is_null());
1353 debug_assert!(self.is_aligned(Chunk::to_mem(p) as usize) ||
1354 (*p).head == Chunk::fencepost_head());
1355 debug_assert!(p as *mut u8 >= self.least_addr);
1356 debug_assert_eq!(sz, self.topsize);
1357 debug_assert!(sz > 0);
1358 debug_assert_eq!(sz, (*sp).base as usize + (*sp).size - p as usize - self.top_foot_size());
1359 debug_assert!(Chunk::pinuse(p));
1360 debug_assert!(!Chunk::pinuse(Chunk::plus_offset(p, sz)));
1361 }
1362
1363 unsafe fn check_malloced_chunk(&self, mem: *mut u8, s: usize) {
1364 if !cfg!(debug_assertions) {
1365 return
1366 }
1367 if mem.is_null() {
1368 return
1369 }
1370 let p = Chunk::from_mem(mem);
1371 let sz = (*p).head & !INUSE;
1372 self.check_inuse_chunk(p);
1373 debug_assert_eq!(align_up(sz, self.malloc_alignment()), sz);
1374 debug_assert!(sz >= self.min_chunk_size());
1375 debug_assert!(sz >= s);
1376 debug_assert!(Chunk::mmapped(p) || sz < (s + self.min_chunk_size()));
1377 }
1378
1379 unsafe fn check_inuse_chunk(&self, p: *mut Chunk) {
1380 self.check_any_chunk(p);
1381 debug_assert!(Chunk::inuse(p));
1382 debug_assert!(Chunk::pinuse(Chunk::next(p)));
1383 debug_assert!(Chunk::mmapped(p) || Chunk::pinuse(p) ||
1384 Chunk::next(Chunk::prev(p)) == p);
1385 if Chunk::mmapped(p) {
1386 self.check_mmapped_chunk(p);
1387 }
1388 }
1389
1390 unsafe fn check_mmapped_chunk(&self, p: *mut Chunk) {
1391 if !cfg!(debug_assertions) {
1392 return
1393 }
1394 let sz = Chunk::size(p);
1395 let len = sz + (*p).prev_foot + self.mmap_foot_pad();
1396 debug_assert!(Chunk::mmapped(p));
1397 debug_assert!(self.is_aligned(Chunk::to_mem(p) as usize) ||
1398 (*p).head == Chunk::fencepost_head());
1399 debug_assert!(p as *mut u8 >= self.least_addr);
1400 debug_assert!(!self.is_small(sz));
1401 debug_assert_eq!(align_up(len, sys::page_size()), len);
1402 debug_assert_eq!((*Chunk::plus_offset(p, sz)).head, Chunk::fencepost_head());
1403 debug_assert_eq!((*Chunk::plus_offset(p, sz + mem::size_of::<usize>())).head, 0);
1404 }
1405
1406 unsafe fn check_free_chunk(&self, p: *mut Chunk) {
1407 if !cfg!(debug_assertions) {
1408 return
1409 }
1410 let sz = Chunk::size(p);
1411 let next = Chunk::plus_offset(p, sz);
1412 self.check_any_chunk(p);
1413 debug_assert!(!Chunk::inuse(p));
1414 debug_assert!(!Chunk::pinuse(Chunk::next(p)));
1415 debug_assert!(!Chunk::mmapped(p));
1416 if p != self.dv && p != self.top {
1417 if sz >= self.min_chunk_size() {
1418 debug_assert_eq!(align_up(sz, self.malloc_alignment()), sz);
1419 debug_assert!(self.is_aligned(Chunk::to_mem(p) as usize));
1420 debug_assert_eq!((*next).prev_foot, sz);
1421 debug_assert!(Chunk::pinuse(p));
1422 debug_assert!(next == self.top || Chunk::inuse(next));
1423 debug_assert_eq!((*(*p).next).prev, p);
1424 debug_assert_eq!((*(*p).prev).next, p);
1425 } else {
1426 debug_assert_eq!(sz, mem::size_of::<usize>());
1427 }
1428 }
1429 }
1430
1431 unsafe fn check_malloc_state(&mut self) {
1432 if !cfg!(debug_assertions) {
1433 return
1434 }
1435 for i in 0..NSMALLBINS {
1436 self.check_smallbin(i as u32);
1437 }
1438 for i in 0..NTREEBINS {
1439 self.check_treebin(i as u32);
1440 }
1441 if self.dvsize != 0 {
1442 self.check_any_chunk(self.dv);
1443 debug_assert_eq!(self.dvsize, Chunk::size(self.dv));
1444 debug_assert!(self.dvsize >= self.min_chunk_size());
1445 let dv = self.dv;
1446 debug_assert!(!self.bin_find(dv));
1447 }
1448 if !self.top.is_null() {
1449 self.check_top_chunk(self.top);
1450 debug_assert!(self.topsize > 0);
1451 let top = self.top;
1452 debug_assert!(!self.bin_find(top));
1453 }
1454 let total = self.traverse_and_check();
1455 debug_assert!(total <= self.footprint);
1456 debug_assert!(self.footprint <= self.max_footprint);
1457 }
1458
1459 unsafe fn check_smallbin(&mut self, idx: u32) {
1460 if !cfg!(debug_assertions) {
1461 return
1462 }
1463 let b = self.smallbin_at(idx);
1464 let mut p = (*b).next;
1465 let empty = self.smallmap & (1 << idx) == 0;
1466 if p == b {
1467 debug_assert!(empty)
1468 }
1469 if !empty {
1470 while p != b {
1471 let size = Chunk::size(p);
1472 self.check_free_chunk(p);
1473 debug_assert_eq!(self.small_index(size), idx);
1474 debug_assert!((*p).next == b ||
1475 Chunk::size((*p).next) == Chunk::size(p));
1476 let q = Chunk::next(p);
1477 if (*q).head != Chunk::fencepost_head() {
1478 self.check_inuse_chunk(q);
1479 }
1480 p = (*p).next;
1481 }
1482 }
1483 }
1484
1485 unsafe fn check_treebin(&mut self, idx: u32) {
1486 if !cfg!(debug_assertions) {
1487 return
1488 }
1489 let tb = self.treebin_at(idx);
1490 let t = *tb;
1491 let empty = self.treemap & (1 << idx) == 0;
1492 if t.is_null() {
1493 debug_assert!(empty);
1494 }
1495 if !empty {
1496 self.check_tree(t);
1497 }
1498 }
1499
1500 unsafe fn check_tree(&mut self, t: *mut TreeChunk) {
1501 if !cfg!(debug_assertions) {
1502 return
1503 }
1504 let tc = TreeChunk::chunk(t);
1505 let tindex = (*t).index;
1506 let tsize = Chunk::size(tc);
1507 let idx = self.compute_tree_index(tsize);
1508 debug_assert_eq!(tindex, idx);
1509 debug_assert!(tsize >= self.min_large_size());
1510 debug_assert!(tsize >= self.min_size_for_tree_index(idx));
1511 debug_assert!(idx == NTREEBINS as u32 - 1 ||
1512 tsize < self.min_size_for_tree_index(idx + 1));
1513
1514 let mut u = t;
1515 let mut head = ptr::null_mut();
1516 loop {
1517 let uc = TreeChunk::chunk(u);
1518 self.check_any_chunk(uc);
1519 debug_assert_eq!((*u).index, tindex);
1520 debug_assert_eq!(Chunk::size(uc), tsize);
1521 debug_assert!(!Chunk::inuse(uc));
1522 debug_assert!(!Chunk::pinuse(Chunk::next(uc)));
1523 debug_assert_eq!((*(*uc).next).prev, uc);
1524 debug_assert_eq!((*(*uc).prev).next, uc);
1525 let left = (*u).child[0];
1526 let right = (*u).child[1];
1527 if (*u).parent.is_null() {
1528 debug_assert!(left.is_null());
1529 debug_assert!(right.is_null());
1530 } else {
1531 debug_assert!(head.is_null());
1532 head = u;
1533 debug_assert!((*u).parent != u);
1534 debug_assert!((*(*u).parent).child[0] == u ||
1535 (*(*u).parent).child[1] == u ||
1536 *((*u).parent as *mut *mut TreeChunk) == u);
1537 if !left.is_null() {
1538 debug_assert_eq!((*left).parent, u);
1539 debug_assert!(left != u);
1540 self.check_tree(left);
1541 }
1542 if !right.is_null() {
1543 debug_assert_eq!((*right).parent, u);
1544 debug_assert!(right != u);
1545 self.check_tree(right);
1546 }
1547 if !left.is_null() && !right.is_null() {
1548 debug_assert!(Chunk::size(TreeChunk::chunk(left)) <
1549 Chunk::size(TreeChunk::chunk(right)));
1550 }
1551 }
1552
1553 u = TreeChunk::prev(u);
1554 if u == t {
1555 break
1556 }
1557 }
1558 debug_assert!(!head.is_null());
1559 }
1560
1561 fn min_size_for_tree_index(&self, idx: u32) -> usize {
1562 let idx = idx as usize;
1563 (1 << ((idx >> 1) + TREEBIN_SHIFT)) |
1564 ((idx & 1) << ((idx >> 1) + TREEBIN_SHIFT - 1))
1565 }
1566
1567 unsafe fn bin_find(&mut self, chunk: *mut Chunk) -> bool {
1568 let size = Chunk::size(chunk);
1569 if self.is_small(size) {
1570 let sidx = self.small_index(size);
1571 let b = self.smallbin_at(sidx);
1572 if !self.smallmap_is_marked(sidx) {
1573 return false
1574 }
1575 let mut p = b;
1576 loop {
1577 if p == chunk {
1578 return true
1579 }
1580 p = (*p).prev;
1581 if p == b {
1582 return false
1583 }
1584 }
1585 } else {
1586 let tidx = self.compute_tree_index(size);
1587 if !self.treemap_is_marked(tidx) {
1588 return false
1589 }
1590 let mut t = *self.treebin_at(tidx);
1591 let mut sizebits = size << leftshift_for_tree_index(tidx);
1592 while !t.is_null() && Chunk::size(TreeChunk::chunk(t)) != size {
1593 t = (*t).child[(sizebits >> (mem::size_of::<usize>() * 8 - 1)) & 1];
1594 sizebits <<= 1;
1595 }
1596 if t.is_null() {
1597 return false
1598 }
1599 let mut u = t;
1600 let chunk = chunk as *mut TreeChunk;
1601 loop {
1602 if u == chunk {
1603 return true
1604 }
1605 u = TreeChunk::prev(u);
1606 if u == t {
1607 return false
1608 }
1609 }
1610 }
1611
1612 }
1613
1614 unsafe fn traverse_and_check(&self) -> usize {
1615 0
1616 }
1617 }
1618
1619 const PINUSE: usize = 1 << 0;
1620 const CINUSE: usize = 1 << 1;
1621 const FLAG4: usize = 1 << 2;
1622 const INUSE: usize = PINUSE | CINUSE;
1623 const FLAG_BITS: usize = PINUSE | CINUSE | FLAG4;
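// Chunk sizes are always multiples of `malloc_alignment()`, so the low bits
// of `head` are free to carry these flags: PINUSE marks the previous chunk as
// in use, CINUSE marks this chunk as in use, and a chunk with neither bit set
// is mmapped. FLAG4 is reserved and only masked off via FLAG_BITS.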
1624
1625 impl Chunk {
1626 unsafe fn fencepost_head() -> usize {
1627 INUSE | mem::size_of::<usize>()
1628 }
1629
1630 unsafe fn size(me: *mut Chunk) -> usize {
1631 (*me).head & !FLAG_BITS
1632 }
1633
1634 unsafe fn next(me: *mut Chunk) -> *mut Chunk {
1635 (me as *mut u8).offset(((*me).head & !FLAG_BITS) as isize) as *mut Chunk
1636 }
1637
1638 unsafe fn prev(me: *mut Chunk) -> *mut Chunk {
1639 (me as *mut u8).offset(-((*me).prev_foot as isize)) as *mut Chunk
1640 }
1641
1642 unsafe fn cinuse(me: *mut Chunk) -> bool {
1643 (*me).head & CINUSE != 0
1644 }
1645
1646 unsafe fn pinuse(me: *mut Chunk) -> bool {
1647 (*me).head & PINUSE != 0
1648 }
1649
1650 unsafe fn clear_pinuse(me: *mut Chunk) {
1651 (*me).head &= !PINUSE;
1652 }
1653
1654 unsafe fn inuse(me: *mut Chunk) -> bool {
1655 (*me).head & INUSE != PINUSE
1656 }
1657
1658 unsafe fn mmapped(me: *mut Chunk) -> bool {
1659 (*me).head & INUSE == 0
1660 }
1661
1662 unsafe fn set_inuse(me: *mut Chunk, size: usize) {
1663 (*me).head = ((*me).head & PINUSE) | size | CINUSE;
1664 let next = Chunk::plus_offset(me, size);
1665 (*next).head |= PINUSE;
1666 }
1667
1668 unsafe fn set_inuse_and_pinuse(me: *mut Chunk, size: usize) {
1669 (*me).head = PINUSE | size | CINUSE;
1670 let next = Chunk::plus_offset(me, size);
1671 (*next).head |= PINUSE;
1672 }
1673
1674 unsafe fn set_size_and_pinuse_of_inuse_chunk(me: *mut Chunk, size: usize) {
1675 (*me).head = size | PINUSE | CINUSE;
1676 }
1677
1678 unsafe fn set_size_and_pinuse_of_free_chunk(me: *mut Chunk, size: usize) {
1679 (*me).head = size | PINUSE;
1680 Chunk::set_foot(me, size);
1681 }
1682
1683 unsafe fn set_free_with_pinuse(p: *mut Chunk, size: usize, n: *mut Chunk) {
1684 Chunk::clear_pinuse(n);
1685 Chunk::set_size_and_pinuse_of_free_chunk(p, size);
1686 }
1687
1688 unsafe fn set_foot(me: *mut Chunk, size: usize) {
1689 let next = Chunk::plus_offset(me, size);
1690 (*next).prev_foot = size;
1691 }
1692
1693 unsafe fn plus_offset(me: *mut Chunk, offset: usize) -> *mut Chunk {
1694 (me as *mut u8).offset(offset as isize) as *mut Chunk
1695 }
1696
1697 unsafe fn minus_offset(me: *mut Chunk, offset: usize) -> *mut Chunk {
1698 (me as *mut u8).offset(-(offset as isize)) as *mut Chunk
1699 }
1700
1701 unsafe fn to_mem(me: *mut Chunk) -> *mut u8 {
1702 (me as *mut u8).offset(2 * (mem::size_of::<usize>() as isize))
1703 }
1704
1705 unsafe fn from_mem(mem: *mut u8) -> *mut Chunk {
1706 mem.offset(-2 * (mem::size_of::<usize>() as isize)) as *mut Chunk
1707 }
1708 }
1709
1710 impl TreeChunk {
1711 unsafe fn leftmost_child(me: *mut TreeChunk) -> *mut TreeChunk {
1712 let left = (*me).child[0];
1713 if left.is_null() {
1714 (*me).child[1]
1715 } else {
1716 left
1717 }
1718 }
1719
1720 unsafe fn chunk(me: *mut TreeChunk) -> *mut Chunk {
1721 &mut (*me).chunk
1722 }
1723
1724 unsafe fn next(me: *mut TreeChunk) -> *mut TreeChunk {
1725 (*TreeChunk::chunk(me)).next as *mut TreeChunk
1726 }
1727
1728 unsafe fn prev(me: *mut TreeChunk) -> *mut TreeChunk {
1729 (*TreeChunk::chunk(me)).prev as *mut TreeChunk
1730 }
1731 }
1732
1733 const EXTERN: u32 = 1 << 0;
1734
1735 impl Segment {
1736 unsafe fn is_extern(seg: *mut Segment) -> bool {
1737 (*seg).flags & EXTERN != 0
1738 }
1739
1740 unsafe fn can_release_part(seg: *mut Segment) -> bool {
1741 sys::can_release_part((*seg).flags >> 1)
1742 }
1743
1744 unsafe fn sys_flags(seg: *mut Segment) -> u32 {
1745 (*seg).flags >> 1
1746 }
1747
1748 unsafe fn holds(seg: *mut Segment, addr: *mut u8) -> bool {
1749 (*seg).base <= addr && addr < Segment::top(seg)
1750 }
1751
1752 unsafe fn top(seg: *mut Segment) -> *mut u8 {
1753 (*seg).base.offset((*seg).size as isize)
1754 }
1755 }