/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "private/gc_priv.h"

GC_bool GC_use_entire_heap = 0;
/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */

# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
                /* largest block we will allocate starting on a black   */
                /* listed block.  Must be >= HBLKSIZE.                  */

# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list    */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a      */
        /* single free list.                                            */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single    */
        /* free list.                                                   */

# define N_HBLK_FLS (HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                                 + UNIQUE_THRESHOLD
struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };

#ifndef USE_MUNMAP

  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list.   */
  /* Is bytes + the number of free bytes on lists n .. N_HBLK_FLS      */
  /* > GC_max_large_allocd_bytes?                                      */
  static GC_bool GC_enough_large_bytes_left(word bytes, int n)
  {
    int i;

    for (i = N_HBLK_FLS; i >= n; --i) {
        bytes += GC_free_bytes[i];
        if (bytes > GC_max_large_allocd_bytes) return TRUE;
    }
    return FALSE;
  }
# define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);

# define FREE_ASSERT(e) GC_ASSERT(e)

#else /* USE_MUNMAP */

# define INCR_FREE_BYTES(n, b)
# define FREE_ASSERT(e)

#endif /* USE_MUNMAP */
/* Map a number of blocks to the appropriate large block free list index. */
int GC_hblk_fl_from_blocks(word blocks_needed)
{
    if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
    if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
    return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
                    + UNIQUE_THRESHOLD;
}
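
/*
 * For illustration, with the default thresholds above
 * (so N_HBLK_FLS = (256 - 32)/8 + 32 = 60):
 *      blocks_needed =   1  ->  index  1   (one list per size)
 *      blocks_needed =  32  ->  index 32   (last unique size)
 *      blocks_needed =  33  ->  index 32   ((33-32)/8 + 32; sizes
 *                                           32..39 share this list)
 *      blocks_needed =  40  ->  index 33
 *      blocks_needed = 256  ->  index 60   (one shared list for all
 *                                           huge sizes)
 */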
# define PHDR(hhdr) HDR(hhdr -> hb_prev)
# define NHDR(hhdr) HDR(hhdr -> hb_next)

# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else  /* !USE_MUNMAP */
#   define IS_MAPPED(hhdr) 1
# endif /* USE_MUNMAP */
# if !defined(NO_DEBUGGING)
void GC_print_hblkfreelist()
{
    struct hblk * h;
    word total_free = 0;
    hdr * hhdr;
    word sz;
    unsigned i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
#     ifdef USE_MUNMAP
        if (0 != h) GC_printf("Free list %ld:\n", (long)i);
#     else
        if (0 != h) GC_printf("Free list %lu (Total size %lu):\n",
                              (unsigned long)i,
                              (unsigned long)GC_free_bytes[i]);
#     endif
      while (h != 0) {
        hhdr = HDR(h);
        sz = hhdr -> hb_sz;
        GC_printf("\t%p size %lu ", h, (unsigned long)sz);
        total_free += sz;
        if (GC_is_black_listed(h, HBLKSIZE) != 0) {
             GC_printf("start black listed\n");
        } else if (GC_is_black_listed(h, hhdr -> hb_sz) != 0) {
             GC_printf("partially black listed\n");
        } else {
             GC_printf("not black listed\n");
        }
        h = hhdr -> hb_next;
      }
    }
#   ifndef USE_MUNMAP
      if (total_free != GC_large_free_bytes) {
        GC_printf("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
                  (unsigned long) GC_large_free_bytes);
      }
#   endif
    GC_printf("Total of %lu bytes on free list\n", (unsigned long)total_free);
}
/* Return the free list index on which the block described by the header */
/* appears, or -1 if it appears nowhere.                                 */
int free_list_index_of(hdr *wanted)
{
    struct hblk * h;
    hdr * hhdr;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        if (hhdr == wanted) return i;
        h = hhdr -> hb_next;
      }
    }
    return -1;
}
void GC_dump_regions()
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;

    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections.        */
          while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
          }
        GC_printf("***Section from %p to %p\n", start, end);
        for (p = start; p < end;) {
            hhdr = HDR(p);
            GC_printf("\t%p ", p);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf("Missing header!!(%p)\n", (void *)hhdr);
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf("\tfree block of size 0x%lx bytes",
                          (unsigned long)(hhdr -> hb_sz));
                if (IS_MAPPED(hhdr)) {
                    GC_printf("\n");
                } else {
                    GC_printf("(unmapped)\n");
                }
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf("\t\tBlock not on free list %d!!\n",
                              correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf("\t\tBlock on list %d, should be on %d!!\n",
                              actual_index, correct_index);
                }
                p += hhdr -> hb_sz;
            } else {
                GC_printf("\tused for blocks of size 0x%lx bytes\n",
                          (unsigned long)(hhdr -> hb_sz));
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}

# endif /* NO_DEBUGGING */
/* Initialize hdr for a block containing the indicated size and         */
/* kind of objects.                                                     */
/* Return FALSE on failure.                                             */
static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
                            int kind, unsigned flags)
{
    word descr;
#   ifndef MARK_BIT_PER_OBJ
      size_t granules;
#   endif

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = byte_sz;
      hhdr -> hb_obj_kind = (unsigned char)kind;
      hhdr -> hb_flags = (unsigned char)flags;
      hhdr -> hb_block = block;
      descr = GC_obj_kinds[kind].ok_descriptor;
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += byte_sz;
      hhdr -> hb_descr = descr;

#   ifdef MARK_BIT_PER_OBJ
      /* Set hb_inv_sz as portably as possible.                         */
      /* We set it to the smallest value such that sz * inv_sz > 2**32. */
      /* This may be more precision than necessary.                     */
      if (byte_sz > MAXOBJBYTES) {
        hhdr -> hb_inv_sz = LARGE_INV_SZ;
      } else {
        word inv_sz;

#       if CPP_WORDSZ == 64
          inv_sz = ((word)1 << 32)/byte_sz;
          if (((inv_sz*byte_sz) >> 32) == 0) ++inv_sz;
#       else  /* 32 bit words */
          GC_ASSERT(byte_sz >= 4);
          inv_sz = ((unsigned)1 << 31)/byte_sz;
          inv_sz *= 2;
          while (inv_sz*byte_sz > byte_sz) ++inv_sz;
#       endif
        hhdr -> hb_inv_sz = inv_sz;
      }
#   else /* MARK_BIT_PER_GRANULE */
      hhdr -> hb_large_block = (unsigned char)(byte_sz > MAXOBJBYTES);
      granules = BYTES_TO_GRANULES(byte_sz);
      if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
        /* Make it look like a valid block. */
        hhdr -> hb_sz = HBLKSIZE;
        hhdr -> hb_descr = 0;
        hhdr -> hb_large_block = TRUE;
        hhdr -> hb_map = 0;
        return FALSE;
      } else {
        size_t index = (hhdr -> hb_large_block ? 0 : granules);
        hhdr -> hb_map = GC_obj_map[index];
      }
#   endif /* MARK_BIT_PER_GRANULE */

    /* Clear mark bits */
      GC_clear_hdr_marks(hhdr);

    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}
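
/*
 * Worked example of the MARK_BIT_PER_OBJ inverse-size computation on a
 * 64-bit target: for byte_sz = 48, ((word)1 << 32)/48 = 89478485, and
 * 89478485*48 = 4294967280 < 2**32, so inv_sz is bumped to 89478486.
 * A displacement d that is a multiple of 48 can then be divided by 48
 * without a hardware divide: (d*inv_sz) >> 32 == d/48, e.g.
 * (480*89478486) >> 32 == 10.
 */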
#define FL_UNKNOWN -1

/*
 * Remove hhdr from the appropriate free list.
 * We assume it is on the nth free list, or on the size
 * appropriate free list if n is FL_UNKNOWN.
 */
void GC_remove_from_fl(hdr *hhdr, int n)
{
    int index;

    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
#   ifndef USE_MUNMAP
      /* We always need index to maintain free counts.  */
      if (FL_UNKNOWN == n) {
          index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
      } else {
          index = n;
      }
#   endif
    if (hhdr -> hb_prev == 0) {
#       ifdef USE_MUNMAP
          /* index is not yet known */
          if (FL_UNKNOWN == n) {
              index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
          } else {
              index = n;
          }
#       endif
        GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
        GC_hblkfreelist[index] = hhdr -> hb_next;
    } else {
        hdr *phdr;

        GET_HDR(hhdr -> hb_prev, phdr);
        phdr -> hb_next = hhdr -> hb_next;
    }
    FREE_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
    INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
    if (0 != hhdr -> hb_next) {
        hdr * nhdr;

        GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
        GET_HDR(hhdr -> hb_next, nhdr);
        nhdr -> hb_prev = hhdr -> hb_prev;
    }
}
/*
 * Return a pointer to the free block ending just before h, if any.
 */
struct hblk * GC_free_block_ending_at(struct hblk *h)
{
    struct hblk * p = h - 1;
    hdr * phdr;

    GET_HDR(p, phdr);
    while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
        p = FORWARDED_ADDR(p,phdr);
        phdr = HDR(p);
    }
    if (0 != phdr) {
        if (HBLK_IS_FREE(phdr)) {
            return p;
        } else {
            return 0;
        }
    }
    p = GC_prev_block(h - 1);
    if (0 != p) {
      phdr = HDR(p);
      if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
        return p;
      }
    }
    return 0;
}
/*
 * Add hhdr to the appropriate free list.
 * The block is added at the head of the list.
 */
void GC_add_to_fl(struct hblk *h, hdr *hhdr)
{
    int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
    struct hblk *second = GC_hblkfreelist[index];
    hdr * second_hdr;
#   ifdef GC_ASSERTIONS
      struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
      hdr * nexthdr = HDR(next);
      struct hblk *prev = GC_free_block_ending_at(h);
      hdr * prevhdr = HDR(prev);

      GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr)
                || !IS_MAPPED(nexthdr));
      GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr)
                || !IS_MAPPED(prevhdr));
#   endif
    GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
    GC_hblkfreelist[index] = h;
    INCR_FREE_BYTES(index, hhdr -> hb_sz);
    FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes);
    hhdr -> hb_next = second;
    hhdr -> hb_prev = 0;
    if (0 != second) {
      GET_HDR(second, second_hdr);
      second_hdr -> hb_prev = h;
    }
    hhdr -> hb_flags |= FREE_BLK;
}
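
/*
 * For illustration: if GC_hblkfreelist[5] currently holds A <-> B and a
 * block C of the same size class is freed, the list becomes
 * C <-> A <-> B, with C -> hb_prev == 0 marking C as the new list head.
 */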
#ifdef USE_MUNMAP

/* Unmap blocks that haven't been recently touched.  This is the only   */
/* way blocks are ever unmapped.                                        */
void GC_unmap_old(void)
{
    struct hblk * h;
    hdr * hhdr;
    word sz;
    unsigned short last_rec, threshold;
    int i;
#   define UNMAP_THRESHOLD 6

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
        hhdr = HDR(h);
        if (!IS_MAPPED(hhdr)) continue;
        threshold = (unsigned short)(GC_gc_no - UNMAP_THRESHOLD);
        last_rec = hhdr -> hb_last_reclaimed;
        if ((last_rec > GC_gc_no || last_rec < threshold)
            && threshold < GC_gc_no /* not recently wrapped */) {
          sz = hhdr -> hb_sz;
          GC_unmap((ptr_t)h, sz);
          hhdr -> hb_flags |= WAS_UNMAPPED;
        }
      }
    }
}
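
/*
 * For illustration: with UNMAP_THRESHOLD 6 and GC_gc_no = 100, threshold
 * is 94, so a block whose hb_last_reclaimed is 90 gets unmapped, while
 * one last reclaimed at collection 97 stays mapped.  The remaining tests
 * guard against the truncated 16-bit threshold computation wrapping
 * around: during the first few collections, threshold would wrap to a
 * value near 65535, threshold < GC_gc_no would fail, and no unmapping
 * would occur.
 */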
/* Merge all unmapped blocks that are adjacent to other free            */
/* blocks.  This may involve remapping, since all blocks are either     */
/* fully mapped or fully unmapped.                                      */
void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        hhdr = HDR(h);
        size = hhdr -> hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible */
          if (0 != nexthdr && HBLK_IS_FREE(nexthdr)) {
            nextsize = nexthdr -> hb_sz;
            if (IS_MAPPED(hhdr)) {
              GC_ASSERT(!IS_MAPPED(nexthdr));
              /* make both consistent, so that we can merge */
                if (size > nextsize) {
                  GC_remap((ptr_t)next, nextsize);
                } else {
                  GC_unmap((ptr_t)h, size);
                  hhdr -> hb_flags |= WAS_UNMAPPED;
                }
            } else if (IS_MAPPED(nexthdr)) {
              GC_ASSERT(!IS_MAPPED(hhdr));
              if (size > nextsize) {
                GC_unmap((ptr_t)next, nextsize);
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
                hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
              }
            } else {
              /* Unmap any gap in the middle */
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nexthdr -> hb_sz);
            }
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl(hhdr, i);
            GC_remove_from_fl(nexthdr, FL_UNKNOWN);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
          } else /* not mergable with successor */ {
            h = hhdr -> hb_next;
          }
      } /* while (h != 0) ... */
    } /* for ... */
}

#endif /* USE_MUNMAP */
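
/*
 * The neighbor states GC_merge_unmapped handles, where h is the current
 * block and n its successor:
 *   h mapped,   n mapped:   not reached; adjacent mapped free blocks
 *                           were already coalesced by GC_freehblk.
 *   h mapped,   n unmapped: the smaller side changes state, then merge.
 *   h unmapped, n mapped:   symmetric to the above.
 *   h unmapped, n unmapped: unmap any gap between them, then merge,
 *                           leaving the result unmapped.
 */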
/*
 * Return a pointer to a block starting at h of length bytes.
 * Memory for the block is mapped.
 * Remove the block from its free list, and return the remainder (if any)
 * to its appropriate free list.
 * May fail by returning 0.
 * The header for the returned block must be set up by the caller.
 * If the return value is not 0, then hhdr is the header for it.
 */
struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
                                size_t bytes, int index)
{
    word total_size = hhdr -> hb_sz;
    struct hblk * rest;
    hdr * rest_hdr;

    GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
    GC_remove_from_fl(hhdr, index);
    if (total_size == bytes) return h;
    rest = (struct hblk *)((word)h + bytes);
    rest_hdr = GC_install_header(rest);
    if (0 == rest_hdr) {
        /* FIXME: This is likely to be very bad news ... */
        WARN("Header allocation failed: Dropping block.\n", 0);
        return(0);
    }
    rest_hdr -> hb_sz = total_size - bytes;
    rest_hdr -> hb_flags = 0;
#   ifdef GC_ASSERTIONS
      /* Mark h not free, to avoid assertion about adjacent free blocks. */
      hhdr -> hb_flags &= ~FREE_BLK;
#   endif
    GC_add_to_fl(rest, rest_hdr);
    return h;
}
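
/*
 * For illustration: asking GC_get_first_part for 2 blocks (bytes =
 * 2*HBLKSIZE) out of a 5-block free block h returns h itself, while the
 * 3-block remainder at h+2 gets a fresh header and goes back on the free
 * list for its own size class (index GC_hblk_fl_from_blocks(3)).
 */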
/*
 * H is a free block.  N points at an address inside it.
 * A new header for n has already been set up.  Fix up h's header
 * to reflect the fact that it is being split, move it to the
 * appropriate free list.
 * N replaces h in the original free list.
 *
 * Nhdr is not completely filled in, since it is about to be allocated.
 * It may in fact end up on the wrong free list for its size.
 * (Hence adding it to a free list is silly.  But this path is hopefully
 * rare enough that it doesn't matter.  The code is cleaner this way.)
 */
void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
                    hdr *nhdr, int index /* Index of free list */)
{
    word total_size = hhdr -> hb_sz;
    word h_size = (word)n - (word)h;
    struct hblk *prev = hhdr -> hb_prev;
    struct hblk *next = hhdr -> hb_next;

    /* Replace h with n on its freelist */
      nhdr -> hb_prev = prev;
      nhdr -> hb_next = next;
      nhdr -> hb_sz = total_size - h_size;
      nhdr -> hb_flags = 0;
      if (0 != prev) {
        HDR(prev) -> hb_next = n;
      } else {
        GC_hblkfreelist[index] = n;
      }
      if (0 != next) {
        HDR(next) -> hb_prev = n;
      }
      INCR_FREE_BYTES(index, -(signed_word)h_size);
      FREE_ASSERT(GC_free_bytes[index] > 0);
#     ifdef GC_ASSERTIONS
        nhdr -> hb_flags &= ~FREE_BLK;
                                /* Don't fail test for consecutive      */
                                /* free blocks in GC_add_to_fl.         */
#     endif
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif
    hhdr -> hb_sz = h_size;
    GC_add_to_fl(h, hhdr);
    nhdr -> hb_flags |= FREE_BLK;
}
static struct hblk *
GC_allochblk_nth(size_t sz /* bytes */, int kind, unsigned flags, int n);

/*
 * Allocate (and return pointer to) a heap block
 * for objects of size sz bytes, searching the appropriate free lists.
 *
 * NOTE: We set obj_map field in header correctly.
 * Caller is responsible for building an object freelist in block.
 *
 * The client is responsible for clearing the block, if necessary.
 */
struct hblk *
GC_allochblk(size_t sz, int kind, unsigned flags /* IGNORE_OFF_PAGE or 0 */)
{
    word blocks;
    int start_list;
    int i;

    GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
    blocks = OBJ_SZ_TO_BLOCKS(sz);
    start_list = GC_hblk_fl_from_blocks(blocks);
    for (i = start_list; i <= N_HBLK_FLS; ++i) {
        struct hblk * result = GC_allochblk_nth(sz, kind, flags, i);

        if (0 != result) return result;
    }
    return 0;
}
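
/*
 * Minimal caller sketch (illustrative only; assumes the allocation lock
 * is held, as it is on the reclaim/expansion paths that call this):
 */
#if 0
    {
      /* Request a block for HBLKSIZE-byte pointer-free objects. */
      struct hblk *h = GC_allochblk(HBLKSIZE, PTRFREE, 0);

      if (0 == h) {
        /* Every suitable free list failed: collect or expand the */
        /* heap, then retry.                                      */
      }
    }
#endif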
/*
 * The same, but with search restricted to nth free list.
 * Flags is IGNORE_OFF_PAGE or zero.
 * Unlike the above, sz is in bytes.
 */
static struct hblk *
GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n)
{
    struct hblk *hbp;
    hdr * hhdr;                 /* Header corr. to hbp                  */
                                /* Initialized after loop if hbp != 0   */
                                /* Gcc uninitialized use warning is bogus. */
    struct hblk *thishbp;
    hdr * thishdr;              /* Header corr. to thishbp              */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
        hbp = GC_hblkfreelist[n];
        for (; 0 != hbp; hbp = hhdr -> hb_next) {
            GET_HDR(hbp, hhdr);
            size_avail = hhdr -> hb_sz;
            if (size_avail < size_needed) continue;
            if (size_avail != size_needed
                && !GC_use_entire_heap
                && !GC_dont_gc
                && USED_HEAP_SIZE >= GC_requested_heapsize
                && !TRUE_INCREMENTAL && GC_should_collect()) {
#               ifdef USE_MUNMAP
                    continue;
#               else
                    /* If we have enough large blocks left to cover any */
                    /* previous request for large blocks, we go ahead   */
                    /* and split.  Assuming a steady state, that should */
                    /* be safe.  It means that we can use the full      */
                    /* heap if we allocate only small objects.          */
                    if (!GC_enough_large_bytes_left(GC_large_allocd_bytes, n)) {
                      continue;
                    }
                    /* If we are deallocating lots of memory from       */
                    /* finalizers, fail and collect sooner rather       */
                    /* than later.                                      */
                    if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
                      continue;
                    }
#               endif /* !USE_MUNMAP */
            }
            /* If the next heap block is obviously better, go on.       */
            /* This prevents us from disassembling a single large block */
            /* to get tiny blocks.                                      */
            {
              signed_word next_size;

              thishbp = hhdr -> hb_next;
              if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                  continue;
                }
              }
            }
            if (!IS_UNCOLLECTABLE(kind) &&
                (kind != PTRFREE || size_needed > MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = ((flags & IGNORE_OFF_PAGE)?
                                                HBLKSIZE
                                                : size_needed);

              while ((ptr_t)lasthbp <= search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                        (word)eff_size_needed)) != 0) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp &&
                    0 != (thishdr = GC_install_header(thishbp))) {
                  /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                        GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                  /* Split the block at thishbp */
                      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                      hbp = thishbp;
                      hhdr = thishdr;
                      /* We must now allocate thishbp, since it may     */
                      /* be on the wrong free list.                     */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                if (++GC_large_alloc_warn_suppressed
                    >= GC_large_alloc_warn_interval) {
                  WARN("Repeated allocation of very large block "
                       "(appr. size %ld):\n"
                       "\tMay lead to memory leak and poor performance.\n",
                       size_needed);
                  GC_large_alloc_warn_suppressed = 0;
                }
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                  static unsigned count = 0;

                  /* The block is completely blacklisted.  We need      */
                  /* to drop some such blocks, since otherwise we spend */
                  /* all our time traversing them if pointerfree        */
                  /* blocks are unpopular.                              */
                  /* A dropped block will be reconsidered at next GC.   */
                  if ((++count & 3) == 0) {
                    /* Allocate and drop the block in small chunks, to  */
                    /* maximize the chance that we will recover some    */
                    /* later.                                           */
                      word total_size = hhdr -> hb_sz;
                      struct hblk * limit = hbp + divHBLKSZ(total_size);
                      struct hblk * h;
                      struct hblk * prev = hhdr -> hb_prev;

                      GC_large_free_bytes -= total_size;
                      GC_remove_from_fl(hhdr, n);
                      for (h = hbp; h < limit; h++) {
                        if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
                          (void) setup_header(hhdr, h, HBLKSIZE,
                                              PTRFREE, 0); /* Can't fail */
                          if (GC_debugging_started) {
                            BZERO(h, HBLKSIZE);
                          }
                        }
                      }
                    /* Restore hbp to point at free block */
                      hbp = prev;
                      if (0 == hbp) {
                        return GC_allochblk_nth(sz, kind, flags, n);
                      }
                      hhdr = HDR(hbp);
                  }
                }
              }
            }
            if (size_avail >= size_needed) {
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n    */
                /* is important.                                        */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }

        if (0 == hbp) return 0;

        /* Add it to map of valid blocks */
            if (!GC_install_counts(hbp, (word)size_needed)) return(0);
            /* This leaks memory under very rare conditions. */

        /* Set up header */
            if (!setup_header(hhdr, hbp, sz, kind, flags)) {
                GC_remove_counts(hbp, (word)size_needed);
                return(0); /* ditto */
            }

        /* Notify virtual dirty bit implementation that we are about to */
        /* write.  Ensure that pointerfree objects are not protected    */
        /* if it's avoidable.                                           */
            GC_remove_protection(hbp, divHBLKSZ(size_needed),
                                 (hhdr -> hb_descr == 0) /* pointer-free */);

        /* We just successfully allocated a block.  Restart count of    */
        /* consecutive failures.                                        */
        {
            extern unsigned GC_fail_count;

            GC_fail_count = 0;
        }

        GC_large_free_bytes -= size_needed;

        GC_ASSERT(IS_MAPPED(hhdr));
        return hbp;
}
* GC_freehblk_ptr
= 0; /* Search position hint for GC_freehblk */
/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
void
GC_freehblk(struct hblk *hbp)
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    signed_word size;

    GET_HDR(hbp, hhdr);
    size = hhdr -> hb_sz;
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
    GC_remove_counts(hbp, (word)size);
    hhdr -> hb_sz = size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        GC_printf("Duplicate large block deallocation of %p\n", hbp);
        ABORT("Duplicate large block deallocation");
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    hhdr -> hb_flags |= FREE_BLK;
    next = (struct hblk *)((word)hbp + size);
    GET_HDR(next, nexthdr);
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if (0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)) {
        GC_remove_from_fl(nexthdr, FL_UNKNOWN);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
        prevhdr = HDR(prev);
        if (IS_MAPPED(prevhdr)) {
          GC_remove_from_fl(prevhdr, FL_UNKNOWN);
          prevhdr -> hb_sz += hhdr -> hb_sz;
#         ifdef USE_MUNMAP
            prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#         endif
          GC_remove_header(hbp);
          hbp = prev;
          hhdr = prevhdr;
        }
      }
    /* FIXME: It is not clear we really always want to do these merges  */
    /* with -DUSE_MUNMAP, since it updates ages and hence prevents      */
    /* unmapping.                                                       */

    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}
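
/*
 * Coalescing, for illustration: if blocks A and C are already free and
 * mapped, freeing the block B between them removes C from its free list,
 * folds B and then C into A's header, and re-adds the single merged
 * block to the free list for its combined size:
 *     before:  [A: free][B: in use][C: free]
 *     after:   [A:           free          ]
 */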