/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1998 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */


# include "private/gc_priv.h"

# include <stdio.h>
# if !defined(MACOS) && !defined(MSWINCE)
#   include <signal.h>
#   include <sys/types.h>
# endif

/*
 * Separate free lists are maintained for different sized objects
 * up to MAXOBJBYTES.
 * The call GC_allocobj(i,k) ensures that the freelist for
 * kind k objects of size i points to a non-empty
 * free list.  It returns a pointer to the first entry on the free list.
 * In a single-threaded world, GC_allocobj may be called to allocate
 * an object of (small) size i as follows:
 *
 *            opp = &(GC_objfreelist[i]);
 *            if (*opp == 0) GC_allocobj(i, NORMAL);
 *            ptr = *opp;
 *            *opp = obj_link(ptr);
 *
 * Note that this is very fast if the free list is non-empty; it should
 * only involve the execution of 4 or 5 simple instructions.
 * All composite objects on freelists are cleared, except for
 * their first word.
 */
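
/*
 * For exposition only: the fast path above relies on each free object
 * storing the pointer to the next free object in its first word.  In this
 * tree, obj_link is the usual first-word accessor from private/gc_priv.h,
 * roughly:
 *
 *            # define obj_link(p) (*(void **)(p))
 *
 * so popping the head of a free list is just two loads and a store.  This
 * is a sketch for illustration, not a definition used by this file.
 */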

/*
 * The allocator uses GC_allochblk to allocate large chunks of objects.
 * These chunks all start on addresses which are multiples of
 * HBLKSZ.  Each allocated chunk has an associated header,
 * which can be located quickly based on the address of the chunk.
 * (See headers.c for details.)
 * This makes it possible to check quickly whether an
 * arbitrary address corresponds to an object administered by the
 * allocator.
 */
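
/*
 * Sketch of that address-to-block mapping (the authoritative definition
 * lives in private/gc_priv.h): since chunks are HBLKSIZE-aligned, the
 * enclosing block of an arbitrary pointer can be found by masking the
 * low address bits, along the lines of
 *
 *            # define HBLKPTR(objptr) \
 *                ((struct hblk *)(((word)(objptr)) & ~(HBLKSIZE-1)))
 *
 * after which the block header is looked up from the block address
 * (see headers.c).
 */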

word GC_non_gc_bytes = 0;  /* Number of bytes not intended to be collected */

word GC_gc_no = 0;

#ifndef SMALL_CONFIG
  int GC_incremental = 0;  /* By default, stop the world.          */
#endif

int GC_parallel = FALSE;   /* By default, parallel GC is off.      */

int GC_full_freq = 19;     /* Every 20th collection is a full      */
                           /* collection, whether we need it       */
                           /* or not.                              */

GC_bool GC_need_full_gc = FALSE;
                           /* Need full GC due to heap growth.     */

#ifdef THREADS
  GC_bool GC_world_stopped = FALSE;
# define IF_THREADS(x) x
#else
# define IF_THREADS(x)
#endif

word GC_used_heap_size_after_full = 0;

char * GC_copyright[] =
{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ",
"Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved. ",
"Copyright (c) 1996-1998 by Silicon Graphics.  All rights reserved. ",
"Copyright (c) 1999-2001 by Hewlett-Packard Company.  All rights reserved. ",
"THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
" EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.",
"See source code for details." };

# include "version.h"

/* some more variables */

extern signed_word GC_bytes_found;  /* Number of reclaimed bytes    */
                                    /* after garbage collection     */

GC_bool GC_dont_expand = 0;

word GC_free_space_divisor = 3;

extern GC_bool GC_collection_in_progress();
                /* Collection is in progress, or was abandoned. */

int GC_never_stop_func (void) { return(0); }

unsigned long GC_time_limit = TIME_LIMIT;

CLOCK_TYPE GC_start_time;   /* Time at which we stopped world.      */
                            /* used only in GC_timeout_stop_func.   */

int GC_n_attempts = 0;      /* Number of attempts at finishing      */
                            /* collection within GC_time_limit.     */

#if defined(SMALL_CONFIG) || defined(NO_CLOCK)
#   define GC_timeout_stop_func GC_never_stop_func
#else
  int GC_timeout_stop_func (void)
  {
    CLOCK_TYPE current_time;
    static unsigned count = 0;
    unsigned long time_diff;

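    /* Sample the clock only on every fourth call (presumably to keep  */
    /* timer overhead low); the other three calls return immediately.  */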
    if ((count++ & 3) != 0) return(0);
    GET_TIME(current_time);
    time_diff = MS_TIME_DIFF(current_time,GC_start_time);
    if (time_diff >= GC_time_limit) {
        if (GC_print_stats) {
            GC_log_printf("Abandoning stopped marking after ");
            GC_log_printf("%lu msecs", time_diff);
            GC_log_printf(" (attempt %d)\n", GC_n_attempts);
        }
        return(1);
    }
    return(0);
  }
#endif /* !SMALL_CONFIG */

/* Return the minimum number of bytes that must be allocated between   */
/* collections to amortize the collection cost.                        */
static word min_bytes_allocd()
{
#   ifdef THREADS
        /* We punt, for now. */
        signed_word stack_size = 10000;
#   else
        int dummy;
        signed_word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
#   endif
    word total_root_size;       /* includes double stack size,          */
                                /* since the stack is expensive         */
                                /* to scan.                             */
    word scan_size;             /* Estimate of memory to be scanned     */
                                /* during normal GC.                    */

    if (stack_size < 0) stack_size = -stack_size;
    total_root_size = 2 * stack_size + GC_root_size;
    scan_size = 2 * GC_composite_in_use + GC_atomic_in_use
                + total_root_size;
    if (TRUE_INCREMENTAL) {
        return scan_size / (2 * GC_free_space_divisor);
    } else {
        return scan_size / GC_free_space_divisor;
    }
}
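
/*
 * Worked example (illustrative numbers only): with the 10000 byte stack
 * estimate used in the THREADS case, GC_root_size = 80000,
 * GC_composite_in_use = 1048576, GC_atomic_in_use = 1048576 and the
 * default GC_free_space_divisor of 3, we get
 * scan_size = 2*1048576 + 1048576 + (2*10000 + 80000) = 3245728, so a
 * non-incremental configuration waits for roughly 3245728/3, i.e. about
 * 1 MB of allocation, before the next collection pays for itself.
 */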

/* Return the number of bytes allocated, adjusted for explicit storage  */
/* management, etc.  This number is used in deciding when to trigger    */
/* collections.                                                         */
word GC_adj_bytes_allocd(void)
{
    signed_word result;
    signed_word expl_managed =
                (signed_word)GC_non_gc_bytes
                - (signed_word)GC_non_gc_bytes_at_gc;

    /* Don't count what was explicitly freed, or newly allocated for    */
    /* explicit management.  Note that deallocating an explicitly       */
    /* managed object should not alter result, assuming the client      */
    /* is playing by the rules.                                         */
    result = (signed_word)GC_bytes_allocd
             - (signed_word)GC_bytes_freed
             + (signed_word)GC_finalizer_bytes_freed
             - expl_managed;
    if (result > (signed_word)GC_bytes_allocd) {
        result = GC_bytes_allocd;
        /* probably client bug or unfortunate scheduling */
    }
    result += GC_bytes_finalized;
        /* We count objects enqueued for finalization as though they    */
        /* had been reallocated this round.  Finalization is user       */
        /* visible progress.  And if we don't count this, we have       */
        /* stability problems for programs that finalize all objects.   */
    if (result < (signed_word)(GC_bytes_allocd >> 3)) {
        /* Always count at least 1/8 of the allocations.  We don't want */
        /* to collect too infrequently, since that would inhibit        */
        /* coalescing of free storage blocks.                           */
        /* This also makes us partially robust against client bugs.     */
        return(GC_bytes_allocd >> 3);
    } else {
        return(result);
    }
}
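
/*
 * Illustrative example (made-up numbers): if clients allocated 800 KB and
 * explicitly freed 700 KB, with no finalizer activity or non-GC bytes, the
 * adjusted count is 800K - 700K = 100K, and that is what feeds the next
 * collection decision.  The 1/8 floor above would kick in only if the
 * adjusted count dropped below 800K >> 3 = 100 KB.
 */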


/* Clear up a few frames worth of garbage left at the top of the stack. */
/* This is used to prevent us from accidentally treating garbage left   */
/* on the stack by other parts of the collector as roots.  This         */
/* differs from the code in misc.c, which actually tries to keep the    */
/* stack clear of long-lived, client-generated garbage.                 */
void GC_clear_a_few_frames()
{
#   define NWORDS 64
    word frames[NWORDS];
    int i;

    for (i = 0; i < NWORDS; i++) frames[i] = 0;
}

/* Heap size at which we need a collection to avoid expanding past      */
/* limits used by blacklisting.                                         */
static word GC_collect_at_heapsize = (word)(-1);

/* Have we allocated enough to amortize a collection? */
GC_bool GC_should_collect(void)
{
    return(GC_adj_bytes_allocd() >= min_bytes_allocd()
           || GC_heapsize >= GC_collect_at_heapsize);
}


void GC_notify_full_gc(void)
{
    if (GC_start_call_back != (void (*) (void))0) {
        (*GC_start_call_back)();
    }
}

GC_bool GC_is_full_gc = FALSE;

/*
 * Initiate a garbage collection if appropriate.
 * Choose judiciously
 * between partial, full, and stop-world collections.
 * Assumes lock held, signals disabled.
 */
void GC_maybe_gc(void)
{
    static int n_partial_gcs = 0;

    if (GC_should_collect()) {
        if (!GC_incremental) {
            GC_gcollect_inner();
            n_partial_gcs = 0;
            return;
        } else {
#         ifdef PARALLEL_MARK
            GC_wait_for_reclaim();
#         endif
          if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
            if (GC_print_stats) {
                GC_log_printf(
                  "***>Full mark for collection %lu after %ld allocd bytes\n",
                  (unsigned long)GC_gc_no+1,
                  (long)GC_bytes_allocd);
            }
            GC_promote_black_lists();
            (void)GC_reclaim_all((GC_stop_func)0, TRUE);
            GC_clear_marks();
            n_partial_gcs = 0;
            GC_notify_full_gc();
            GC_is_full_gc = TRUE;
          } else {
            n_partial_gcs++;
          }
        }
        /* We try to mark with the world stopped.       */
        /* If we run out of time, this turns into       */
        /* incremental marking.                         */
#       ifndef NO_CLOCK
          if (GC_time_limit != GC_TIME_UNLIMITED) { GET_TIME(GC_start_time); }
#       endif
        if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED?
                            GC_never_stop_func : GC_timeout_stop_func)) {
#           ifdef SAVE_CALL_CHAIN
              GC_save_callers(GC_last_stack);
#           endif
            GC_finish_collection();
        } else {
            if (!GC_is_full_gc) {
                /* Count this as the first attempt */
                GC_n_attempts++;
            }
        }
    }
}


/*
 * Stop the world garbage collection.  Assumes lock held, signals disabled.
 * If stop_func is not GC_never_stop_func, then abort if stop_func returns TRUE.
 * Return TRUE if we successfully completed the collection.
 */
GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
{
    CLOCK_TYPE start_time, current_time;
    if (GC_dont_gc) return FALSE;
    if (GC_incremental && GC_collection_in_progress()) {
      if (GC_print_stats) {
        GC_log_printf(
            "GC_try_to_collect_inner: finishing collection in progress\n");
      }
      /* Just finish collection already in progress.    */
      while(GC_collection_in_progress()) {
          if (stop_func()) return(FALSE);
          GC_collect_a_little_inner(1);
      }
    }
    if (stop_func == GC_never_stop_func) GC_notify_full_gc();
    if (GC_print_stats) {
        GET_TIME(start_time);
        GC_log_printf(
           "Initiating full world-stop collection %lu after %ld allocd bytes\n",
           (unsigned long)GC_gc_no+1, (long)GC_bytes_allocd);
    }
    GC_promote_black_lists();
    /* Make sure all blocks have been reclaimed, so sweep routines      */
    /* don't see cleared mark bits.                                     */
    /* If we're guaranteed to finish, then this is unnecessary.         */
    /* In the find_leak case, we have to finish to guarantee that       */
    /* previously unmarked objects are not reported as leaks.           */
#   ifdef PARALLEL_MARK
        GC_wait_for_reclaim();
#   endif
    if ((GC_find_leak || stop_func != GC_never_stop_func)
        && !GC_reclaim_all(stop_func, FALSE)) {
        /* Aborted.  So far everything is still consistent. */
        return(FALSE);
    }
    GC_invalidate_mark_state();  /* Flush mark stack.   */
    GC_clear_marks();
#   ifdef SAVE_CALL_CHAIN
        GC_save_callers(GC_last_stack);
#   endif
    GC_is_full_gc = TRUE;
    if (!GC_stopped_mark(stop_func)) {
      if (!GC_incremental) {
        /* We're partially done and have no way to complete or use      */
        /* current work.  Reestablish invariants as cheaply as          */
        /* possible.                                                    */
        GC_invalidate_mark_state();
        GC_unpromote_black_lists();
      } /* else we claim the world is still consistent; we'll finish    */
        /* incrementally.                                               */
      return(FALSE);
    }
    GC_finish_collection();
    if (GC_print_stats) {
        GET_TIME(current_time);
        GC_log_printf("Complete collection took %lu msecs\n",
                      MS_TIME_DIFF(current_time,start_time));
    }
    return(TRUE);
}


/*
 * Perform n units of garbage collection work.  A unit is intended to touch
 * roughly GC_RATE pages.  Every once in a while, we do more than that.
 * This needs to be a fairly large number with our current incremental
 * GC strategy, since otherwise we allocate too much during GC, and the
 * cleanup gets expensive.
 */
# define GC_RATE 10
# define MAX_PRIOR_ATTEMPTS 1
        /* Maximum number of prior attempts at world stop marking.      */
        /* A value of 1 means that we finish the second time, no matter */
        /* how long it takes.  Doesn't count the initial root scan      */
        /* for a full GC.                                               */

int GC_deficit = 0;     /* The number of extra calls to GC_mark_some    */
                        /* that we have made.                           */

void GC_collect_a_little_inner(int n)
{
    int i;

    if (GC_dont_gc) return;
    if (GC_incremental && GC_collection_in_progress()) {
        for (i = GC_deficit; i < GC_RATE*n; i++) {
            if (GC_mark_some((ptr_t)0)) {
                /* Need to finish a collection */
#               ifdef SAVE_CALL_CHAIN
                  GC_save_callers(GC_last_stack);
#               endif
#               ifdef PARALLEL_MARK
                  GC_wait_for_reclaim();
#               endif
                if (GC_n_attempts < MAX_PRIOR_ATTEMPTS
                    && GC_time_limit != GC_TIME_UNLIMITED) {
                  GET_TIME(GC_start_time);
                  if (!GC_stopped_mark(GC_timeout_stop_func)) {
                    GC_n_attempts++;
                    break;
                  }
                } else {
                  (void)GC_stopped_mark(GC_never_stop_func);
                }
                GC_finish_collection();
                break;
            }
        }
        if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
        if (GC_deficit < 0) GC_deficit = 0;
    } else {
        GC_maybe_gc();
    }
}

int GC_collect_a_little(void)
{
    int result;
    DCL_LOCK_STATE;

    LOCK();
    GC_collect_a_little_inner(1);
    result = (int)GC_collection_in_progress();
    UNLOCK();
    if (!result && GC_debugging_started) GC_print_all_smashed();
    return(result);
}
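
/*
 * Illustrative client-side usage (a sketch, not part of this file): since
 * GC_collect_a_little returns nonzero while a collection remains in
 * progress, an application with spare time can drain incremental marking
 * work like this:
 *
 *            while (have_idle_time() && GC_collect_a_little()) {
 *                /+ keep doing small increments of GC work +/
 *            }
 *
 * where have_idle_time() stands in for a hypothetical application-supplied
 * predicate.
 */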

/*
 * Assumes lock is held, signals are disabled.
 * We stop the world.
 * If stop_func() ever returns TRUE, we may fail and return FALSE.
 * Increment GC_gc_no if we succeed.
 */
GC_bool GC_stopped_mark(GC_stop_func stop_func)
{
    unsigned i;
    int dummy;
    CLOCK_TYPE start_time, current_time;

    if (GC_print_stats)
        GET_TIME(start_time);

#   if defined(REGISTER_LIBRARIES_EARLY)
        GC_cond_register_dynamic_libraries();
#   endif
    STOP_WORLD();
    IF_THREADS(GC_world_stopped = TRUE);
    if (GC_print_stats) {
        GC_log_printf("--> Marking for collection %lu ",
                      (unsigned long)GC_gc_no + 1);
        GC_log_printf("after %lu allocd bytes\n",
                      (unsigned long) GC_bytes_allocd);
    }
#   ifdef MAKE_BACK_GRAPH
      if (GC_print_back_height) {
        GC_build_back_graph();
      }
#   endif

    /* Mark from all roots.                                     */
    /* Minimize junk left in my registers and on the stack.     */
    GC_clear_a_few_frames();
    GC_noop(0,0,0,0,0,0);
    GC_initiate_gc();
    for(i = 0;;i++) {
        if ((*stop_func)()) {
            if (GC_print_stats) {
                GC_log_printf("Abandoned stopped marking after ");
                GC_log_printf("%u iterations\n", i);
            }
            GC_deficit = i;     /* Give the mutator a chance.   */
            IF_THREADS(GC_world_stopped = FALSE);
            START_WORLD();
            return(FALSE);
        }
        if (GC_mark_some((ptr_t)(&dummy))) break;
    }

    GC_gc_no++;
    if (GC_print_stats) {
        GC_log_printf("Collection %lu reclaimed %ld bytes",
                      (unsigned long)GC_gc_no - 1,
                      (long)GC_bytes_found);
        GC_log_printf(" ---> heapsize = %lu bytes\n",
                      (unsigned long) GC_heapsize);
        /* Printf arguments may be pushed in funny places.  Clear the   */
        /* space.                                                       */
        GC_log_printf("");
    }

    /* Check all debugged objects for consistency */
    if (GC_debugging_started) {
        (*GC_check_heap)();
    }

    IF_THREADS(GC_world_stopped = FALSE);
    START_WORLD();
    if (GC_print_stats) {
      GET_TIME(current_time);
      GC_log_printf("World-stopped marking took %lu msecs\n",
                    MS_TIME_DIFF(current_time,start_time));
    }
    return(TRUE);
}

/* Set all mark bits for the free list whose first entry is q.  */
void GC_set_fl_marks(ptr_t q)
{
   ptr_t p;
   struct hblk * h, * last_h = 0;
   hdr *hhdr;  /* gcc "might be uninitialized" warning is bogus. */
   IF_PER_OBJ(size_t sz;)
   unsigned bit_no;

   for (p = q; p != 0; p = obj_link(p)){
        h = HBLKPTR(p);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
          IF_PER_OBJ(sz = hhdr->hb_sz;)
        }
        bit_no = MARK_BIT_NO((ptr_t)p - (ptr_t)h, sz);
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
          set_mark_bit_from_hdr(hhdr, bit_no);
          ++hhdr -> hb_n_marks;
        }
   }
}

#ifdef GC_ASSERTIONS
/* Check that all mark bits for the free list whose first entry is q    */
/* are set.                                                             */
void GC_check_fl_marks(ptr_t q)
{
   ptr_t p;

   for (p = q; p != 0; p = obj_link(p)){
        if (!GC_is_marked(p)) {
            GC_err_printf("Unmarked object %p on list %p\n", p, q);
            ABORT("Unmarked local free list entry.");
        }
   }
}
#endif

/* Clear all mark bits for the free list whose first entry is q.        */
/* Decrement GC_bytes_found by number of bytes on free list.            */
void GC_clear_fl_marks(ptr_t q)
{
   ptr_t p;
   struct hblk * h, * last_h = 0;
   hdr *hhdr;
   size_t sz;
   unsigned bit_no;

   for (p = q; p != 0; p = obj_link(p)){
        h = HBLKPTR(p);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
          sz = hhdr->hb_sz;  /* Normally set only once. */
        }
        bit_no = MARK_BIT_NO((ptr_t)p - (ptr_t)h, sz);
        if (mark_bit_from_hdr(hhdr, bit_no)) {
          size_t n_marks = hhdr -> hb_n_marks - 1;
          clear_mark_bit_from_hdr(hhdr, bit_no);
#         ifdef PARALLEL_MARK
            /* Appr. count, don't decrement to zero! */
            if (0 != n_marks) {
              hhdr -> hb_n_marks = n_marks;
            }
#         else
            hhdr -> hb_n_marks = n_marks;
#         endif
        }
        GC_bytes_found -= sz;
   }
}

#if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
extern void GC_check_tls(void);
#endif

/* Finish up a collection.  Assumes lock is held, signals are disabled, */
/* but the world is otherwise running.                                  */
void GC_finish_collection()
{
    CLOCK_TYPE start_time;
    CLOCK_TYPE finalize_time;
    CLOCK_TYPE done_time;

#   if defined(GC_ASSERTIONS) && defined(THREADS) \
       && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
        /* Check that we marked some of our own data.   */
        /* FIXME: Add more checks.                      */
        GC_check_tls();
#   endif

    if (GC_print_stats)
        GET_TIME(start_time);

    GC_bytes_found = 0;
#   if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
        if (getenv("GC_PRINT_ADDRESS_MAP") != 0) {
          GC_print_address_map();
        }
#   endif
    COND_DUMP;
    if (GC_find_leak) {
      /* Mark all objects on the free list.  All objects should be      */
      /* marked when we're done.                                        */
        {
          word size;            /* current object size          */
          unsigned kind;
          ptr_t q;

          for (kind = 0; kind < GC_n_kinds; kind++) {
            for (size = 1; size <= MAXOBJGRANULES; size++) {
              q = GC_obj_kinds[kind].ok_freelist[size];
              if (q != 0) GC_set_fl_marks(q);
            }
          }
        }
        GC_start_reclaim(TRUE);
          /* The above just checks; it doesn't really reclaim anything. */
    }

    GC_finalize();
#   ifdef STUBBORN_ALLOC
      GC_clean_changing_list();
#   endif

    if (GC_print_stats)
      GET_TIME(finalize_time);

    if (GC_print_back_height) {
#     ifdef MAKE_BACK_GRAPH
        GC_traverse_back_graph();
#     else
#       ifndef SMALL_CONFIG
          GC_err_printf("Back height not available: "
                        "Rebuild collector with -DMAKE_BACK_GRAPH\n");
#       endif
#     endif
    }

    /* Clear free list mark bits, in case they got accidentally marked   */
    /* (or GC_find_leak is set and they were intentionally marked).      */
    /* Also subtract memory remaining from GC_bytes_found count.         */
    /* Note that composite objects on free list are cleared.             */
    /* Thus accidentally marking a free list is not a problem; only      */
    /* objects on the list itself will be marked, and that's fixed here. */
    {
      word size;        /* current object size          */
      ptr_t q;          /* pointer to current object    */
      unsigned kind;

      for (kind = 0; kind < GC_n_kinds; kind++) {
        for (size = 1; size <= MAXOBJGRANULES; size++) {
          q = GC_obj_kinds[kind].ok_freelist[size];
          if (q != 0) GC_clear_fl_marks(q);
        }
      }
    }


    if (GC_print_stats == VERBOSE)
        GC_log_printf("Bytes recovered before sweep - f.l. count = %ld\n",
                      (long)GC_bytes_found);

    /* Reconstruct free lists to contain everything not marked */
    GC_start_reclaim(FALSE);
    if (GC_print_stats) {
      GC_log_printf("Heap contains %lu pointer-containing "
                    "+ %lu pointer-free reachable bytes\n",
                    (unsigned long)GC_composite_in_use,
                    (unsigned long)GC_atomic_in_use);
    }
    if (GC_is_full_gc) {
        GC_used_heap_size_after_full = USED_HEAP_SIZE;
        GC_need_full_gc = FALSE;
    } else {
        GC_need_full_gc =
             USED_HEAP_SIZE - GC_used_heap_size_after_full
             > min_bytes_allocd();
    }

    if (GC_print_stats == VERBOSE) {
        GC_log_printf(
                  "Immediately reclaimed %ld bytes in heap of size %lu bytes",
                  (long)GC_bytes_found,
                  (unsigned long)GC_heapsize);
#       ifdef USE_MUNMAP
          GC_log_printf("(%lu unmapped)", (unsigned long)GC_unmapped_bytes);
#       endif
        GC_log_printf("\n");
    }

    /* Reset or increment counters for next cycle */
    GC_n_attempts = 0;
    GC_is_full_gc = FALSE;
    GC_bytes_allocd_before_gc += GC_bytes_allocd;
    GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
    GC_bytes_allocd = 0;
    GC_bytes_freed = 0;
    GC_finalizer_bytes_freed = 0;

#   ifdef USE_MUNMAP
      GC_unmap_old();
#   endif
    if (GC_print_stats) {
        GET_TIME(done_time);
        GC_log_printf("Finalize + initiate sweep took %lu + %lu msecs\n",
                      MS_TIME_DIFF(finalize_time,start_time),
                      MS_TIME_DIFF(done_time,finalize_time));
    }
}

/* Externally callable routine to invoke full, stop-world collection. */
int GC_try_to_collect(GC_stop_func stop_func)
{
    int result;
    DCL_LOCK_STATE;

    if (!GC_is_initialized) GC_init();
    if (GC_debugging_started) GC_print_all_smashed();
    GC_INVOKE_FINALIZERS();
    LOCK();
    ENTER_GC();
    if (!GC_is_initialized) GC_init_inner();
    /* Minimize junk left in my registers */
    GC_noop(0,0,0,0,0,0);
    result = (int)GC_try_to_collect_inner(stop_func);
    EXIT_GC();
    UNLOCK();
    if(result) {
        if (GC_debugging_started) GC_print_all_smashed();
        GC_INVOKE_FINALIZERS();
    }
    return(result);
}
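
/*
 * Illustrative client usage (a sketch; deadline_passed is a hypothetical
 * application-supplied predicate): a caller can bound the pause by
 * supplying its own stop function, e.g.
 *
 *            static int my_stop_func(void)
 *            {
 *                return deadline_passed();  /+ abandon GC when over budget +/
 *            }
 *            ...
 *            if (!GC_try_to_collect(my_stop_func)) {
 *                /+ collection was abandoned; retry later +/
 *            }
 *
 * As documented above GC_try_to_collect_inner, a TRUE return from the stop
 * function aborts the collection, and GC_try_to_collect then returns 0.
 */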

void GC_gcollect(void)
{
    (void)GC_try_to_collect(GC_never_stop_func);
    if (GC_have_errors) GC_print_all_errors();
}

word GC_n_heap_sects = 0;       /* Number of sections currently in heap. */

/*
 * Use the chunk of memory starting at p of size bytes as part of the heap.
 * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
 */
void GC_add_to_heap(struct hblk *p, size_t bytes)
{
    hdr * phdr;

    if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
        ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
    }
    phdr = GC_install_header(p);
    if (0 == phdr) {
        /* This is extremely unlikely.  Can't add it.  This will        */
        /* almost certainly result in a 0 return from the allocator,    */
        /* which is entirely appropriate.                               */
        return;
    }
    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
    GC_n_heap_sects++;
    phdr -> hb_sz = bytes;
    phdr -> hb_flags = 0;
    GC_freehblk(p);
    GC_heapsize += bytes;
    if ((ptr_t)p <= (ptr_t)GC_least_plausible_heap_addr
        || GC_least_plausible_heap_addr == 0) {
        GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));
                /* Making it a little smaller than necessary prevents   */
                /* us from getting a false hit from the variable        */
                /* itself.  There's some unintentional reflection       */
                /* here.                                                */
    }
    if ((ptr_t)p + bytes >= (ptr_t)GC_greatest_plausible_heap_addr) {
        GC_greatest_plausible_heap_addr = (void *)((ptr_t)p + bytes);
    }
}

# if !defined(NO_DEBUGGING)
void GC_print_heap_sects(void)
{
    unsigned i;

    GC_printf("Total heap size: %lu\n", (unsigned long) GC_heapsize);
    for (i = 0; i < GC_n_heap_sects; i++) {
        ptr_t start = GC_heap_sects[i].hs_start;
        size_t len = GC_heap_sects[i].hs_bytes;
        struct hblk *h;
        unsigned nbl = 0;

        GC_printf("Section %u from %p to %p ", i,
                  start, start + len);
        for (h = (struct hblk *)start; h < (struct hblk *)(start + len); h++) {
            if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
        }
        GC_printf("%lu/%lu blacklisted\n", (unsigned long)nbl,
                  (unsigned long)(len/HBLKSIZE));
    }
}
# endif

void * GC_least_plausible_heap_addr = (void *)ONES;
void * GC_greatest_plausible_heap_addr = 0;

static INLINE ptr_t GC_max(ptr_t x, ptr_t y)
{
    return(x > y? x : y);
}

static INLINE ptr_t GC_min(ptr_t x, ptr_t y)
{
    return(x < y? x : y);
}

void GC_set_max_heap_size(GC_word n)
{
    GC_max_heapsize = n;
}

GC_word GC_max_retries = 0;

/*
 * This explicitly increases the size of the heap.  It is used
 * internally, but may also be invoked from GC_expand_hp by the user.
 * The argument is in units of HBLKSIZE.
 * Tiny values of n are rounded up.
 * Returns FALSE on failure.
 */
GC_bool GC_expand_hp_inner(word n)
{
    word bytes;
    struct hblk * space;
    word expansion_slop;        /* Number of bytes by which we expect the */
                                /* heap to expand soon.                   */

    if (n < MINHINCR) n = MINHINCR;
    bytes = n * HBLKSIZE;
    /* Make sure bytes is a multiple of GC_page_size */
      {
        word mask = GC_page_size - 1;
        bytes += mask;
        bytes &= ~mask;
      }

    if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
        /* Exceeded self-imposed limit */
        return(FALSE);
    }
    space = GET_MEM(bytes);
    if( space == 0 ) {
        if (GC_print_stats) {
            GC_log_printf("Failed to expand heap by %lu bytes\n",
                          (unsigned long)bytes);
        }
        return(FALSE);
    }
    if (GC_print_stats) {
        GC_log_printf("Increasing heap size by %lu after %lu allocated bytes\n",
                      (unsigned long)bytes,
                      (unsigned long)GC_bytes_allocd);
    }
    expansion_slop = min_bytes_allocd() + 4*MAXHINCR*HBLKSIZE;
    if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
        || (GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space)) {
        /* Assume the heap is growing up */
        GC_greatest_plausible_heap_addr =
            (void *)GC_max((ptr_t)GC_greatest_plausible_heap_addr,
                           (ptr_t)space + bytes + expansion_slop);
    } else {
        /* Heap is growing down */
        GC_least_plausible_heap_addr =
            (void *)GC_min((ptr_t)GC_least_plausible_heap_addr,
                           (ptr_t)space - expansion_slop);
    }
#   if defined(LARGE_CONFIG)
      if (((ptr_t)GC_greatest_plausible_heap_addr <= (ptr_t)space + bytes
           || (ptr_t)GC_least_plausible_heap_addr >= (ptr_t)space)
          && GC_heapsize > 0) {
        /* GC_add_to_heap will fix this, but ... */
        WARN("Too close to address space limit: blacklisting ineffective\n", 0);
      }
#   endif
    GC_prev_heap_addr = GC_last_heap_addr;
    GC_last_heap_addr = (ptr_t)space;
    GC_add_to_heap(space, bytes);
    /* Force GC before we are likely to allocate past expansion_slop */
    GC_collect_at_heapsize =
        GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
#   if defined(LARGE_CONFIG)
      if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
        GC_collect_at_heapsize = (word)(-1);
#   endif
    return(TRUE);
}

/* Really returns a bool, but it's externally visible, so that's clumsy. */
/* Argument is in bytes.                                                  */
int GC_expand_hp(size_t bytes)
{
    int result;
    DCL_LOCK_STATE;

    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
    if (result) GC_requested_heapsize += bytes;
    UNLOCK();
    return(result);
}
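
/*
 * Illustrative usage (a sketch; the 64 MB figure is arbitrary): a client
 * that knows it will build a large data structure can pre-grow the heap to
 * avoid repeated expansion pauses:
 *
 *            if (!GC_expand_hp(64 * 1024 * 1024)) {
 *                /+ expansion failed; allocation may still succeed after GC +/
 *            }
 *
 * As shown above, GC_expand_hp converts the byte count to whole heap
 * blocks via divHBLKSZ before calling GC_expand_hp_inner.
 */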

unsigned GC_fail_count = 0;
                        /* How many consecutive GC/expansion failures?  */
                        /* Reset by GC_allochblk.                       */

GC_bool GC_collect_or_expand(word needed_blocks, GC_bool ignore_off_page)
{
    if (!GC_incremental && !GC_dont_gc &&
        ((GC_dont_expand && GC_bytes_allocd > 0) || GC_should_collect())) {
      GC_gcollect_inner();
    } else {
      word blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
                           + needed_blocks;

      if (blocks_to_get > MAXHINCR) {
          word slop;

          /* Get the minimum required to make it likely that we         */
          /* can satisfy the current request in the presence of black-  */
          /* listing.  This will probably be more than MAXHINCR.        */
          if (ignore_off_page) {
              slop = 4;
          } else {
              slop = 2*divHBLKSZ(BL_LIMIT);
              if (slop > needed_blocks) slop = needed_blocks;
          }
          if (needed_blocks + slop > MAXHINCR) {
              blocks_to_get = needed_blocks + slop;
          } else {
              blocks_to_get = MAXHINCR;
          }
      }
      if (!GC_expand_hp_inner(blocks_to_get)
          && !GC_expand_hp_inner(needed_blocks)) {
        if (GC_fail_count++ < GC_max_retries) {
            WARN("Out of Memory!  Trying to continue ...\n", 0);
            GC_gcollect_inner();
        } else {
#           if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
              WARN("Out of Memory!  Returning NIL!\n", 0);
#           endif
            return(FALSE);
        }
      } else {
          if (GC_fail_count && GC_print_stats) {
              GC_printf("Memory available again ...\n");
          }
      }
    }
    return(TRUE);
}

/*
 * Make sure the object free list for size gran (in granules) is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 * Assumes we hold the allocator lock and signals are disabled.
 */
ptr_t GC_allocobj(size_t gran, int kind)
{
    void ** flh = &(GC_obj_kinds[kind].ok_freelist[gran]);
    GC_bool tried_minor = FALSE;

    if (gran == 0) return(0);

    while (*flh == 0) {
      ENTER_GC();
      /* Do our share of marking work */
      if(TRUE_INCREMENTAL) GC_collect_a_little_inner(1);
      /* Sweep blocks for objects of this size */
      GC_continue_reclaim(gran, kind);
      EXIT_GC();
      if (*flh == 0) {
        GC_new_hblk(gran, kind);
      }
      if (*flh == 0) {
        ENTER_GC();
        if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED
            && ! tried_minor ) {
            GC_collect_a_little_inner(1);
            tried_minor = TRUE;
        } else {
          if (!GC_collect_or_expand((word)1,FALSE)) {
            EXIT_GC();
            return(0);
          }
        }
        EXIT_GC();
      }
    }
    /* Successful allocation; reset failure count.      */
    GC_fail_count = 0;

    return(*flh);
}
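
/*
 * Sketch of how a caller is expected to consume the result (modeled on the
 * fast path described at the top of this file; the actual consumers live
 * in malloc.c and may differ in detail).  With flh standing for the
 * caller's free-list head pointer, the returned object must be unlinked
 * while the allocator lock is still held:
 *
 *            op = GC_allocobj(gran, kind);
 *            if (op == 0) return 0;          /+ heap exhausted +/
 *            *flh = obj_link(op);            /+ pop the head of the list +/
 *            obj_link(op) = 0;               /+ clear the link word +/
 *
 * The rest of the object is already cleared, per the free-list invariant
 * documented above.
 */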