2 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved.
6 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 * Permission is hereby granted to use or copy this program
10 * for any purpose, provided the above notices are retained on all copies.
11 * Permission to modify the code and to distribute modified code is granted,
12 * provided the above notices are retained, and a notice that the code was
13 * modified is included with the above copyright notice.
15 /* Boehm, July 31, 1995 5:02 pm PDT */
25 #define I_HIDE_POINTERS /* To make GC_call_with_alloc_lock visible */
26 #include "private/gc_pmark.h"
28 #ifdef GC_SOLARIS_THREADS
29 # include <sys/syscall.h>
31 #if defined(MSWIN32) || defined(MSWINCE)
32 # define WIN32_LEAN_AND_MEAN
40 # include <sys/types.h>
41 # include <sys/stat.h>
43 int GC_log
; /* Forward decl, so we can set it. */
50 #if defined(THREADS) && defined(PCR)
51 # include "il/PCR_IL.h"
52 PCR_Th_ML GC_allocate_ml
;
54 /* For other platforms with threads, the lock and possibly */
55 /* GC_lock_holder variables are defined in the thread support code. */
57 #if defined(NOSYS) || defined(ECOS)
61 /* Dont unnecessarily call GC_register_main_static_data() in case */
62 /* dyn_load.c isn't linked in. */
63 #ifdef DYNAMIC_LOADING
64 # define GC_REGISTER_MAIN_STATIC_DATA() GC_register_main_static_data()
66 # define GC_REGISTER_MAIN_STATIC_DATA() TRUE
69 GC_FAR
struct _GC_arrays GC_arrays
/* = { 0 } */;
72 GC_bool GC_debugging_started
= FALSE
;
73 /* defined here so we don't have to load debug_malloc.o */
75 void (*GC_check_heap
) (void) = (void (*) (void))0;
76 void (*GC_print_all_smashed
) (void) = (void (*) (void))0;
78 void (*GC_start_call_back
) (void) = (void (*) (void))0;
80 ptr_t GC_stackbottom
= 0;
83 ptr_t GC_register_stackbottom
= 0;
86 GC_bool GC_dont_gc
= 0;
88 GC_bool GC_dont_precollect
= 0;
93 GC_bool GC_print_stats
= 0;
96 GC_bool GC_print_back_height
= 0;
99 GC_bool GC_dump_regularly
= 0; /* Generate regular debugging dumps. */
102 #ifdef KEEP_BACK_PTRS
103 long GC_backtraces
= 0; /* Number of random backtraces to */
104 /* generate for each GC. */
108 int GC_find_leak
= 1;
110 int GC_find_leak
= 0;
113 #ifdef ALL_INTERIOR_POINTERS
114 int GC_all_interior_pointers
= 1;
116 int GC_all_interior_pointers
= 0;
119 long GC_large_alloc_warn_interval
= 5;
120 /* Interval between unsuppressed warnings. */
122 long GC_large_alloc_warn_suppressed
= 0;
123 /* Number of warnings suppressed so far. */
/* Default out-of-memory handler: report no memory by returning NULL.  */
/* Installed in GC_oom_fn below; clients may replace it to abort or    */
/* retry instead.                                                      */
void * GC_default_oom_fn(size_t bytes_requested)
{
    return(0);
}

/* Hook invoked when an allocation cannot be satisfied.  Its result    */
/* is returned to the caller of the failing allocation function.       */
void * (*GC_oom_fn) (size_t bytes_requested) = GC_default_oom_fn;
/* Return the second of two pointer arguments, ignoring the first.     */
/* Used as a trivial combinator (e.g. with GC_call_with_alloc_lock)    */
/* where a two-argument callback shape is required.                    */
/*ARGSUSED*/
void * GC_project2(void *arg1, void *arg2)
{
    return(arg2);
}
138 /* Set things up so that GC_size_map[i] >= granules(i), */
139 /* but not too much bigger */
140 /* and so that size_map contains relatively few distinct entries */
141 /* This was originally stolen from Russ Atkinson's Cedar */
142 /* quantization alogrithm (but we precompute it). */
143 void GC_init_size_map(void)
147 /* Map size 0 to something bigger. */
148 /* This avoids problems at lower levels. */
150 for (i
= 1; i
<= GRANULES_TO_BYTES(TINY_FREELISTS
-1) - EXTRA_BYTES
; i
++) {
151 GC_size_map
[i
] = ROUNDED_UP_GRANULES(i
);
152 GC_ASSERT(GC_size_map
[i
] < TINY_FREELISTS
);
154 /* We leave the rest of the array to be filled in on demand. */
157 /* Fill in additional entries in GC_size_map, including the ith one */
158 /* We assume the ith entry is currently 0. */
159 /* Note that a filled in section of the array ending at n always */
160 /* has length at least n/4. */
161 void GC_extend_size_map(size_t i
)
163 size_t orig_granule_sz
= ROUNDED_UP_GRANULES(i
);
164 size_t granule_sz
= orig_granule_sz
;
165 size_t byte_sz
= GRANULES_TO_BYTES(granule_sz
);
166 /* The size we try to preserve. */
167 /* Close to i, unless this would */
168 /* introduce too many distinct sizes. */
169 size_t smaller_than_i
= byte_sz
- (byte_sz
>> 3);
170 size_t much_smaller_than_i
= byte_sz
- (byte_sz
>> 2);
171 size_t low_limit
; /* The lowest indexed entry we */
175 if (GC_size_map
[smaller_than_i
] == 0) {
176 low_limit
= much_smaller_than_i
;
177 while (GC_size_map
[low_limit
] != 0) low_limit
++;
179 low_limit
= smaller_than_i
+ 1;
180 while (GC_size_map
[low_limit
] != 0) low_limit
++;
181 granule_sz
= ROUNDED_UP_GRANULES(low_limit
);
182 granule_sz
+= granule_sz
>> 3;
183 if (granule_sz
< orig_granule_sz
) granule_sz
= orig_granule_sz
;
185 /* For these larger sizes, we use an even number of granules. */
186 /* This makes it easier to, for example, construct a 16byte-aligned */
187 /* allocator even if GRANULE_BYTES is 8. */
190 if (granule_sz
> MAXOBJGRANULES
) {
191 granule_sz
= MAXOBJGRANULES
;
193 /* If we can fit the same number of larger objects in a block, */
196 size_t number_of_objs
= HBLK_GRANULES
/granule_sz
;
197 granule_sz
= HBLK_GRANULES
/number_of_objs
;
200 byte_sz
= GRANULES_TO_BYTES(granule_sz
);
201 /* We may need one extra byte; */
202 /* don't always fill in GC_size_map[byte_sz] */
203 byte_sz
-= EXTRA_BYTES
;
205 for (j
= low_limit
; j
<= byte_sz
; j
++) GC_size_map
[j
] = granule_sz
;
210 * The following is a gross hack to deal with a problem that can occur
211 * on machines that are sloppy about stack frame sizes, notably SPARC.
212 * Bogus pointers may be written to the stack and not cleared for
213 * a LONG time, because they always fall into holes in stack frames
214 * that are not written. We partially address this by clearing
215 * sections of the stack whenever we get control.
217 word GC_stack_last_cleared
= 0; /* GC_no when we last did this */
219 # define BIG_CLEAR_SIZE 2048 /* Clear this much now and then. */
220 # define SMALL_CLEAR_SIZE 256 /* Clear this much every time. */
222 # define CLEAR_SIZE 213 /* Granularity for GC_clear_stack_inner */
223 # define DEGRADE_RATE 50
225 ptr_t GC_min_sp
; /* Coolest stack pointer value from which we've */
226 /* already cleared the stack. */
229 /* "hottest" stack pointer value we have seen */
230 /* recently. Degrades over time. */
232 word GC_bytes_allocd_at_reset
;
234 #if defined(ASM_CLEAR_CODE)
235 extern void *GC_clear_stack_inner(void *, ptr_t
);
237 /* Clear the stack up to about limit. Return arg. */
239 void * GC_clear_stack_inner(void *arg
, ptr_t limit
)
241 word dummy
[CLEAR_SIZE
];
243 BZERO(dummy
, CLEAR_SIZE
*sizeof(word
));
244 if ((ptr_t
)(dummy
) COOLER_THAN limit
) {
245 (void) GC_clear_stack_inner(arg
, limit
);
247 /* Make sure the recursive call is not a tail call, and the bzero */
248 /* call is not recognized as dead code. */
249 GC_noop1((word
)dummy
);
254 /* Clear some of the inaccessible part of the stack. Returns its */
255 /* argument, so it can be used in a tail call position, hence clearing */
257 void * GC_clear_stack(void *arg
)
259 ptr_t sp
= GC_approx_sp(); /* Hotter than actual sp */
261 word dummy
[SMALL_CLEAR_SIZE
];
262 static unsigned random_no
= 0;
263 /* Should be more random than it is ... */
264 /* Used to occasionally clear a bigger */
270 /* Extra bytes we clear every time. This clears our own */
271 /* activation record, and should cause more frequent */
272 /* clearing near the cold end of the stack, a good thing. */
273 # define GC_SLOP 4000
274 /* We make GC_high_water this much hotter than we really saw */
275 /* saw it, to cover for GC noise etc. above our current frame. */
276 # define CLEAR_THRESHOLD 100000
277 /* We restart the clearing process after this many bytes of */
278 /* allocation. Otherwise very heavily recursive programs */
279 /* with sparse stacks may result in heaps that grow almost */
280 /* without bounds. As the heap gets larger, collection */
281 /* frequency decreases, thus clearing frequency would decrease, */
282 /* thus more junk remains accessible, thus the heap gets */
285 if (++random_no
% 13 == 0) {
287 MAKE_HOTTER(limit
, BIG_CLEAR_SIZE
*sizeof(word
));
288 limit
= (ptr_t
)((word
)limit
& ~0xf);
289 /* Make it sufficiently aligned for assembly */
290 /* implementations of GC_clear_stack_inner. */
291 return GC_clear_stack_inner(arg
, limit
);
293 BZERO(dummy
, SMALL_CLEAR_SIZE
*sizeof(word
));
297 if (GC_gc_no
> GC_stack_last_cleared
) {
298 /* Start things over, so we clear the entire stack again */
299 if (GC_stack_last_cleared
== 0) GC_high_water
= (ptr_t
)GC_stackbottom
;
300 GC_min_sp
= GC_high_water
;
301 GC_stack_last_cleared
= GC_gc_no
;
302 GC_bytes_allocd_at_reset
= GC_bytes_allocd
;
304 /* Adjust GC_high_water */
305 MAKE_COOLER(GC_high_water
, WORDS_TO_BYTES(DEGRADE_RATE
) + GC_SLOP
);
306 if (sp HOTTER_THAN GC_high_water
) {
309 MAKE_HOTTER(GC_high_water
, GC_SLOP
);
311 MAKE_HOTTER(limit
, SLOP
);
312 if (sp COOLER_THAN limit
) {
313 limit
= (ptr_t
)((word
)limit
& ~0xf);
314 /* Make it sufficiently aligned for assembly */
315 /* implementations of GC_clear_stack_inner. */
317 return(GC_clear_stack_inner(arg
, limit
));
318 } else if (GC_bytes_allocd
- GC_bytes_allocd_at_reset
> CLEAR_THRESHOLD
) {
319 /* Restart clearing process, but limit how much clearing we do. */
321 MAKE_HOTTER(GC_min_sp
, CLEAR_THRESHOLD
/4);
322 if (GC_min_sp HOTTER_THAN GC_high_water
) GC_min_sp
= GC_high_water
;
323 GC_bytes_allocd_at_reset
= GC_bytes_allocd
;
330 /* Return a pointer to the base address of p, given a pointer to a */
331 /* an address within an object. Return 0 o.w. */
332 void * GC_base(void * p
)
341 if (!GC_is_initialized
) return 0;
344 candidate_hdr
= HDR_FROM_BI(bi
, r
);
345 if (candidate_hdr
== 0) return(0);
346 /* If it's a pointer to the middle of a large object, move it */
347 /* to the beginning. */
348 while (IS_FORWARDING_ADDR_OR_NIL(candidate_hdr
)) {
349 h
= FORWARDED_ADDR(h
,candidate_hdr
);
351 candidate_hdr
= HDR(h
);
353 if (HBLK_IS_FREE(candidate_hdr
)) return(0);
354 /* Make sure r points to the beginning of the object */
355 r
= (ptr_t
)((word
)r
& ~(WORDS_TO_BYTES(1) - 1));
357 size_t offset
= HBLKDISPL(r
);
358 signed_word sz
= candidate_hdr
-> hb_sz
;
359 size_t obj_displ
= offset
% sz
;
363 if (limit
> (ptr_t
)(h
+ 1) && sz
<= HBLKSIZE
) {
366 if ((ptr_t
)p
>= limit
) return(0);
372 /* Return the size of an object, given a pointer to its base. */
373 /* (For small obects this also happens to work from interior pointers, */
374 /* but that shouldn't be relied upon.) */
375 size_t GC_size(void * p
)
379 return hhdr
-> hb_sz
;
382 size_t GC_get_heap_size(void)
387 size_t GC_get_free_bytes(void)
389 return GC_large_free_bytes
;
392 size_t GC_get_bytes_since_gc(void)
394 return GC_bytes_allocd
;
397 size_t GC_get_total_bytes(void)
399 return GC_bytes_allocd
+GC_bytes_allocd_before_gc
;
402 GC_bool GC_is_initialized
= FALSE
;
404 # if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
405 extern void GC_init_parallel(void);
406 # endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
412 #if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
413 if (!GC_is_initialized
) {
414 BOOL (WINAPI
*pfn
) (LPCRITICAL_SECTION
, DWORD
) = NULL
;
415 HMODULE hK32
= GetModuleHandleA("kernel32.dll");
417 pfn
= (BOOL (WINAPI
*) (LPCRITICAL_SECTION
, DWORD
))
418 GetProcAddress (hK32
,
419 "InitializeCriticalSectionAndSpinCount");
421 pfn(&GC_allocate_ml
, 4000);
423 InitializeCriticalSection (&GC_allocate_ml
);
431 # if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
432 /* Make sure marker threads and started and thread local */
433 /* allocation is initialized, in case we didn't get */
434 /* called from GC_init_parallel(); */
438 # endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
440 # if defined(DYNAMIC_LOADING) && defined(DARWIN)
442 /* This must be called WITHOUT the allocation lock held
443 and before any threads are created */
444 extern void GC_init_dyld();
450 #if defined(MSWIN32) || defined(MSWINCE)
451 CRITICAL_SECTION GC_write_cs
;
455 extern void GC_init_win32(void);
458 extern void GC_setpagesize();
462 extern GC_bool GC_no_win32_dlls
;
464 # define GC_no_win32_dlls FALSE
467 void GC_exit_check(void)
472 #ifdef SEARCH_FOR_DATA_START
473 extern void GC_init_linux_data_start(void);
478 extern void GC_set_and_save_fault_handler(void (*handler
)(int));
480 static void looping_handler(sig
)
483 GC_err_printf("Caught signal %d: looping in handler\n", sig
);
487 static GC_bool installed_looping_handler
= FALSE
;
489 static void maybe_install_looping_handler()
491 /* Install looping handler before the write fault handler, so we */
492 /* handle write faults correctly. */
493 if (!installed_looping_handler
&& 0 != GETENV("GC_LOOP_ON_ABORT")) {
494 GC_set_and_save_fault_handler(looping_handler
);
495 installed_looping_handler
= TRUE
;
499 #else /* !UNIX_LIKE */
501 # define maybe_install_looping_handler()
507 # if !defined(THREADS) && defined(GC_ASSERTIONS)
510 word initial_heap_sz
= (word
)MINHINCR
;
512 if (GC_is_initialized
) return;
513 # if defined(MSWIN32) || defined(MSWINCE)
514 InitializeCriticalSection(&GC_write_cs
);
516 # if (!defined(SMALL_CONFIG))
517 if (0 != GETENV("GC_PRINT_STATS")) {
520 if (0 != GETENV("GC_PRINT_VERBOSE_STATS")) {
521 GC_print_stats
= VERBOSE
;
523 # if defined(UNIX_LIKE)
525 char * file_name
= GETENV("GC_LOG_FILE");
526 if (0 != file_name
) {
527 int log_d
= open(file_name
, O_CREAT
|O_WRONLY
|O_APPEND
, 0666);
529 GC_log_printf("Failed to open %s as log file\n", file_name
);
537 # ifndef NO_DEBUGGING
538 if (0 != GETENV("GC_DUMP_REGULARLY")) {
539 GC_dump_regularly
= 1;
542 # ifdef KEEP_BACK_PTRS
544 char * backtraces_string
= GETENV("GC_BACKTRACES");
545 if (0 != backtraces_string
) {
546 GC_backtraces
= atol(backtraces_string
);
547 if (backtraces_string
[0] == '\0') GC_backtraces
= 1;
551 if (0 != GETENV("GC_FIND_LEAK")) {
553 atexit(GC_exit_check
);
555 if (0 != GETENV("GC_ALL_INTERIOR_POINTERS")) {
556 GC_all_interior_pointers
= 1;
558 if (0 != GETENV("GC_DONT_GC")) {
561 if (0 != GETENV("GC_PRINT_BACK_HEIGHT")) {
562 GC_print_back_height
= 1;
564 if (0 != GETENV("GC_NO_BLACKLIST_WARNING")) {
565 GC_large_alloc_warn_interval
= LONG_MAX
;
568 char * addr_string
= GETENV("GC_TRACE");
569 if (0 != addr_string
) {
570 # ifndef ENABLE_TRACE
571 WARN("Tracing not enabled: Ignoring GC_TRACE value\n", 0);
574 long long addr
= strtoull(addr_string
, NULL
, 16);
576 long addr
= strtoul(addr_string
, NULL
, 16);
579 WARN("Unlikely trace address: 0x%lx\n", (GC_word
)addr
);
580 GC_trace_addr
= (ptr_t
)addr
;
585 char * time_limit_string
= GETENV("GC_PAUSE_TIME_TARGET");
586 if (0 != time_limit_string
) {
587 long time_limit
= atol(time_limit_string
);
588 if (time_limit
< 5) {
589 WARN("GC_PAUSE_TIME_TARGET environment variable value too small "
590 "or bad syntax: Ignoring\n", 0);
592 GC_time_limit
= time_limit
;
597 char * interval_string
= GETENV("GC_LARGE_ALLOC_WARN_INTERVAL");
598 if (0 != interval_string
) {
599 long interval
= atol(interval_string
);
601 WARN("GC_LARGE_ALLOC_WARN_INTERVAL environment variable has "
602 "bad value: Ignoring\n", 0);
604 GC_large_alloc_warn_interval
= interval
;
608 maybe_install_looping_handler();
609 /* Adjust normal object descriptor for extra allocation. */
610 if (ALIGNMENT
> GC_DS_TAGS
&& EXTRA_BYTES
!= 0) {
611 GC_obj_kinds
[NORMAL
].ok_descriptor
= ((word
)(-ALIGNMENT
) | GC_DS_LENGTH
);
614 GC_exclude_static_roots(beginGC_arrays
, endGC_arrays
);
615 GC_exclude_static_roots(beginGC_obj_kinds
, endGC_obj_kinds
);
616 # ifdef SEPARATE_GLOBALS
617 GC_exclude_static_roots(beginGC_objfreelist
, endGC_objfreelist
);
618 GC_exclude_static_roots(beginGC_aobjfreelist
, endGC_aobjfreelist
);
623 # if defined(USE_PROC_FOR_LIBRARIES) && defined(GC_LINUX_THREADS)
624 WARN("USE_PROC_FOR_LIBRARIES + GC_LINUX_THREADS performs poorly.\n", 0);
625 /* If thread stacks are cached, they tend to be scanned in */
626 /* entirety as part of the root set. This wil grow them to */
627 /* maximum size, and is generally not desirable. */
629 # if defined(SEARCH_FOR_DATA_START)
630 GC_init_linux_data_start();
632 # if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
633 GC_init_netbsd_elf();
635 # if !defined(THREADS) || defined(GC_PTHREADS) || defined(GC_WIN32_THREADS) \
636 || defined(GC_SOLARIS_THREADS)
637 if (GC_stackbottom
== 0) {
638 GC_stackbottom
= GC_get_main_stack_base();
639 # if (defined(LINUX) || defined(HPUX)) && defined(IA64)
640 GC_register_stackbottom
= GC_get_register_stack_base();
643 # if (defined(LINUX) || defined(HPUX)) && defined(IA64)
644 if (GC_register_stackbottom
== 0) {
645 WARN("GC_register_stackbottom should be set with GC_stackbottom\n", 0);
646 /* The following may fail, since we may rely on */
647 /* alignment properties that may not hold with a user set */
648 /* GC_stackbottom. */
649 GC_register_stackbottom
= GC_get_register_stack_base();
654 /* Ignore gcc -Wall warnings on the following. */
655 GC_STATIC_ASSERT(sizeof (ptr_t
) == sizeof(word
));
656 GC_STATIC_ASSERT(sizeof (signed_word
) == sizeof(word
));
657 GC_STATIC_ASSERT(sizeof (struct hblk
) == HBLKSIZE
);
659 # ifdef STACK_GROWS_DOWN
660 GC_ASSERT((word
)(&dummy
) <= (word
)GC_stackbottom
);
662 GC_ASSERT((word
)(&dummy
) >= (word
)GC_stackbottom
);
665 # if !defined(_AUX_SOURCE) || defined(__GNUC__)
666 GC_ASSERT((word
)(-1) > (word
)0);
667 /* word should be unsigned */
669 GC_ASSERT((ptr_t
)(word
)(-1) > (ptr_t
)0);
670 /* Ptr_t comparisons should behave as unsigned comparisons. */
671 GC_ASSERT((signed_word
)(-1) < (signed_word
)0);
672 # if !defined(SMALL_CONFIG)
673 if (GC_incremental
|| 0 != GETENV("GC_ENABLE_INCREMENTAL")) {
674 /* This used to test for !GC_no_win32_dlls. Why? */
676 /* For GWW_MPROTECT on Win32, this needs to happen before any */
677 /* heap memory is allocated. */
679 GC_ASSERT(GC_bytes_allocd
== 0)
680 GC_incremental
= TRUE
;
682 # endif /* !SMALL_CONFIG */
684 /* Add initial guess of root sets. Do this first, since sbrk(0) */
686 if (GC_REGISTER_MAIN_STATIC_DATA()) GC_register_data_segments();
691 char * sz_str
= GETENV("GC_INITIAL_HEAP_SIZE");
692 if (sz_str
!= NULL
) {
693 initial_heap_sz
= atoi(sz_str
);
694 if (initial_heap_sz
<= MINHINCR
* HBLKSIZE
) {
695 WARN("Bad initial heap size %s - ignoring it.\n",
698 initial_heap_sz
= divHBLKSZ(initial_heap_sz
);
702 char * sz_str
= GETENV("GC_MAXIMUM_HEAP_SIZE");
703 if (sz_str
!= NULL
) {
704 word max_heap_sz
= (word
)atol(sz_str
);
705 if (max_heap_sz
< initial_heap_sz
* HBLKSIZE
) {
706 WARN("Bad maximum heap size %s - ignoring it.\n",
709 if (0 == GC_max_retries
) GC_max_retries
= 2;
710 GC_set_max_heap_size(max_heap_sz
);
713 if (!GC_expand_hp_inner(initial_heap_sz
)) {
714 GC_err_printf("Can't start up: not enough memory\n");
717 GC_initialize_offsets();
718 GC_register_displacement_inner(0L);
719 # if defined(GC_LINUX_THREADS) && defined(REDIRECT_MALLOC)
720 if (!GC_all_interior_pointers
) {
721 /* TLS ABI uses pointer-sized offsets for dtv. */
722 GC_register_displacement_inner(sizeof(void *));
727 if (PCR_IL_Lock(PCR_Bool_false
, PCR_allSigsBlocked
, PCR_waitForever
)
729 ABORT("Can't lock load state\n");
730 } else if (PCR_IL_Unlock() != PCR_ERes_okay
) {
731 ABORT("Can't unlock load state\n");
736 GC_is_initialized
= TRUE
;
737 # if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
741 /* Get black list set up and/or incremental GC started */
742 if (!GC_dont_precollect
|| GC_incremental
) GC_gcollect_inner();
743 # ifdef STUBBORN_ALLOC
746 # if defined(GC_LINUX_THREADS) && defined(REDIRECT_MALLOC)
748 extern void GC_init_lib_bounds(void);
749 GC_init_lib_bounds();
752 /* Convince lint that some things are used */
755 extern char * GC_copyright
[];
756 extern int GC_read();
757 extern void GC_register_finalizer_no_order();
759 GC_noop(GC_copyright
, GC_find_header
,
760 GC_push_one
, GC_call_with_alloc_lock
, GC_read
,
762 # ifndef NO_DEBUGGING
765 GC_register_finalizer_no_order
);
770 void GC_enable_incremental(void)
772 # if !defined(SMALL_CONFIG) && !defined(KEEP_BACK_PTRS)
773 /* If we are keeping back pointers, the GC itself dirties all */
774 /* pages on which objects have been marked, making */
775 /* incremental GC pointless. */
780 if (GC_incremental
) goto out
;
782 /* if (GC_no_win32_dlls) goto out; Should be win32S test? */
783 maybe_install_looping_handler(); /* Before write fault handler! */
784 GC_incremental
= TRUE
;
785 if (!GC_is_initialized
) {
790 if (!GC_dirty_maintained
) goto out
;
792 /* Can't easily do it. */
796 if (GC_bytes_allocd
> 0) {
797 /* There may be unmarked reachable objects */
799 } /* else we're OK in assuming everything's */
800 /* clean since nothing can point to an */
801 /* unmarked object. */
814 #if defined(MSWIN32) || defined(MSWINCE)
815 # if defined(_MSC_VER) && defined(_DEBUG)
818 # ifdef OLD_WIN32_LOG_FILE
819 # define LOG_FILE _T("gc.log")
822 HANDLE GC_stdout
= 0;
826 if (GC_is_initialized
) {
827 DeleteCriticalSection(&GC_write_cs
);
832 # define GC_need_to_lock 0 /* Not defined without threads */
834 int GC_write(const char *buf
, size_t len
)
840 if (GC_need_to_lock
) EnterCriticalSection(&GC_write_cs
);
841 if (GC_stdout
== INVALID_HANDLE_VALUE
) {
842 if (GC_need_to_lock
) LeaveCriticalSection(&GC_write_cs
);
844 } else if (GC_stdout
== 0) {
845 char * file_name
= GETENV("GC_LOG_FILE");
846 char logPath
[_MAX_PATH
+ 5];
848 if (0 == file_name
) {
849 # ifdef OLD_WIN32_LOG_FILE
850 strcpy(logPath
, LOG_FILE
);
852 GetModuleFileName(NULL
, logPath
, _MAX_PATH
);
853 strcat(logPath
, ".log");
857 GC_stdout
= CreateFile(logPath
, GENERIC_WRITE
,
859 NULL
, CREATE_ALWAYS
, FILE_FLAG_WRITE_THROUGH
,
861 if (GC_stdout
== INVALID_HANDLE_VALUE
)
862 ABORT("Open of log file failed");
864 tmp
= WriteFile(GC_stdout
, buf
, (DWORD
)len
, &written
, NULL
);
867 # if defined(_MSC_VER) && defined(_DEBUG)
868 _CrtDbgReport(_CRT_WARN
, NULL
, 0, NULL
, "%.*s", len
, buf
);
870 if (GC_need_to_lock
) LeaveCriticalSection(&GC_write_cs
);
871 return tmp
? (int)written
: -1;
873 # undef GC_need_to_lock
877 #if defined(OS2) || defined(MACOS)
878 FILE * GC_stdout
= NULL
;
879 FILE * GC_stderr
= NULL
;
880 FILE * GC_log
= NULL
;
881 int GC_tmp
; /* Should really be local ... */
885 if (GC_stdout
== NULL
) {
888 if (GC_stderr
== NULL
) {
891 if (GC_log
== NULL
) {
897 #if !defined(OS2) && !defined(MACOS) && !defined(MSWIN32) && !defined(MSWINCE)
#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(OS2) \
    && !defined(MACOS) && !defined(ECOS) && !defined(NOSYS)
/* Write len bytes from buf to file descriptor fd, retrying on short   */
/* writes.  Returns the byte count on success, -1 on error.            */
int GC_write(fd, buf, len)
int fd;
const char *buf;
size_t len;
{
     register int bytes_written = 0;
     register int result;

     while (bytes_written < len) {
#       ifdef GC_SOLARIS_THREADS
            /* Use the raw syscall to avoid the wrapped write().       */
            result = syscall(SYS_write, fd, buf + bytes_written,
                             len - bytes_written);
#       else
            result = write(fd, buf + bytes_written, len - bytes_written);
#       endif
        if (-1 == result) return(result);
        bytes_written += result;
     }
     return(bytes_written);
}
#endif /* un*x */
931 int GC_write(fd
, buf
, len
)
933 _Jv_diag_write (buf
, len
);
939 int GC_write(fd
, buf
, len
)
947 #if defined(MSWIN32) || defined(MSWINCE)
948 /* FIXME: This is pretty ugly ... */
949 # define WRITE(f, buf, len) GC_write(buf, len)
951 # if defined(OS2) || defined(MACOS)
952 # define WRITE(f, buf, len) (GC_set_files(), \
953 GC_tmp = fwrite((buf), 1, (len), (f)), \
956 # define WRITE(f, buf, len) GC_write((f), (buf), (len))
962 # define vsnprintf _vsnprintf
964 /* A version of printf that is unlikely to call malloc, and is thus safer */
965 /* to call from the collector in case malloc has been bound to GC_malloc. */
966 /* Floating point arguments ans formats should be avoided, since fp */
967 /* conversion is more likely to allocate. */
968 /* Assumes that no more than BUFSZ-1 characters are written at once. */
969 void GC_printf(const char *format
, ...)
974 va_start(args
, format
);
975 if (GC_quiet
) return;
977 (void) vsnprintf(buf
, BUFSZ
, format
, args
);
979 if (buf
[BUFSZ
] != 0x15) ABORT("GC_printf clobbered stack");
980 if (WRITE(GC_stdout
, buf
, strlen(buf
)) < 0) ABORT("write to stdout failed");
983 void GC_err_printf(const char *format
, ...)
988 va_start(args
, format
);
990 (void) vsnprintf(buf
, BUFSZ
, format
, args
);
992 if (buf
[BUFSZ
] != 0x15) ABORT("GC_printf clobbered stack");
993 if (WRITE(GC_stderr
, buf
, strlen(buf
)) < 0) ABORT("write to stderr failed");
996 void GC_log_printf(const char *format
, ...)
1001 va_start(args
, format
);
1003 (void) vsnprintf(buf
, BUFSZ
, format
, args
);
1005 if (buf
[BUFSZ
] != 0x15) ABORT("GC_printf clobbered stack");
1006 if (WRITE(GC_log
, buf
, strlen(buf
)) < 0) ABORT("write to log failed");
1009 void GC_err_puts(const char *s
)
1011 if (WRITE(GC_stderr
, s
, strlen(s
)) < 0) ABORT("write to stderr failed");
#if defined(LINUX) && !defined(SMALL_CONFIG)
/* Write len bytes (not necessarily NUL-terminated) to GC_stderr.      */
void GC_err_write(buf, len)
const char *buf;
size_t len;
{
    if (WRITE(GC_stderr, buf, len) < 0) ABORT("write to stderr failed");
}
#endif
1023 void GC_default_warn_proc(char *msg
, GC_word arg
)
1025 GC_err_printf(msg
, arg
);
1028 GC_warn_proc GC_current_warn_proc
= GC_default_warn_proc
;
1030 GC_warn_proc
GC_set_warn_proc(GC_warn_proc p
)
1032 GC_warn_proc result
;
1034 # ifdef GC_WIN32_THREADS
1035 GC_ASSERT(GC_is_initialized
);
1038 result
= GC_current_warn_proc
;
1039 GC_current_warn_proc
= p
;
1044 GC_word
GC_set_free_space_divisor (GC_word value
)
1046 GC_word old
= GC_free_space_divisor
;
1047 GC_free_space_divisor
= value
;
/* Report a fatal error msg and terminate (or spin, if                 */
/* GC_LOOP_ON_ABORT is set, so a debugger can attach).                 */
void GC_abort(const char *msg)
{
#   if defined(MSWIN32)
      (void) MessageBoxA(NULL, msg, "Fatal error in gc", MB_ICONERROR|MB_OK);
#   else
      GC_err_printf("%s\n", msg);
#   endif
    if (GETENV("GC_LOOP_ON_ABORT") != NULL) {
            /* In many cases it's easier to debug a running process.   */
            /* It's arguably nicer to sleep, but that makes it harder  */
            /* to look at the thread if the debugger doesn't know much */
            /* about threads.                                          */
            for(;;) {}
    }
#   if defined(MSWIN32) || defined(MSWINCE)
        DebugBreak();
#   else
        (void) abort();
#   endif
}
1088 /* Helper procedures for new kind creation. */
1089 void ** GC_new_free_list_inner()
1091 void *result
= GC_INTERNAL_MALLOC((MAXOBJGRANULES
+1)*sizeof(ptr_t
),
1093 if (result
== 0) ABORT("Failed to allocate freelist for new kind");
1094 BZERO(result
, (MAXOBJGRANULES
+1)*sizeof(ptr_t
));
1098 void ** GC_new_free_list()
1102 result
= GC_new_free_list_inner();
1107 unsigned GC_new_kind_inner(void **fl
, GC_word descr
, int adjust
, int clear
)
1109 unsigned result
= GC_n_kinds
++;
1111 if (GC_n_kinds
> MAXOBJKINDS
) ABORT("Too many kinds");
1112 GC_obj_kinds
[result
].ok_freelist
= fl
;
1113 GC_obj_kinds
[result
].ok_reclaim_list
= 0;
1114 GC_obj_kinds
[result
].ok_descriptor
= descr
;
1115 GC_obj_kinds
[result
].ok_relocate_descr
= adjust
;
1116 GC_obj_kinds
[result
].ok_init
= clear
;
1120 unsigned GC_new_kind(void **fl
, GC_word descr
, int adjust
, int clear
)
1124 result
= GC_new_kind_inner(fl
, descr
, adjust
, clear
);
1129 unsigned GC_new_proc_inner(GC_mark_proc proc
)
1131 unsigned result
= GC_n_mark_procs
++;
1133 if (GC_n_mark_procs
> MAX_MARK_PROCS
) ABORT("Too many mark procedures");
1134 GC_mark_procs
[result
] = proc
;
1138 unsigned GC_new_proc(GC_mark_proc proc
)
1142 result
= GC_new_proc_inner(proc
);
1147 void * GC_call_with_stack_base(GC_stack_base_func fn
, void *arg
)
1150 struct GC_stack_base base
;
1152 base
.mem_base
= (void *)&dummy
;
1154 base
.reg_base
= (void *)GC_save_regs_in_stack();
1155 /* Unnecessarily flushes register stack, */
1156 /* but that probably doesn't hurt. */
1158 return fn(&base
, arg
);
#if !defined(NO_DEBUGGING)

/* Dump collector state (roots, heap sections, free and in-use blocks, */
/* finalization statistics) via GC_printf, for debugging.              */
void GC_dump()
{
    GC_printf("***Static roots:\n");
    GC_print_static_roots();
    GC_printf("\n***Heap sections:\n");
    GC_print_heap_sects();
    GC_printf("\n***Free blocks:\n");
    GC_print_hblkfreelist();
    GC_printf("\n***Blocks in use:\n");
    GC_print_block_list();
    GC_printf("\n***Finalization statistics:\n");
    GC_print_finalization_stats();
}

#endif /* NO_DEBUGGING */