]>
Commit | Line | Data |
---|---|---|
7c673cae FG |
1 | /* |
2 | * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers | |
3 | * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved. | |
4 | * Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved. | |
5 | * | |
6 | * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED | |
7 | * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. | |
8 | * | |
9 | * Permission is hereby granted to use or copy this program | |
10 | * for any purpose, provided the above notices are retained on all copies. | |
11 | * Permission to modify the code and to distribute modified code is granted, | |
12 | * provided the above notices are retained, and a notice that the code was | |
13 | * modified is included with the above copyright notice. | |
14 | */ | |
15 | /* Boehm, July 31, 1995 5:02 pm PDT */ | |
16 | ||
17 | ||
18 | #include <stdio.h> | |
19 | #include <limits.h> | |
20 | #include <stdarg.h> | |
21 | #ifndef _WIN32_WCE | |
22 | #include <signal.h> | |
23 | #endif | |
24 | ||
25 | #define I_HIDE_POINTERS /* To make GC_call_with_alloc_lock visible */ | |
26 | #include "private/gc_pmark.h" | |
27 | ||
28 | #ifdef GC_SOLARIS_THREADS | |
29 | # include <sys/syscall.h> | |
30 | #endif | |
31 | #if defined(MSWIN32) || defined(MSWINCE) | |
32 | # define WIN32_LEAN_AND_MEAN | |
33 | # define NOSERVICE | |
34 | # include <windows.h> | |
35 | # include <tchar.h> | |
36 | #endif | |
37 | ||
38 | #ifdef UNIX_LIKE | |
39 | # include <fcntl.h> | |
40 | # include <sys/types.h> | |
41 | # include <sys/stat.h> | |
42 | ||
43 | int GC_log; /* Forward decl, so we can set it. */ | |
44 | #endif | |
45 | ||
46 | #ifdef NONSTOP | |
47 | # include <floss.h> | |
48 | #endif | |
49 | ||
50 | #if defined(THREADS) && defined(PCR) | |
51 | # include "il/PCR_IL.h" | |
52 | PCR_Th_ML GC_allocate_ml; | |
53 | #endif | |
54 | /* For other platforms with threads, the lock and possibly */ | |
55 | /* GC_lock_holder variables are defined in the thread support code. */ | |
56 | ||
57 | #if defined(NOSYS) || defined(ECOS) | |
58 | #undef STACKBASE | |
59 | #endif | |
60 | ||
61 | /* Dont unnecessarily call GC_register_main_static_data() in case */ | |
62 | /* dyn_load.c isn't linked in. */ | |
63 | #ifdef DYNAMIC_LOADING | |
64 | # define GC_REGISTER_MAIN_STATIC_DATA() GC_register_main_static_data() | |
65 | #else | |
66 | # define GC_REGISTER_MAIN_STATIC_DATA() TRUE | |
67 | #endif | |
68 | ||
69 | GC_FAR struct _GC_arrays GC_arrays /* = { 0 } */; | |
70 | ||
71 | ||
72 | GC_bool GC_debugging_started = FALSE; | |
73 | /* defined here so we don't have to load debug_malloc.o */ | |
74 | ||
75 | void (*GC_check_heap) (void) = (void (*) (void))0; | |
76 | void (*GC_print_all_smashed) (void) = (void (*) (void))0; | |
77 | ||
78 | void (*GC_start_call_back) (void) = (void (*) (void))0; | |
79 | ||
80 | ptr_t GC_stackbottom = 0; | |
81 | ||
82 | #ifdef IA64 | |
83 | ptr_t GC_register_stackbottom = 0; | |
84 | #endif | |
85 | ||
86 | GC_bool GC_dont_gc = 0; | |
87 | ||
88 | GC_bool GC_dont_precollect = 0; | |
89 | ||
90 | GC_bool GC_quiet = 0; | |
91 | ||
92 | #ifndef SMALL_CONFIG | |
93 | GC_bool GC_print_stats = 0; | |
94 | #endif | |
95 | ||
96 | GC_bool GC_print_back_height = 0; | |
97 | ||
98 | #ifndef NO_DEBUGGING | |
99 | GC_bool GC_dump_regularly = 0; /* Generate regular debugging dumps. */ | |
100 | #endif | |
101 | ||
102 | #ifdef KEEP_BACK_PTRS | |
103 | long GC_backtraces = 0; /* Number of random backtraces to */ | |
104 | /* generate for each GC. */ | |
105 | #endif | |
106 | ||
107 | #ifdef FIND_LEAK | |
108 | int GC_find_leak = 1; | |
109 | #else | |
110 | int GC_find_leak = 0; | |
111 | #endif | |
112 | ||
113 | #ifdef ALL_INTERIOR_POINTERS | |
114 | int GC_all_interior_pointers = 1; | |
115 | #else | |
116 | int GC_all_interior_pointers = 0; | |
117 | #endif | |
118 | ||
119 | long GC_large_alloc_warn_interval = 5; | |
120 | /* Interval between unsuppressed warnings. */ | |
121 | ||
122 | long GC_large_alloc_warn_suppressed = 0; | |
123 | /* Number of warnings suppressed so far. */ | |
124 | ||
/* Default out-of-memory handler: gives up immediately by returning a   */
/* null pointer, regardless of the requested size.                      */
/*ARGSUSED*/
void * GC_default_oom_fn(size_t bytes_requested)
{
    return NULL;
}
130 | ||
131 | void * (*GC_oom_fn) (size_t bytes_requested) = GC_default_oom_fn; | |
132 | ||
/* Projection onto the second argument: returns arg2 and ignores arg1.  */
/* Useful as a two-argument callback that selects its second input.     */
void * GC_project2(void *arg1, void *arg2)
{
    void *selected = arg2;

    return selected;
}
137 | ||
138 | /* Set things up so that GC_size_map[i] >= granules(i), */ | |
139 | /* but not too much bigger */ | |
140 | /* and so that size_map contains relatively few distinct entries */ | |
141 | /* This was originally stolen from Russ Atkinson's Cedar */ | |
142 | * quantization algorithm (but we precompute it). | |
143 | void GC_init_size_map(void) | |
144 | { | |
145 | int i; | |
146 | ||
147 | /* Map size 0 to something bigger. */ | |
148 | /* This avoids problems at lower levels. */ | |
149 | GC_size_map[0] = 1; | |
150 | for (i = 1; i <= GRANULES_TO_BYTES(TINY_FREELISTS-1) - EXTRA_BYTES; i++) { | |
151 | GC_size_map[i] = ROUNDED_UP_GRANULES(i); | |
152 | GC_ASSERT(GC_size_map[i] < TINY_FREELISTS); | |
153 | } | |
154 | /* We leave the rest of the array to be filled in on demand. */ | |
155 | } | |
156 | ||
157 | /* Fill in additional entries in GC_size_map, including the ith one */ | |
158 | /* We assume the ith entry is currently 0. */ | |
159 | /* Note that a filled in section of the array ending at n always */ | |
160 | /* has length at least n/4. */ | |
void GC_extend_size_map(size_t i)
{
    size_t orig_granule_sz = ROUNDED_UP_GRANULES(i);
    size_t granule_sz = orig_granule_sz;
    size_t byte_sz = GRANULES_TO_BYTES(granule_sz);
                        /* The size we try to preserve.         */
                        /* Close to i, unless this would        */
                        /* introduce too many distinct sizes.   */
    size_t smaller_than_i = byte_sz - (byte_sz >> 3);
    size_t much_smaller_than_i = byte_sz - (byte_sz >> 2);
    size_t low_limit;   /* The lowest indexed entry we          */
                        /* initialize.                          */
    size_t j;

    if (GC_size_map[smaller_than_i] == 0) {
        /* Nothing near i is mapped yet: start filling from the first   */
        /* unmapped entry at or above much_smaller_than_i, keeping the  */
        /* granule size derived from i itself.                          */
        low_limit = much_smaller_than_i;
        while (GC_size_map[low_limit] != 0) low_limit++;
    } else {
        /* Entries just below i are already mapped: fill only the gap   */
        /* above them, and inflate the granule size by ~1/8 so that     */
        /* successive section sizes grow geometrically (this bounds the */
        /* number of distinct sizes).                                   */
        low_limit = smaller_than_i + 1;
        while (GC_size_map[low_limit] != 0) low_limit++;
        granule_sz = ROUNDED_UP_GRANULES(low_limit);
        granule_sz += granule_sz >> 3;
        if (granule_sz < orig_granule_sz) granule_sz = orig_granule_sz;
    }
    /* For these larger sizes, we use an even number of granules.       */
    /* This makes it easier to, for example, construct a 16byte-aligned */
    /* allocator even if GRANULE_BYTES is 8.                            */
    granule_sz += 1;
    granule_sz &= ~1;
    if (granule_sz > MAXOBJGRANULES) {
        granule_sz = MAXOBJGRANULES;
    }
    /* If we can fit the same number of larger objects in a block,      */
    /* do so.                                                           */
    {
        size_t number_of_objs = HBLK_GRANULES/granule_sz;
        granule_sz = HBLK_GRANULES/number_of_objs;
        granule_sz &= ~1;
    }
    byte_sz = GRANULES_TO_BYTES(granule_sz);
    /* We may need one extra byte;                                      */
    /* don't always fill in GC_size_map[byte_sz]                        */
    byte_sz -= EXTRA_BYTES;

    for (j = low_limit; j <= byte_sz; j++) GC_size_map[j] = granule_sz;
}
207 | ||
208 | ||
209 | /* | |
210 | * The following is a gross hack to deal with a problem that can occur | |
211 | * on machines that are sloppy about stack frame sizes, notably SPARC. | |
212 | * Bogus pointers may be written to the stack and not cleared for | |
213 | * a LONG time, because they always fall into holes in stack frames | |
214 | * that are not written. We partially address this by clearing | |
215 | * sections of the stack whenever we get control. | |
216 | */ | |
217 | word GC_stack_last_cleared = 0; /* GC_no when we last did this */ | |
218 | # ifdef THREADS | |
219 | # define BIG_CLEAR_SIZE 2048 /* Clear this much now and then. */ | |
220 | # define SMALL_CLEAR_SIZE 256 /* Clear this much every time. */ | |
221 | # endif | |
222 | # define CLEAR_SIZE 213 /* Granularity for GC_clear_stack_inner */ | |
223 | # define DEGRADE_RATE 50 | |
224 | ||
225 | ptr_t GC_min_sp; /* Coolest stack pointer value from which we've */ | |
226 | /* already cleared the stack. */ | |
227 | ||
228 | ptr_t GC_high_water; | |
229 | /* "hottest" stack pointer value we have seen */ | |
230 | /* recently. Degrades over time. */ | |
231 | ||
232 | word GC_bytes_allocd_at_reset; | |
233 | ||
#if defined(ASM_CLEAR_CODE)
extern void *GC_clear_stack_inner(void *, ptr_t);
#else
/* Clear the stack up to about limit.  Return arg.  Works by placing a  */
/* CLEAR_SIZE-word scratch array in each frame and recursing until the  */
/* current frame's address has passed limit.                            */
/*ARGSUSED*/
void * GC_clear_stack_inner(void *arg, ptr_t limit)
{
    word dummy[CLEAR_SIZE];   /* on-stack scratch; its address tracks sp */

    BZERO(dummy, CLEAR_SIZE*sizeof(word));
    if ((ptr_t)(dummy) COOLER_THAN limit) {
        (void) GC_clear_stack_inner(arg, limit);
    }
    /* Make sure the recursive call is not a tail call, and the bzero   */
    /* call is not recognized as dead code.                             */
    GC_noop1((word)dummy);
    return(arg);
}
#endif
253 | ||
254 | /* Clear some of the inaccessible part of the stack. Returns its */ | |
255 | /* argument, so it can be used in a tail call position, hence clearing */ | |
256 | /* another frame. */ | |
void * GC_clear_stack(void *arg)
{
    ptr_t sp = GC_approx_sp();  /* Hotter than actual sp */
#   ifdef THREADS
        word dummy[SMALL_CLEAR_SIZE];
        static unsigned random_no = 0;
                                /* Should be more random than it is ... */
                                /* Used to occasionally clear a bigger  */
                                /* chunk.                               */
#   endif
    ptr_t limit;

#   define SLOP 400
        /* Extra bytes we clear every time.  This clears our own        */
        /* activation record, and should cause more frequent            */
        /* clearing near the cold end of the stack, a good thing.       */
#   define GC_SLOP 4000
        /* We make GC_high_water this much hotter than we really saw    */
        /* it, to cover for GC noise etc. above our current frame.      */
#   define CLEAR_THRESHOLD 100000
        /* We restart the clearing process after this many bytes of     */
        /* allocation.  Otherwise very heavily recursive programs       */
        /* with sparse stacks may result in heaps that grow almost      */
        /* without bounds.  As the heap gets larger, collection         */
        /* frequency decreases, thus clearing frequency would decrease, */
        /* thus more junk remains accessible, thus the heap gets        */
        /* larger ...                                                   */
#   ifdef THREADS
      /* With threads the GC_min_sp/GC_high_water bookkeeping below     */
      /* isn't used; instead clear a small fixed chunk on every call    */
      /* and a big 16-byte-aligned chunk on roughly every 13th call.    */
      if (++random_no % 13 == 0) {
        limit = sp;
        MAKE_HOTTER(limit, BIG_CLEAR_SIZE*sizeof(word));
        limit = (ptr_t)((word)limit & ~0xf);
                        /* Make it sufficiently aligned for assembly    */
                        /* implementations of GC_clear_stack_inner.     */
        return GC_clear_stack_inner(arg, limit);
      } else {
        BZERO(dummy, SMALL_CLEAR_SIZE*sizeof(word));
        return arg;
      }
#   else
      if (GC_gc_no > GC_stack_last_cleared) {
        /* Start things over, so we clear the entire stack again */
        if (GC_stack_last_cleared == 0) GC_high_water = (ptr_t)GC_stackbottom;
        GC_min_sp = GC_high_water;
        GC_stack_last_cleared = GC_gc_no;
        GC_bytes_allocd_at_reset = GC_bytes_allocd;
      }
      /* Adjust GC_high_water: degrade it a bit, then pull it back up   */
      /* to at least the current sp plus slop.                          */
      MAKE_COOLER(GC_high_water, WORDS_TO_BYTES(DEGRADE_RATE) + GC_SLOP);
      if (sp HOTTER_THAN GC_high_water) {
          GC_high_water = sp;
      }
      MAKE_HOTTER(GC_high_water, GC_SLOP);
      limit = GC_min_sp;
      MAKE_HOTTER(limit, SLOP);
      if (sp COOLER_THAN limit) {
        limit = (ptr_t)((word)limit & ~0xf);
                        /* Make it sufficiently aligned for assembly    */
                        /* implementations of GC_clear_stack_inner.     */
        GC_min_sp = sp;
        return(GC_clear_stack_inner(arg, limit));
      } else if (GC_bytes_allocd - GC_bytes_allocd_at_reset > CLEAR_THRESHOLD) {
        /* Restart clearing process, but limit how much clearing we do. */
        GC_min_sp = sp;
        MAKE_HOTTER(GC_min_sp, CLEAR_THRESHOLD/4);
        if (GC_min_sp HOTTER_THAN GC_high_water) GC_min_sp = GC_high_water;
        GC_bytes_allocd_at_reset = GC_bytes_allocd;
      }
      return(arg);
#   endif
}
328 | ||
329 | ||
330 | /* Return a pointer to the base address of p, given a pointer to a */ | |
331 | /* an address within an object. Return 0 o.w. */ | |
void * GC_base(void * p)
{
    ptr_t r;
    struct hblk *h;
    bottom_index *bi;
    hdr *candidate_hdr;
    ptr_t limit;

    r = p;
    if (!GC_is_initialized) return 0;
    h = HBLKPTR(r);
    GET_BI(r, bi);
    candidate_hdr = HDR_FROM_BI(bi, r);
    if (candidate_hdr == 0) return(0);  /* not a GC-managed address */
    /* If it's a pointer to the middle of a large object, move it       */
    /* to the beginning.  Forwarding headers chain back to the block    */
    /* that holds the real header.                                      */
    while (IS_FORWARDING_ADDR_OR_NIL(candidate_hdr)) {
        h = FORWARDED_ADDR(h,candidate_hdr);
        r = (ptr_t)h;
        candidate_hdr = HDR(h);
    }
    if (HBLK_IS_FREE(candidate_hdr)) return(0);
    /* Make sure r points to the beginning of the object */
    r = (ptr_t)((word)r & ~(WORDS_TO_BYTES(1) - 1));
    {
        size_t offset = HBLKDISPL(r);
        signed_word sz = candidate_hdr -> hb_sz;
        size_t obj_displ = offset % sz;

        r -= obj_displ;          /* back up to the object's base */
        limit = r + sz;
        /* Reject a small-object "base" whose extent would cross the    */
        /* block boundary, or a p that lies at/after the object's end.  */
        if (limit > (ptr_t)(h + 1) && sz <= HBLKSIZE) {
            return(0);
        }
        if ((ptr_t)p >= limit) return(0);
    }
    return((void *)r);
}
370 | ||
371 | ||
372 | /* Return the size of an object, given a pointer to its base. */ | |
373 | /* (For small objects this also happens to work from interior pointers, | |
374 | /* but that shouldn't be relied upon.) */ | |
375 | size_t GC_size(void * p) | |
376 | { | |
377 | hdr * hhdr = HDR(p); | |
378 | ||
379 | return hhdr -> hb_sz; | |
380 | } | |
381 | ||
382 | size_t GC_get_heap_size(void) | |
383 | { | |
384 | return GC_heapsize; | |
385 | } | |
386 | ||
387 | size_t GC_get_free_bytes(void) | |
388 | { | |
389 | return GC_large_free_bytes; | |
390 | } | |
391 | ||
392 | size_t GC_get_bytes_since_gc(void) | |
393 | { | |
394 | return GC_bytes_allocd; | |
395 | } | |
396 | ||
397 | size_t GC_get_total_bytes(void) | |
398 | { | |
399 | return GC_bytes_allocd+GC_bytes_allocd_before_gc; | |
400 | } | |
401 | ||
402 | GC_bool GC_is_initialized = FALSE; | |
403 | ||
404 | # if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC) | |
405 | extern void GC_init_parallel(void); | |
406 | # endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */ | |
407 | ||
/* Public initialization entry point.  Sets up the Win32 allocation     */
/* lock if needed, runs GC_init_inner under the lock, then performs     */
/* thread/platform follow-up steps that must run without the lock.      */
/* GC_init_inner is idempotent, so calling this twice is harmless.      */
void GC_init(void)
{
    DCL_LOCK_STATE;

#if defined(GC_WIN32_THREADS) && !defined(GC_PTHREADS)
    /* The allocation lock itself must be created before first LOCK().  */
    /* Prefer InitializeCriticalSectionAndSpinCount when kernel32       */
    /* exports it; fall back to a plain critical section otherwise.     */
    if (!GC_is_initialized) {
      BOOL (WINAPI *pfn) (LPCRITICAL_SECTION, DWORD) = NULL;
      HMODULE hK32 = GetModuleHandleA("kernel32.dll");
      if (hK32)
          pfn = (BOOL (WINAPI *) (LPCRITICAL_SECTION, DWORD))
                GetProcAddress (hK32,
                                "InitializeCriticalSectionAndSpinCount");
      if (pfn)
          pfn(&GC_allocate_ml, 4000);
      else
          InitializeCriticalSection (&GC_allocate_ml);
    }
#endif /* GC_WIN32_THREADS && !GC_PTHREADS */

    LOCK();
    GC_init_inner();
    UNLOCK();

#   if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
        /* Make sure marker threads are started and thread local        */
        /* allocation is initialized, in case we didn't get             */
        /* called from GC_init_parallel();                              */
        {
          GC_init_parallel();
        }
#   endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */

#   if defined(DYNAMIC_LOADING) && defined(DARWIN)
    {
        /* This must be called WITHOUT the allocation lock held
           and before any threads are created */
        extern void GC_init_dyld();
        GC_init_dyld();
    }
#   endif
}
449 | ||
450 | #if defined(MSWIN32) || defined(MSWINCE) | |
451 | CRITICAL_SECTION GC_write_cs; | |
452 | #endif | |
453 | ||
454 | #ifdef MSWIN32 | |
455 | extern void GC_init_win32(void); | |
456 | #endif | |
457 | ||
458 | extern void GC_setpagesize(); | |
459 | ||
460 | ||
461 | #ifdef MSWIN32 | |
462 | extern GC_bool GC_no_win32_dlls; | |
463 | #else | |
464 | # define GC_no_win32_dlls FALSE | |
465 | #endif | |
466 | ||
/* atexit handler registered when GC_FIND_LEAK is requested via the     */
/* environment (see GC_init_inner): run one final collection so that    */
/* outstanding leaks are reported before the process exits.             */
void GC_exit_check(void)
{
   GC_gcollect();
}
471 | ||
472 | #ifdef SEARCH_FOR_DATA_START | |
473 | extern void GC_init_linux_data_start(void); | |
474 | #endif | |
475 | ||
476 | #ifdef UNIX_LIKE | |
477 | ||
478 | extern void GC_set_and_save_fault_handler(void (*handler)(int)); | |
479 | ||
/* Fault handler installed when GC_LOOP_ON_ABORT is set: report the     */
/* signal and spin forever so a debugger can be attached.               */
/* ANSI-fied from the old K&R definition, for consistency with the      */
/* void (*handler)(int) prototype expected by                           */
/* GC_set_and_save_fault_handler and the rest of this file.             */
static void looping_handler(int sig)
{
    GC_err_printf("Caught signal %d: looping in handler\n", sig);
    for(;;);
}
486 | ||
487 | static GC_bool installed_looping_handler = FALSE; | |
488 | ||
489 | static void maybe_install_looping_handler() | |
490 | { | |
491 | /* Install looping handler before the write fault handler, so we */ | |
492 | /* handle write faults correctly. */ | |
493 | if (!installed_looping_handler && 0 != GETENV("GC_LOOP_ON_ABORT")) { | |
494 | GC_set_and_save_fault_handler(looping_handler); | |
495 | installed_looping_handler = TRUE; | |
496 | } | |
497 | } | |
498 | ||
499 | #else /* !UNIX_LIKE */ | |
500 | ||
501 | # define maybe_install_looping_handler() | |
502 | ||
503 | #endif | |
504 | ||
/* Core collector initialization: read environment tuning variables,    */
/* set up headers/blacklists/mark machinery, register static roots,     */
/* record the stack base, and allocate the initial heap.  Idempotent;   */
/* expected to run with the allocation lock held (see GC_init).         */
void GC_init_inner()
{
#   if !defined(THREADS) && defined(GC_ASSERTIONS)
        word dummy;     /* only used to sanity-check sp vs GC_stackbottom */
#   endif
    word initial_heap_sz = (word)MINHINCR;   /* in heap blocks */

    if (GC_is_initialized) return;
#   if defined(MSWIN32) || defined(MSWINCE)
        InitializeCriticalSection(&GC_write_cs);
#   endif
#   if (!defined(SMALL_CONFIG))
      if (0 != GETENV("GC_PRINT_STATS")) {
        GC_print_stats = 1;
      }
      if (0 != GETENV("GC_PRINT_VERBOSE_STATS")) {
        GC_print_stats = VERBOSE;
      }
#     if defined(UNIX_LIKE)
        {
          /* Redirect log output to a file if GC_LOG_FILE is set.       */
          char * file_name = GETENV("GC_LOG_FILE");
          if (0 != file_name) {
            int log_d = open(file_name, O_CREAT|O_WRONLY|O_APPEND, 0666);
            if (log_d < 0) {
              GC_log_printf("Failed to open %s as log file\n", file_name);
            } else {
              GC_log = log_d;
            }
          }
        }
#     endif
#   endif
#   ifndef NO_DEBUGGING
      if (0 != GETENV("GC_DUMP_REGULARLY")) {
        GC_dump_regularly = 1;
      }
#   endif
#   ifdef KEEP_BACK_PTRS
      {
        char * backtraces_string = GETENV("GC_BACKTRACES");
        if (0 != backtraces_string) {
          GC_backtraces = atol(backtraces_string);
          /* An empty value still enables a single backtrace per GC.    */
          if (backtraces_string[0] == '\0') GC_backtraces = 1;
        }
      }
#   endif
    if (0 != GETENV("GC_FIND_LEAK")) {
      GC_find_leak = 1;
      atexit(GC_exit_check);    /* report leaks on normal exit */
    }
    if (0 != GETENV("GC_ALL_INTERIOR_POINTERS")) {
      GC_all_interior_pointers = 1;
    }
    if (0 != GETENV("GC_DONT_GC")) {
      GC_dont_gc = 1;
    }
    if (0 != GETENV("GC_PRINT_BACK_HEIGHT")) {
      GC_print_back_height = 1;
    }
    if (0 != GETENV("GC_NO_BLACKLIST_WARNING")) {
      GC_large_alloc_warn_interval = LONG_MAX;
    }
    {
      /* GC_TRACE=<hex addr> enables tracing of a single address,       */
      /* when the collector was built with ENABLE_TRACE.                */
      char * addr_string = GETENV("GC_TRACE");
      if (0 != addr_string) {
#       ifndef ENABLE_TRACE
          WARN("Tracing not enabled: Ignoring GC_TRACE value\n", 0);
#       else
#         ifdef STRTOULL
            long long addr = strtoull(addr_string, NULL, 16);
#         else
            long addr = strtoul(addr_string, NULL, 16);
#         endif
          if (addr < 0x1000)
              WARN("Unlikely trace address: 0x%lx\n", (GC_word)addr);
          GC_trace_addr = (ptr_t)addr;
#       endif
      }
    }
    {
      char * time_limit_string = GETENV("GC_PAUSE_TIME_TARGET");
      if (0 != time_limit_string) {
        long time_limit = atol(time_limit_string);
        if (time_limit < 5) {
          WARN("GC_PAUSE_TIME_TARGET environment variable value too small "
               "or bad syntax: Ignoring\n", 0);
        } else {
          GC_time_limit = time_limit;
        }
      }
    }
    {
      char * interval_string = GETENV("GC_LARGE_ALLOC_WARN_INTERVAL");
      if (0 != interval_string) {
        long interval = atol(interval_string);
        if (interval <= 0) {
          WARN("GC_LARGE_ALLOC_WARN_INTERVAL environment variable has "
               "bad value: Ignoring\n", 0);
        } else {
          GC_large_alloc_warn_interval = interval;
        }
      }
    }
    maybe_install_looping_handler();
    /* Adjust normal object descriptor for extra allocation.    */
    if (ALIGNMENT > GC_DS_TAGS && EXTRA_BYTES != 0) {
      GC_obj_kinds[NORMAL].ok_descriptor = ((word)(-ALIGNMENT) | GC_DS_LENGTH);
    }
    GC_setpagesize();
    /* The collector's own data structures must never be scanned.       */
    GC_exclude_static_roots(beginGC_arrays, endGC_arrays);
    GC_exclude_static_roots(beginGC_obj_kinds, endGC_obj_kinds);
#   ifdef SEPARATE_GLOBALS
      GC_exclude_static_roots(beginGC_objfreelist, endGC_objfreelist);
      GC_exclude_static_roots(beginGC_aobjfreelist, endGC_aobjfreelist);
#   endif
#   ifdef MSWIN32
        GC_init_win32();
#   endif
#   if defined(USE_PROC_FOR_LIBRARIES) && defined(GC_LINUX_THREADS)
        WARN("USE_PROC_FOR_LIBRARIES + GC_LINUX_THREADS performs poorly.\n", 0);
        /* If thread stacks are cached, they tend to be scanned in      */
        /* entirety as part of the root set.  This will grow them to    */
        /* maximum size, and is generally not desirable.                */
#   endif
#   if defined(SEARCH_FOR_DATA_START)
        GC_init_linux_data_start();
#   endif
#   if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
        GC_init_netbsd_elf();
#   endif
#   if !defined(THREADS) || defined(GC_PTHREADS) || defined(GC_WIN32_THREADS) \
       || defined(GC_SOLARIS_THREADS)
      if (GC_stackbottom == 0) {
        GC_stackbottom = GC_get_main_stack_base();
#       if (defined(LINUX) || defined(HPUX)) && defined(IA64)
          GC_register_stackbottom = GC_get_register_stack_base();
#       endif
      } else {
#       if (defined(LINUX) || defined(HPUX)) && defined(IA64)
          if (GC_register_stackbottom == 0) {
            WARN("GC_register_stackbottom should be set with GC_stackbottom\n", 0);
            /* The following may fail, since we may rely on             */
            /* alignment properties that may not hold with a user set   */
            /* GC_stackbottom.                                          */
            GC_register_stackbottom = GC_get_register_stack_base();
          }
#       endif
      }
#   endif
    /* Ignore gcc -Wall warnings on the following. */
    GC_STATIC_ASSERT(sizeof (ptr_t) == sizeof(word));
    GC_STATIC_ASSERT(sizeof (signed_word) == sizeof(word));
    GC_STATIC_ASSERT(sizeof (struct hblk) == HBLKSIZE);
#   ifndef THREADS
#     ifdef STACK_GROWS_DOWN
        GC_ASSERT((word)(&dummy) <= (word)GC_stackbottom);
#     else
        GC_ASSERT((word)(&dummy) >= (word)GC_stackbottom);
#     endif
#   endif
#   if !defined(_AUX_SOURCE) || defined(__GNUC__)
      GC_ASSERT((word)(-1) > (word)0);
      /* word should be unsigned */
#   endif
    GC_ASSERT((ptr_t)(word)(-1) > (ptr_t)0);
        /* Ptr_t comparisons should behave as unsigned comparisons.     */
    GC_ASSERT((signed_word)(-1) < (signed_word)0);
#   if !defined(SMALL_CONFIG)
      if (GC_incremental || 0 != GETENV("GC_ENABLE_INCREMENTAL")) {
        /* This used to test for !GC_no_win32_dlls.  Why? */
        GC_setpagesize();
        /* For GWW_MPROTECT on Win32, this needs to happen before any   */
        /* heap memory is allocated.                                    */
        GC_dirty_init();
        GC_ASSERT(GC_bytes_allocd == 0)
        GC_incremental = TRUE;
      }
#   endif /* !SMALL_CONFIG */

    /* Add initial guess of root sets.  Do this first, since sbrk(0)    */
    /* might be used.                                                   */
    if (GC_REGISTER_MAIN_STATIC_DATA()) GC_register_data_segments();
    GC_init_headers();
    GC_bl_init();
    GC_mark_init();
    {
        char * sz_str = GETENV("GC_INITIAL_HEAP_SIZE");
        if (sz_str != NULL) {
          initial_heap_sz = atoi(sz_str);
          if (initial_heap_sz <= MINHINCR * HBLKSIZE) {
            WARN("Bad initial heap size %s - ignoring it.\n",
                 sz_str);
          }
          /* Convert the byte count to a block count. */
          initial_heap_sz = divHBLKSZ(initial_heap_sz);
        }
    }
    {
        char * sz_str = GETENV("GC_MAXIMUM_HEAP_SIZE");
        if (sz_str != NULL) {
          word max_heap_sz = (word)atol(sz_str);
          if (max_heap_sz < initial_heap_sz * HBLKSIZE) {
            WARN("Bad maximum heap size %s - ignoring it.\n",
                 sz_str);
          }
          if (0 == GC_max_retries) GC_max_retries = 2;
          GC_set_max_heap_size(max_heap_sz);
        }
    }
    if (!GC_expand_hp_inner(initial_heap_sz)) {
        GC_err_printf("Can't start up: not enough memory\n");
        EXIT();
    }
    GC_initialize_offsets();
    GC_register_displacement_inner(0L);
#   if defined(GC_LINUX_THREADS) && defined(REDIRECT_MALLOC)
      if (!GC_all_interior_pointers) {
        /* TLS ABI uses pointer-sized offsets for dtv. */
        GC_register_displacement_inner(sizeof(void *));
      }
#   endif
    GC_init_size_map();
#   ifdef PCR
      if (PCR_IL_Lock(PCR_Bool_false, PCR_allSigsBlocked, PCR_waitForever)
          != PCR_ERes_okay) {
          ABORT("Can't lock load state\n");
      } else if (PCR_IL_Unlock() != PCR_ERes_okay) {
          ABORT("Can't unlock load state\n");
      }
      PCR_IL_Unlock();
      GC_pcr_install();
#   endif
    GC_is_initialized = TRUE;
#   if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
        GC_thr_init();
#   endif
    COND_DUMP;
    /* Get black list set up and/or incremental GC started */
    if (!GC_dont_precollect || GC_incremental) GC_gcollect_inner();
#   ifdef STUBBORN_ALLOC
        GC_stubborn_init();
#   endif
#   if defined(GC_LINUX_THREADS) && defined(REDIRECT_MALLOC)
      {
        extern void GC_init_lib_bounds(void);
        GC_init_lib_bounds();
      }
#   endif
    /* Convince lint that some things are used */
#   ifdef LINT
      {
          extern char * GC_copyright[];
          extern int GC_read();
          extern void GC_register_finalizer_no_order();

          GC_noop(GC_copyright, GC_find_header,
                  GC_push_one, GC_call_with_alloc_lock, GC_read,
                  GC_dont_expand,
#                 ifndef NO_DEBUGGING
                    GC_dump,
#                 endif
                  GC_register_finalizer_no_order);
      }
#   endif
}
769 | ||
/* Turn on incremental (dirty-bit based) collection.  Falls back to     */
/* plain GC_init when leak detection is enabled, or when the build      */
/* keeps back pointers or is SMALL_CONFIG (see comment below).          */
void GC_enable_incremental(void)
{
# if !defined(SMALL_CONFIG) && !defined(KEEP_BACK_PTRS)
  /* If we are keeping back pointers, the GC itself dirties all */
  /* pages on which objects have been marked, making            */
  /* incremental GC pointless.                                  */
  if (!GC_find_leak) {
    DCL_LOCK_STATE;

    LOCK();
    if (GC_incremental) goto out;     /* already enabled */
    GC_setpagesize();
    /* if (GC_no_win32_dlls) goto out; Should be win32S test? */
    maybe_install_looping_handler();  /* Before write fault handler! */
    GC_incremental = TRUE;
    if (!GC_is_initialized) {
        GC_init_inner();              /* GC_incremental set above makes */
                                      /* init do the dirty-bit setup.   */
    } else {
        GC_dirty_init();
    }
    if (!GC_dirty_maintained) goto out;   /* dirty-bit setup failed */
    if (GC_dont_gc) {
        /* Can't easily do it. */
        UNLOCK();
        return;
    }
    if (GC_bytes_allocd > 0) {
        /* There may be unmarked reachable objects      */
        GC_gcollect_inner();
    }   /* else we're OK in assuming everything's       */
        /* clean since nothing can point to an          */
        /* unmarked object.                             */
    GC_read_dirty();
out:
    UNLOCK();
  } else {
    GC_init();
  }
# else
  GC_init();
# endif
}
812 | ||
813 | ||
814 | #if defined(MSWIN32) || defined(MSWINCE) | |
815 | # if defined(_MSC_VER) && defined(_DEBUG) | |
816 | # include <crtdbg.h> | |
817 | # endif | |
818 | # ifdef OLD_WIN32_LOG_FILE | |
819 | # define LOG_FILE _T("gc.log") | |
820 | # endif | |
821 | ||
822 | HANDLE GC_stdout = 0; | |
823 | ||
824 | void GC_deinit() | |
825 | { | |
826 | if (GC_is_initialized) { | |
827 | DeleteCriticalSection(&GC_write_cs); | |
828 | } | |
829 | } | |
830 | ||
831 | # ifndef THREADS | |
832 | # define GC_need_to_lock 0 /* Not defined without threads */ | |
833 | # endif | |
834 | int GC_write(const char *buf, size_t len) | |
835 | { | |
836 | BOOL tmp; | |
837 | DWORD written; | |
838 | if (len == 0) | |
839 | return 0; | |
840 | if (GC_need_to_lock) EnterCriticalSection(&GC_write_cs); | |
841 | if (GC_stdout == INVALID_HANDLE_VALUE) { | |
842 | if (GC_need_to_lock) LeaveCriticalSection(&GC_write_cs); | |
843 | return -1; | |
844 | } else if (GC_stdout == 0) { | |
845 | char * file_name = GETENV("GC_LOG_FILE"); | |
846 | char logPath[_MAX_PATH + 5]; | |
847 | ||
848 | if (0 == file_name) { | |
849 | # ifdef OLD_WIN32_LOG_FILE | |
850 | strcpy(logPath, LOG_FILE); | |
851 | # else | |
852 | GetModuleFileName(NULL, logPath, _MAX_PATH); | |
853 | strcat(logPath, ".log"); | |
854 | # endif | |
855 | file_name = logPath; | |
856 | } | |
857 | GC_stdout = CreateFile(logPath, GENERIC_WRITE, | |
858 | FILE_SHARE_READ, | |
859 | NULL, CREATE_ALWAYS, FILE_FLAG_WRITE_THROUGH, | |
860 | NULL); | |
861 | if (GC_stdout == INVALID_HANDLE_VALUE) | |
862 | ABORT("Open of log file failed"); | |
863 | } | |
864 | tmp = WriteFile(GC_stdout, buf, (DWORD)len, &written, NULL); | |
865 | if (!tmp) | |
866 | DebugBreak(); | |
867 | # if defined(_MSC_VER) && defined(_DEBUG) | |
868 | _CrtDbgReport(_CRT_WARN, NULL, 0, NULL, "%.*s", len, buf); | |
869 | # endif | |
870 | if (GC_need_to_lock) LeaveCriticalSection(&GC_write_cs); | |
871 | return tmp ? (int)written : -1; | |
872 | } | |
873 | # undef GC_need_to_lock | |
874 | ||
875 | #endif | |
876 | ||
#if defined(OS2) || defined(MACOS)
FILE * GC_stdout = NULL;
FILE * GC_stderr = NULL;
FILE * GC_log = NULL;
int GC_tmp;  /* Scratch used by the WRITE() macro; should really be local. */

/* Lazily bind any still-unset GC output streams to the C stdio        */
/* defaults (stdout for normal output, stderr for errors and the log). */
/* Invoked by WRITE() before every write on these targets.             */
void GC_set_files()
{
    if (NULL == GC_stdout) GC_stdout = stdout;
    if (NULL == GC_stderr) GC_stderr = stderr;
    if (NULL == GC_log) GC_log = stderr;
}
#endif
896 | ||
#if !defined(OS2) && !defined(MACOS) && !defined(MSWIN32) && !defined(MSWINCE)
  /* On the remaining (Unix-like) targets the GC output "streams" are  */
  /* raw file descriptors: 1 = stdout, 2 = stderr.  GC_log defaults to */
  /* stderr; on UNIX_LIKE systems it may be reassigned at startup (see */
  /* the forward declaration of GC_log near the top of this file).     */
  int GC_stdout = 1;
  int GC_stderr = 2;
  int GC_log = 2;
# if !defined(AMIGA)
#   include <unistd.h>
# endif
#endif
905 | ||
#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(OS2) \
    && !defined(MACOS) && !defined(ECOS) && !defined(NOSYS)

#include <errno.h>      /* for EINTR handling below */

/* Write len bytes from buf to file descriptor fd, restarting both     */
/* partial writes and writes interrupted by signals, until everything  */
/* has been written.  Returns len on success, or -1 if the underlying  */
/* write fails for any other reason.                                   */
int GC_write(int fd, const char *buf, size_t len)
{
    size_t bytes_written = 0;
    ssize_t result;

    while (bytes_written < len) {
#       ifdef GC_SOLARIS_THREADS
          /* Bypass the libc wrapper, which may acquire locks. */
          result = syscall(SYS_write, fd, buf + bytes_written,
                           len - bytes_written);
#       else
          result = write(fd, buf + bytes_written, len - bytes_written);
#       endif
        if (-1 == result) {
            if (EINTR == errno) continue;   /* interrupted by a signal: retry */
            return(-1);
        }
        bytes_written += (size_t)result;
    }
    return((int)bytes_written);
}
#endif /* UN*X */
929 | ||
#ifdef ECOS
  /* eCos: route GC output to the libgcj diagnostic channel; the fd    */
  /* argument is ignored and the write is assumed to succeed.          */
  /* NOTE(review): old-style parameters default to int here — buf is   */
  /* passed through to _Jv_diag_write as-is; confirm its prototype.    */
  int GC_write(fd, buf, len)
  {
      _Jv_diag_write (buf, len);
      return len;
  }
#endif
937 | ||
#ifdef NOSYS
  /* Target with no I/O facilities: discard the output, but report     */
  /* success so callers (e.g. GC_printf) do not ABORT.                 */
  int GC_write(fd, buf, len)
  {
      /* No writing. */
      return len;
  }
#endif
945 | ||
946 | ||
#if defined(MSWIN32) || defined(MSWINCE)
    /* Win32 keeps a single log handle, so the stream argument f is    */
    /* simply dropped.                                                 */
    /* FIXME: This is pretty ugly ... */
#   define WRITE(f, buf, len) GC_write(buf, len)
#else
#   if defined(OS2) || defined(MACOS)
      /* stdio targets: bind the streams first, then fwrite + fflush   */
      /* so output appears promptly; GC_tmp carries the fwrite result  */
      /* out of the comma expression.                                  */
#     define WRITE(f, buf, len) (GC_set_files(), \
                                 GC_tmp = fwrite((buf), 1, (len), (f)), \
                                 fflush(f), GC_tmp)
#   else
      /* Everything else writes straight to a file descriptor. */
#     define WRITE(f, buf, len) GC_write((f), (buf), (len))
#   endif
#endif

/* Size of the formatting buffer used by the GC_*printf family below.  */
#define BUFSZ 1024
#ifdef _MSC_VER
  /* Older MSVC spells the C99 vsnprintf with a leading underscore.    */
# define vsnprintf _vsnprintf
#endif
964 | /* A version of printf that is unlikely to call malloc, and is thus safer */ | |
965 | /* to call from the collector in case malloc has been bound to GC_malloc. */ | |
/* Floating point arguments and formats should be avoided, since fp        */
967 | /* conversion is more likely to allocate. */ | |
968 | /* Assumes that no more than BUFSZ-1 characters are written at once. */ | |
969 | void GC_printf(const char *format, ...) | |
970 | { | |
971 | va_list args; | |
972 | char buf[BUFSZ+1]; | |
973 | ||
974 | va_start(args, format); | |
975 | if (GC_quiet) return; | |
976 | buf[BUFSZ] = 0x15; | |
977 | (void) vsnprintf(buf, BUFSZ, format, args); | |
978 | va_end(args); | |
979 | if (buf[BUFSZ] != 0x15) ABORT("GC_printf clobbered stack"); | |
980 | if (WRITE(GC_stdout, buf, strlen(buf)) < 0) ABORT("write to stdout failed"); | |
981 | } | |
982 | ||
983 | void GC_err_printf(const char *format, ...) | |
984 | { | |
985 | va_list args; | |
986 | char buf[BUFSZ+1]; | |
987 | ||
988 | va_start(args, format); | |
989 | buf[BUFSZ] = 0x15; | |
990 | (void) vsnprintf(buf, BUFSZ, format, args); | |
991 | va_end(args); | |
992 | if (buf[BUFSZ] != 0x15) ABORT("GC_printf clobbered stack"); | |
993 | if (WRITE(GC_stderr, buf, strlen(buf)) < 0) ABORT("write to stderr failed"); | |
994 | } | |
995 | ||
996 | void GC_log_printf(const char *format, ...) | |
997 | { | |
998 | va_list args; | |
999 | char buf[BUFSZ+1]; | |
1000 | ||
1001 | va_start(args, format); | |
1002 | buf[BUFSZ] = 0x15; | |
1003 | (void) vsnprintf(buf, BUFSZ, format, args); | |
1004 | va_end(args); | |
1005 | if (buf[BUFSZ] != 0x15) ABORT("GC_printf clobbered stack"); | |
1006 | if (WRITE(GC_log, buf, strlen(buf)) < 0) ABORT("write to log failed"); | |
1007 | } | |
1008 | ||
1009 | void GC_err_puts(const char *s) | |
1010 | { | |
1011 | if (WRITE(GC_stderr, s, strlen(s)) < 0) ABORT("write to stderr failed"); | |
1012 | } | |
1013 | ||
#if defined(LINUX) && !defined(SMALL_CONFIG)
  /* Write a raw buffer (not necessarily NUL-terminated) of len bytes  */
  /* to GC_stderr.                                                     */
  void GC_err_write(buf, len)
  const char *buf;
  size_t len;
  {
      if (WRITE(GC_stderr, buf, len) < 0) ABORT("write to stderr failed");
  }
#endif
1022 | ||
/* Default warning handler: msg is used directly as the printf-style   */
/* format string, with arg as its argument, and the result goes to     */
/* GC_stderr.  msg must therefore be a trusted (collector-supplied)    */
/* format string.                                                      */
void GC_default_warn_proc(char *msg, GC_word arg)
{
    GC_err_printf(msg, arg);
}
1027 | ||
1028 | GC_warn_proc GC_current_warn_proc = GC_default_warn_proc; | |
1029 | ||
1030 | GC_warn_proc GC_set_warn_proc(GC_warn_proc p) | |
1031 | { | |
1032 | GC_warn_proc result; | |
1033 | ||
1034 | # ifdef GC_WIN32_THREADS | |
1035 | GC_ASSERT(GC_is_initialized); | |
1036 | # endif | |
1037 | LOCK(); | |
1038 | result = GC_current_warn_proc; | |
1039 | GC_current_warn_proc = p; | |
1040 | UNLOCK(); | |
1041 | return(result); | |
1042 | } | |
1043 | ||
1044 | GC_word GC_set_free_space_divisor (GC_word value) | |
1045 | { | |
1046 | GC_word old = GC_free_space_divisor; | |
1047 | GC_free_space_divisor = value; | |
1048 | return old; | |
1049 | } | |
1050 | ||
#ifndef PCR
  /* Report the fatal error msg and terminate the process.  If the     */
  /* GC_LOOP_ON_ABORT environment variable is set, spin forever        */
  /* instead so a debugger can be attached to the still-live process.  */
  void GC_abort(const char *msg)
  {
#     if defined(MSWIN32)
        /* A message box is more likely to be seen in a GUI app.       */
        (void) MessageBoxA(NULL, msg, "Fatal error in gc", MB_ICONERROR|MB_OK);
#     else
        GC_err_printf("%s\n", msg);
#     endif
      if (GETENV("GC_LOOP_ON_ABORT") != NULL) {
            /* In many cases it's easier to debug a running process.   */
            /* It's arguably nicer to sleep, but that makes it harder  */
            /* to look at the thread if the debugger doesn't know much */
            /* about threads.                                          */
            for(;;) {}
      }
#     if defined(MSWIN32) || defined(MSWINCE)
        DebugBreak();
#     else
        (void) abort();
#     endif
  }
#endif
1073 | ||
1074 | void GC_enable() | |
1075 | { | |
1076 | LOCK(); | |
1077 | GC_dont_gc--; | |
1078 | UNLOCK(); | |
1079 | } | |
1080 | ||
1081 | void GC_disable() | |
1082 | { | |
1083 | LOCK(); | |
1084 | GC_dont_gc++; | |
1085 | UNLOCK(); | |
1086 | } | |
1087 | ||
1088 | /* Helper procedures for new kind creation. */ | |
1089 | void ** GC_new_free_list_inner() | |
1090 | { | |
1091 | void *result = GC_INTERNAL_MALLOC((MAXOBJGRANULES+1)*sizeof(ptr_t), | |
1092 | PTRFREE); | |
1093 | if (result == 0) ABORT("Failed to allocate freelist for new kind"); | |
1094 | BZERO(result, (MAXOBJGRANULES+1)*sizeof(ptr_t)); | |
1095 | return result; | |
1096 | } | |
1097 | ||
/* Locked wrapper around GC_new_free_list_inner. */
void ** GC_new_free_list()
{
    void *fl;

    LOCK();
    fl = GC_new_free_list_inner();
    UNLOCK();
    return fl;
}
1106 | ||
1107 | unsigned GC_new_kind_inner(void **fl, GC_word descr, int adjust, int clear) | |
1108 | { | |
1109 | unsigned result = GC_n_kinds++; | |
1110 | ||
1111 | if (GC_n_kinds > MAXOBJKINDS) ABORT("Too many kinds"); | |
1112 | GC_obj_kinds[result].ok_freelist = fl; | |
1113 | GC_obj_kinds[result].ok_reclaim_list = 0; | |
1114 | GC_obj_kinds[result].ok_descriptor = descr; | |
1115 | GC_obj_kinds[result].ok_relocate_descr = adjust; | |
1116 | GC_obj_kinds[result].ok_init = clear; | |
1117 | return result; | |
1118 | } | |
1119 | ||
1120 | unsigned GC_new_kind(void **fl, GC_word descr, int adjust, int clear) | |
1121 | { | |
1122 | unsigned result; | |
1123 | LOCK(); | |
1124 | result = GC_new_kind_inner(fl, descr, adjust, clear); | |
1125 | UNLOCK(); | |
1126 | return result; | |
1127 | } | |
1128 | ||
1129 | unsigned GC_new_proc_inner(GC_mark_proc proc) | |
1130 | { | |
1131 | unsigned result = GC_n_mark_procs++; | |
1132 | ||
1133 | if (GC_n_mark_procs > MAX_MARK_PROCS) ABORT("Too many mark procedures"); | |
1134 | GC_mark_procs[result] = proc; | |
1135 | return result; | |
1136 | } | |
1137 | ||
1138 | unsigned GC_new_proc(GC_mark_proc proc) | |
1139 | { | |
1140 | unsigned result; | |
1141 | LOCK(); | |
1142 | result = GC_new_proc_inner(proc); | |
1143 | UNLOCK(); | |
1144 | return result; | |
1145 | } | |
1146 | ||
/* Invoke fn(&base, arg), with base.mem_base set to the address of a   */
/* local in this frame — i.e. an address inside the current stack —    */
/* so fn can use it as an approximate cold end of the stack.           */
void * GC_call_with_stack_base(GC_stack_base_func fn, void *arg)
{
    int dummy;  /* exists only so its address marks this stack frame   */
    struct GC_stack_base base;

    base.mem_base = (void *)&dummy;
#   ifdef IA64
      /* IA64 keeps a second (register) stack; record its base too.    */
      base.reg_base = (void *)GC_save_regs_in_stack();
      /* Unnecessarily flushes register stack,          */
      /* but that probably doesn't hurt.                */
#   endif
    return fn(&base, arg);
}
1160 | ||
1161 | #if !defined(NO_DEBUGGING) | |
1162 | ||
1163 | void GC_dump() | |
1164 | { | |
1165 | GC_printf("***Static roots:\n"); | |
1166 | GC_print_static_roots(); | |
1167 | GC_printf("\n***Heap sections:\n"); | |
1168 | GC_print_heap_sects(); | |
1169 | GC_printf("\n***Free blocks:\n"); | |
1170 | GC_print_hblkfreelist(); | |
1171 | GC_printf("\n***Blocks in use:\n"); | |
1172 | GC_print_block_list(); | |
1173 | GC_printf("\n***Finalization statistics:\n"); | |
1174 | GC_print_finalization_stats(); | |
1175 | } | |
1176 | ||
1177 | #endif /* NO_DEBUGGING */ |