/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
15 # include "private/gc_priv.h"
/* Data structure for list of root sets.                                */
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so    */
/* we resort to sequential search, and pay the price.                   */
/* This is really declared in gc_priv.h:
struct roots {
        ptr_t r_start;
        ptr_t r_end;
#       if !defined(MSWIN32) && !defined(MSWINCE)
          struct roots * r_next;
#       endif
        GC_bool r_tmp;
                -- Delete before registering new dynamic libraries
};

struct roots GC_static_roots[MAX_ROOT_SETS];
*/
/* If nonzero, the client has asked us not to register dynamic library  */
/* data segments.                                                       */
int GC_no_dls = 0;      /* Register dynamic library data segments.      */

/* Number of entries of GC_static_roots currently in use.               */
static int n_root_sets = 0;

        /* GC_static_roots[0..n_root_sets) contains the valid root sets. */
41 # if !defined(NO_DEBUGGING)
43 void GC_print_static_roots(void)
48 for (i
= 0; i
< n_root_sets
; i
++) {
49 GC_printf("From %p to %p ",
50 GC_static_roots
[i
].r_start
,
51 GC_static_roots
[i
].r_end
);
52 if (GC_static_roots
[i
].r_tmp
) {
53 GC_printf(" (temporary)\n");
57 total
+= GC_static_roots
[i
].r_end
- GC_static_roots
[i
].r_start
;
59 GC_printf("Total size: %ld\n", (unsigned long) total
);
60 if (GC_root_size
!= total
) {
61 GC_printf("GC_root_size incorrect: %ld!!\n",
62 (unsigned long) GC_root_size
);
65 # endif /* NO_DEBUGGING */
67 /* Primarily for debugging support: */
68 /* Is the address p in one of the registered static */
70 GC_bool
GC_is_static_root(ptr_t p
)
72 static int last_root_set
= MAX_ROOT_SETS
;
76 if (last_root_set
< n_root_sets
77 && p
>= GC_static_roots
[last_root_set
].r_start
78 && p
< GC_static_roots
[last_root_set
].r_end
) return(TRUE
);
79 for (i
= 0; i
< n_root_sets
; i
++) {
80 if (p
>= GC_static_roots
[i
].r_start
81 && p
< GC_static_roots
[i
].r_end
) {
89 #if !defined(MSWIN32) && !defined(MSWINCE)
91 # define LOG_RT_SIZE 6
92 # define RT_SIZE (1 << LOG_RT_SIZE) -- Power of 2, may be != MAX_ROOT_SETS
94 struct roots * GC_root_index[RT_SIZE];
95 -- Hash table header. Used only to check whether a range is
97 -- really defined in gc_priv.h
100 static INLINE
int rt_hash(ptr_t addr
)
102 word result
= (word
) addr
;
103 # if CPP_WORDSZ > 8*LOG_RT_SIZE
104 result
^= result
>> 8*LOG_RT_SIZE
;
106 # if CPP_WORDSZ > 4*LOG_RT_SIZE
107 result
^= result
>> 4*LOG_RT_SIZE
;
109 result
^= result
>> 2*LOG_RT_SIZE
;
110 result
^= result
>> LOG_RT_SIZE
;
111 result
&= (RT_SIZE
-1);
115 /* Is a range starting at b already in the table? If so return a */
116 /* pointer to it, else NIL. */
117 struct roots
* GC_roots_present(ptr_t b
)
120 struct roots
*p
= GC_root_index
[h
];
123 if (p
-> r_start
== (ptr_t
)b
) return(p
);
129 /* Add the given root structure to the index. */
130 static void add_roots_to_index(struct roots
*p
)
132 int h
= rt_hash(p
-> r_start
);
134 p
-> r_next
= GC_root_index
[h
];
135 GC_root_index
[h
] = p
;
138 # else /* MSWIN32 || MSWINCE */
140 # define add_roots_to_index(p)
147 word GC_root_size
= 0;
149 void GC_add_roots(void *b
, void *e
)
153 if (!GC_is_initialized
) GC_init();
155 GC_add_roots_inner((ptr_t
)b
, (ptr_t
)e
, FALSE
);
160 /* Add [b,e) to the root set. Adding the same interval a second time */
161 /* is a moderately fast noop, and hence benign. We do not handle */
162 /* different but overlapping intervals efficiently. (We do handle */
163 /* them correctly.) */
164 /* Tmp specifies that the interval may be deleted before */
165 /* reregistering dynamic libraries. */
166 void GC_add_roots_inner(ptr_t b
, ptr_t e
, GC_bool tmp
)
170 # if defined(MSWIN32) || defined(MSWINCE)
171 /* Spend the time to ensure that there are no overlapping */
172 /* or adjacent intervals. */
173 /* This could be done faster with e.g. a */
174 /* balanced tree. But the execution time here is */
175 /* virtually guaranteed to be dominated by the time it */
176 /* takes to scan the roots. */
180 for (i
= 0; i
< n_root_sets
; i
++) {
181 old
= GC_static_roots
+ i
;
182 if (b
<= old
-> r_end
&& e
>= old
-> r_start
) {
183 if (b
< old
-> r_start
) {
185 GC_root_size
+= (old
-> r_start
- b
);
187 if (e
> old
-> r_end
) {
189 GC_root_size
+= (e
- old
-> r_end
);
195 if (i
< n_root_sets
) {
196 /* merge other overlapping intervals */
199 for (i
++; i
< n_root_sets
; i
++) {
200 other
= GC_static_roots
+ i
;
201 b
= other
-> r_start
;
203 if (b
<= old
-> r_end
&& e
>= old
-> r_start
) {
204 if (b
< old
-> r_start
) {
206 GC_root_size
+= (old
-> r_start
- b
);
208 if (e
> old
-> r_end
) {
210 GC_root_size
+= (e
- old
-> r_end
);
212 old
-> r_tmp
&= other
-> r_tmp
;
213 /* Delete this entry. */
214 GC_root_size
-= (other
-> r_end
- other
-> r_start
);
215 other
-> r_start
= GC_static_roots
[n_root_sets
-1].r_start
;
216 other
-> r_end
= GC_static_roots
[n_root_sets
-1].r_end
;
224 old
= GC_roots_present(b
);
226 if (e
<= old
-> r_end
) /* already there */ return;
228 GC_root_size
+= e
- old
-> r_end
;
233 if (n_root_sets
== MAX_ROOT_SETS
) {
234 ABORT("Too many root sets\n");
236 GC_static_roots
[n_root_sets
].r_start
= (ptr_t
)b
;
237 GC_static_roots
[n_root_sets
].r_end
= (ptr_t
)e
;
238 GC_static_roots
[n_root_sets
].r_tmp
= tmp
;
239 # if !defined(MSWIN32) && !defined(MSWINCE)
240 GC_static_roots
[n_root_sets
].r_next
= 0;
242 add_roots_to_index(GC_static_roots
+ n_root_sets
);
243 GC_root_size
+= e
- b
;
247 static GC_bool roots_were_cleared
= FALSE
;
249 void GC_clear_roots (void)
253 if (!GC_is_initialized
) GC_init();
255 roots_were_cleared
= TRUE
;
258 # if !defined(MSWIN32) && !defined(MSWINCE)
262 for (i
= 0; i
< RT_SIZE
; i
++) GC_root_index
[i
] = 0;
268 /* Internal use only; lock held. */
269 static void GC_remove_root_at_pos(int i
)
271 GC_root_size
-= (GC_static_roots
[i
].r_end
- GC_static_roots
[i
].r_start
);
272 GC_static_roots
[i
].r_start
= GC_static_roots
[n_root_sets
-1].r_start
;
273 GC_static_roots
[i
].r_end
= GC_static_roots
[n_root_sets
-1].r_end
;
274 GC_static_roots
[i
].r_tmp
= GC_static_roots
[n_root_sets
-1].r_tmp
;
278 #if !defined(MSWIN32) && !defined(MSWINCE)
279 static void GC_rebuild_root_index(void)
283 for (i
= 0; i
< RT_SIZE
; i
++) GC_root_index
[i
] = 0;
284 for (i
= 0; i
< n_root_sets
; i
++)
285 add_roots_to_index(GC_static_roots
+ i
);
289 /* Internal use only; lock held. */
290 void GC_remove_tmp_roots(void)
294 for (i
= 0; i
< n_root_sets
; ) {
295 if (GC_static_roots
[i
].r_tmp
) {
296 GC_remove_root_at_pos(i
);
301 #if !defined(MSWIN32) && !defined(MSWINCE)
302 GC_rebuild_root_index();
306 #if !defined(MSWIN32) && !defined(MSWINCE)
307 void GC_remove_roots(void *b
, void *e
)
312 GC_remove_roots_inner((ptr_t
)b
, (ptr_t
)e
);
316 /* Should only be called when the lock is held */
317 void GC_remove_roots_inner(ptr_t b
, ptr_t e
)
320 for (i
= 0; i
< n_root_sets
; ) {
321 if (GC_static_roots
[i
].r_start
>= b
322 && GC_static_roots
[i
].r_end
<= e
) {
323 GC_remove_root_at_pos(i
);
328 GC_rebuild_root_index();
330 #endif /* !defined(MSWIN32) && !defined(MSWINCE) */
#if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION)
/* Workaround for the OS mapping and unmapping behind our back:         */
/* Is the address p in one of the temporary static root sections?       */
GC_bool GC_is_tmp_root(ptr_t p)
{
    static int last_root_set = MAX_ROOT_SETS;
                /* Cache of the last matching index; primed out of range. */
    int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end)
        return GC_static_roots[last_root_set].r_tmp;
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return GC_static_roots[i].r_tmp;
        }
    }
    return(FALSE);
}
#endif /* MSWIN32 || _WIN32_WCE_EMULATION */
355 ptr_t
GC_approx_sp(void)
359 dummy
= 42; /* Force stack to grow if necessary. Otherwise the */
360 /* later accesses might cause the kernel to think we're */
361 /* doing something wrong. */
363 # pragma warning(disable:4172)
365 return((ptr_t
)(&dummy
));
367 # pragma warning(default:4172)
/*
 * Data structure for excluded static roots.
 * Real declaration is in gc_priv.h.

struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

struct exclusion GC_excl_table[MAX_EXCLUSIONS];
                                        -- Array of exclusions, ascending
                                        -- address order.
*/
/* Number of entries of GC_excl_table currently in use.         */
size_t GC_excl_table_entries = 0;
387 /* Return the first exclusion range that includes an address >= start_addr */
388 /* Assumes the exclusion table contains at least one entry (namely the */
389 /* GC data structures). */
390 struct exclusion
* GC_next_exclusion(ptr_t start_addr
)
393 size_t high
= GC_excl_table_entries
- 1;
397 mid
= (low
+ high
) >> 1;
398 /* low <= mid < high */
399 if ((word
) GC_excl_table
[mid
].e_end
<= (word
) start_addr
) {
405 if ((word
) GC_excl_table
[low
].e_end
<= (word
) start_addr
) return 0;
406 return GC_excl_table
+ low
;
409 void GC_exclude_static_roots(void *start
, void *finish
)
411 struct exclusion
* next
;
412 size_t next_index
, i
;
414 if (0 == GC_excl_table_entries
) {
417 next
= GC_next_exclusion(start
);
420 if ((word
)(next
-> e_start
) < (word
) finish
) {
421 /* incomplete error check. */
422 ABORT("exclusion ranges overlap");
424 if ((word
)(next
-> e_start
) == (word
) finish
) {
425 /* extend old range backwards */
426 next
-> e_start
= (ptr_t
)start
;
429 next_index
= next
- GC_excl_table
;
430 for (i
= GC_excl_table_entries
; i
> next_index
; --i
) {
431 GC_excl_table
[i
] = GC_excl_table
[i
-1];
434 next_index
= GC_excl_table_entries
;
436 if (GC_excl_table_entries
== MAX_EXCLUSIONS
) ABORT("Too many exclusions");
437 GC_excl_table
[next_index
].e_start
= (ptr_t
)start
;
438 GC_excl_table
[next_index
].e_end
= (ptr_t
)finish
;
439 ++GC_excl_table_entries
;
442 /* Invoke push_conditional on ranges that are not excluded. */
443 void GC_push_conditional_with_exclusions(ptr_t bottom
, ptr_t top
, GC_bool all
)
445 struct exclusion
* next
;
448 while (bottom
< top
) {
449 next
= GC_next_exclusion(bottom
);
450 if (0 == next
|| (excl_start
= next
-> e_start
) >= top
) {
451 GC_push_conditional(bottom
, top
, all
);
454 if (excl_start
> bottom
) GC_push_conditional(bottom
, excl_start
, all
);
455 bottom
= next
-> e_end
;
460 * In the absence of threads, push the stack contents.
461 * In the presence of threads, push enough of the current stack
462 * to ensure that callee-save registers saved in collector frames have been
464 * FIXME: Merge with per-thread stuff.
466 void GC_push_current_stack(ptr_t cold_gc_frame
, void * context
)
468 # if defined(THREADS)
469 if (0 == cold_gc_frame
) return;
470 # ifdef STACK_GROWS_DOWN
471 GC_push_all_eager(GC_approx_sp(), cold_gc_frame
);
472 /* For IA64, the register stack backing store is handled */
473 /* in the thread-specific code. */
475 GC_push_all_eager( cold_gc_frame
, GC_approx_sp() );
478 # ifdef STACK_GROWS_DOWN
479 GC_push_all_stack_partially_eager( GC_approx_sp(), GC_stackbottom
,
482 /* We also need to push the register stack backing store. */
483 /* This should really be done in the same way as the */
484 /* regular stack. For now we fudge it a bit. */
485 /* Note that the backing store grows up, so we can't use */
486 /* GC_push_all_stack_partially_eager. */
488 extern word GC_save_regs_ret_val
;
489 /* Previously set to backing store pointer. */
490 ptr_t bsp
= (ptr_t
) GC_save_regs_ret_val
;
491 ptr_t cold_gc_bs_pointer
;
492 if (GC_all_interior_pointers
) {
493 cold_gc_bs_pointer
= bsp
- 2048;
494 if (cold_gc_bs_pointer
< BACKING_STORE_BASE
) {
495 cold_gc_bs_pointer
= BACKING_STORE_BASE
;
497 GC_push_all_stack(BACKING_STORE_BASE
, cold_gc_bs_pointer
);
500 cold_gc_bs_pointer
= BACKING_STORE_BASE
;
502 GC_push_all_eager(cold_gc_bs_pointer
, bsp
);
503 /* All values should be sufficiently aligned that we */
504 /* dont have to worry about the boundary. */
508 GC_push_all_stack_partially_eager( GC_stackbottom
, GC_approx_sp(),
511 # endif /* !THREADS */
/*
 * Push GC internal roots.  Only called if there is some reason to believe
 * these would not otherwise get registered.
 */
void GC_push_gc_structures(void)
{
    GC_push_finalizer_structures();
#   if defined(THREADS)
      GC_push_thread_structures();
#   endif
}

#ifdef THREAD_LOCAL_ALLOC
  void GC_mark_thread_local_free_lists(void);
#endif
530 void GC_cond_register_dynamic_libraries(void)
532 # if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
534 GC_remove_tmp_roots();
535 if (!GC_no_dls
) GC_register_dynamic_libraries();
542 * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
543 * on groups of pointers) on every top level accessible pointer.
544 * If all is FALSE, arrange to push only possibly altered values.
545 * Cold_gc_frame is an address inside a GC frame that
546 * remains valid until all marking is complete.
547 * A zero value indicates that it's OK to miss some
550 void GC_push_roots(GC_bool all
, ptr_t cold_gc_frame
)
556 * Next push static data. This must happen early on, since it's
557 * not robust against mark stack overflow.
559 /* Reregister dynamic libraries, in case one got added. */
560 /* There is some argument for doing this as late as possible, */
561 /* especially on win32, where it can change asynchronously. */
562 /* In those cases, we do it here. But on other platforms, it's */
563 /* not safe with the world stopped, so we do it earlier. */
564 # if !defined(REGISTER_LIBRARIES_EARLY)
565 GC_cond_register_dynamic_libraries();
568 /* Mark everything in static data areas */
569 for (i
= 0; i
< n_root_sets
; i
++) {
570 GC_push_conditional_with_exclusions(
571 GC_static_roots
[i
].r_start
,
572 GC_static_roots
[i
].r_end
, all
);
575 /* Mark all free list header blocks, if those were allocated from */
576 /* the garbage collected heap. This makes sure they don't */
577 /* disappear if we are not marking from static data. It also */
578 /* saves us the trouble of scanning them, and possibly that of */
579 /* marking the freelists. */
580 for (kind
= 0; kind
< GC_n_kinds
; kind
++) {
581 void *base
= GC_base(GC_obj_kinds
[kind
].ok_freelist
);
583 GC_set_mark_bit(base
);
587 /* Mark from GC internal roots if those might otherwise have */
589 if (GC_no_dls
|| roots_were_cleared
) {
590 GC_push_gc_structures();
593 /* Mark thread local free lists, even if their mark */
594 /* descriptor excludes the link field. */
595 /* If the world is not stopped, this is unsafe. It is */
596 /* also unnecessary, since we will do this again with the */
598 # if defined(THREAD_LOCAL_ALLOC)
599 if (GC_world_stopped
) GC_mark_thread_local_free_lists();
603 * Now traverse stacks, and mark from register contents.
604 * These must be done last, since they can legitimately overflow
606 * This is usually done by saving the current context on the
607 * stack, and then just tracing from the stack.
609 GC_push_regs_and_stack(cold_gc_frame
);
611 if (GC_push_other_roots
!= 0) (*GC_push_other_roots
)();
612 /* In the threads case, this also pushes thread stacks. */
613 /* Note that without interior pointer recognition lots */
614 /* of stuff may have been pushed already, and this */
615 /* should be careful about mark stack overflows. */