/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
# include <stdio.h>
# include "private/gc_priv.h"

/* Data structure for list of root sets.                                */
/* We keep a hash table, so that we can filter out duplicate additions. */
/* Under Win32, we need to do a better job of filtering overlaps, so    */
/* we resort to sequential search, and pay the price.                   */
/* This is really declared in gc_priv.h:
struct roots {
        ptr_t r_start;
        ptr_t r_end;
#       if !defined(MSWIN32) && !defined(MSWINCE)
          struct roots * r_next;
#       endif
        GC_bool r_tmp;
                -- Delete before registering new dynamic libraries
};

struct roots GC_static_roots[MAX_ROOT_SETS];
*/

int GC_no_dls = 0;      /* Register dynamic library data segments. */

static int n_root_sets = 0;

/* GC_static_roots[0..n_root_sets) contains the valid root sets. */

# if !defined(NO_DEBUGGING)
/* For debugging: */
void GC_print_static_roots(void)
{
    register int i;
    size_t total = 0;

    for (i = 0; i < n_root_sets; i++) {
        GC_printf("From %p to %p ",
                  GC_static_roots[i].r_start,
                  GC_static_roots[i].r_end);
        if (GC_static_roots[i].r_tmp) {
            GC_printf(" (temporary)\n");
        } else {
            GC_printf("\n");
        }
        total += GC_static_roots[i].r_end - GC_static_roots[i].r_start;
    }
    GC_printf("Total size: %lu\n", (unsigned long) total);
    if (GC_root_size != total) {
        GC_printf("GC_root_size incorrect: %lu!!\n",
                  (unsigned long) GC_root_size);
    }
}
# endif /* NO_DEBUGGING */

/* Primarily for debugging support:                                    */
/* Is the address p in one of the registered static root sections?     */
GC_bool GC_is_static_root(ptr_t p)
{
    static int last_root_set = MAX_ROOT_SETS;
    register int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end) return(TRUE);
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return(TRUE);
        }
    }
    return(FALSE);
}

#if !defined(MSWIN32) && !defined(MSWINCE)
/*
#   define LOG_RT_SIZE 6
#   define RT_SIZE (1 << LOG_RT_SIZE)  -- Power of 2, may be != MAX_ROOT_SETS

    struct roots * GC_root_index[RT_SIZE];
        -- Hash table header.  Used only to check whether a range is
        -- already present.
        -- really defined in gc_priv.h
*/

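/* Hash a root-set start address down to an index into GC_root_index.  */
/* Each step xor-folds the upper half of the remaining significant     */
/* bits onto the lower half, so that every bit of the address can      */
/* influence the final LOG_RT_SIZE-bit bucket number.                  */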
static INLINE int rt_hash(ptr_t addr)
{
    word result = (word) addr;
#   if CPP_WORDSZ > 8*LOG_RT_SIZE
        result ^= result >> 8*LOG_RT_SIZE;
#   endif
#   if CPP_WORDSZ > 4*LOG_RT_SIZE
        result ^= result >> 4*LOG_RT_SIZE;
#   endif
    result ^= result >> 2*LOG_RT_SIZE;
    result ^= result >> LOG_RT_SIZE;
    result &= (RT_SIZE-1);
    return(result);
}

/* Is a range starting at b already in the table? If so return a       */
/* pointer to it, else NIL.                                             */
struct roots * GC_roots_present(ptr_t b)
{
    int h = rt_hash(b);
    struct roots *p = GC_root_index[h];

    while (p != 0) {
        if (p -> r_start == (ptr_t)b) return(p);
        p = p -> r_next;
    }
    return(0);
}

/* Add the given root structure to the index. */
static void add_roots_to_index(struct roots *p)
{
    int h = rt_hash(p -> r_start);

    p -> r_next = GC_root_index[h];
    GC_root_index[h] = p;
}

# else /* MSWIN32 || MSWINCE */

#   define add_roots_to_index(p)

# endif

word GC_root_size = 0;

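/* Illustrative use (not part of this file): a client with a           */
/* statically allocated table of pointers might register it with       */
/*                                                                      */
/*      static void *my_table[100];                                     */
/*      GC_add_roots(my_table, my_table + 100);                         */
/*                                                                      */
/* so that the collector scans my_table when marking.                   */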
void GC_add_roots(void *b, void *e)
{
    DCL_LOCK_STATE;

    if (!GC_is_initialized) GC_init();
    LOCK();
    GC_add_roots_inner((ptr_t)b, (ptr_t)e, FALSE);
    UNLOCK();
}


/* Add [b,e) to the root set.  Adding the same interval a second time  */
/* is a moderately fast noop, and hence benign.  We do not handle      */
/* different but overlapping intervals efficiently.  (We do handle     */
/* them correctly.)                                                     */
/* Tmp specifies that the interval may be deleted before               */
/* reregistering dynamic libraries.                                     */
void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
{
    struct roots * old;

#   if defined(MSWIN32) || defined(MSWINCE)
      /* Spend the time to ensure that there are no overlapping        */
      /* or adjacent intervals.  This could be done faster with        */
      /* e.g. a balanced tree.  But the execution time here is         */
      /* virtually guaranteed to be dominated by the time it           */
      /* takes to scan the roots.                                      */
      {
        register int i;

        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if (b <= old -> r_end && e >= old -> r_start) {
                if (b < old -> r_start) {
                    /* Update the size before moving the bound, so the */
                    /* delta is measured against the old value.        */
                    GC_root_size += (old -> r_start - b);
                    old -> r_start = b;
                }
                if (e > old -> r_end) {
                    GC_root_size += (e - old -> r_end);
                    old -> r_end = e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
            /* merge other overlapping intervals */
            struct roots *other;

            for (i++; i < n_root_sets; i++) {
                other = GC_static_roots + i;
                b = other -> r_start;
                e = other -> r_end;
                if (b <= old -> r_end && e >= old -> r_start) {
                    if (b < old -> r_start) {
                        GC_root_size += (old -> r_start - b);
                        old -> r_start = b;
                    }
                    if (e > old -> r_end) {
                        GC_root_size += (e - old -> r_end);
                        old -> r_end = e;
                    }
                    old -> r_tmp &= other -> r_tmp;
                    /* Delete this entry by moving the last one into   */
                    /* its slot, then revisit the slot: the moved      */
                    /* entry has not been tested against old yet.      */
                    GC_root_size -= (other -> r_end - other -> r_start);
                    other -> r_start = GC_static_roots[n_root_sets-1].r_start;
                    other -> r_end = GC_static_roots[n_root_sets-1].r_end;
                    other -> r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
                    n_root_sets--;
                    i--;
                }
            }
            return;
        }
      }
#   else
      old = GC_roots_present(b);
      if (old != 0) {
          if (e <= old -> r_end) /* already there */ return;
          /* else extend */
          GC_root_size += e - old -> r_end;
          old -> r_end = e;
          return;
      }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets\n");
    }
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   if !defined(MSWIN32) && !defined(MSWINCE)
      GC_static_roots[n_root_sets].r_next = 0;
#   endif
    add_roots_to_index(GC_static_roots + n_root_sets);
    GC_root_size += e - b;
    n_root_sets++;
}

static GC_bool roots_were_cleared = FALSE;

void GC_clear_roots (void)
{
    DCL_LOCK_STATE;

    if (!GC_is_initialized) GC_init();
    LOCK();
    roots_were_cleared = TRUE;
    n_root_sets = 0;
    GC_root_size = 0;
#   if !defined(MSWIN32) && !defined(MSWINCE)
      {
        register int i;

        for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
      }
#   endif
    UNLOCK();
}

/* Internal use only; lock held.  Deletes entry i by overwriting it    */
/* with the last entry, which keeps GC_static_roots dense in O(1).     */
static void GC_remove_root_at_pos(int i)
{
    GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
    GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
    GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
    GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
    n_root_sets--;
}

#if !defined(MSWIN32) && !defined(MSWINCE)
static void GC_rebuild_root_index(void)
{
    int i;

    for (i = 0; i < RT_SIZE; i++) GC_root_index[i] = 0;
    for (i = 0; i < n_root_sets; i++)
        add_roots_to_index(GC_static_roots + i);
}
#endif

/* Internal use only; lock held. */
void GC_remove_tmp_roots(void)
{
    int i;

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_tmp) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
#if !defined(MSWIN32) && !defined(MSWINCE)
    GC_rebuild_root_index();
#endif
}

#if !defined(MSWIN32) && !defined(MSWINCE)
void GC_remove_roots(void *b, void *e)
{
    DCL_LOCK_STATE;

    LOCK();
    GC_remove_roots_inner((ptr_t)b, (ptr_t)e);
    UNLOCK();
}

/* Should only be called when the lock is held. */
void GC_remove_roots_inner(ptr_t b, ptr_t e)
{
    int i;

    for (i = 0; i < n_root_sets; ) {
        if (GC_static_roots[i].r_start >= b
            && GC_static_roots[i].r_end <= e) {
            GC_remove_root_at_pos(i);
        } else {
            i++;
        }
    }
    GC_rebuild_root_index();
}
#endif /* !defined(MSWIN32) && !defined(MSWINCE) */

#if defined(MSWIN32) || defined(_WIN32_WCE_EMULATION)
/* Workaround for the OS mapping and unmapping behind our back:         */
/* Is the address p in one of the temporary static root sections?       */
GC_bool GC_is_tmp_root(ptr_t p)
{
    static int last_root_set = MAX_ROOT_SETS;
    register int i;

    if (last_root_set < n_root_sets
        && p >= GC_static_roots[last_root_set].r_start
        && p < GC_static_roots[last_root_set].r_end)
        return GC_static_roots[last_root_set].r_tmp;
    for (i = 0; i < n_root_sets; i++) {
        if (p >= GC_static_roots[i].r_start
            && p < GC_static_roots[i].r_end) {
            last_root_set = i;
            return GC_static_roots[i].r_tmp;
        }
    }
    return(FALSE);
}
#endif /* MSWIN32 || _WIN32_WCE_EMULATION */

ptr_t GC_approx_sp(void)
{
    volatile word dummy;

    dummy = 42; /* Force stack to grow if necessary.  Otherwise the    */
                /* later accesses might cause the kernel to think we're */
                /* doing something wrong.                               */
#   ifdef _MSC_VER
#     pragma warning(disable:4172)
#   endif
    return((ptr_t)(&dummy));
#   ifdef _MSC_VER
#     pragma warning(default:4172)
#   endif
}

/*
 * Data structure for excluded static roots.
 * Real declaration is in gc_priv.h.

struct exclusion {
    ptr_t e_start;
    ptr_t e_end;
};

struct exclusion GC_excl_table[MAX_EXCLUSIONS];
                -- Array of exclusions, ascending address order.
*/

size_t GC_excl_table_entries = 0;       /* Number of entries in use. */

/* Return the first exclusion range that includes an address >= start_addr. */
/* Assumes the exclusion table contains at least one entry (namely the */
/* GC data structures).                                                 */
struct exclusion * GC_next_exclusion(ptr_t start_addr)
{
    size_t low = 0;
    size_t high = GC_excl_table_entries - 1;
    size_t mid;

    while (high > low) {
        mid = (low + high) >> 1;
        /* low <= mid < high */
        if ((word) GC_excl_table[mid].e_end <= (word) start_addr) {
            low = mid + 1;
        } else {
            high = mid;
        }
    }
    if ((word) GC_excl_table[low].e_end <= (word) start_addr) return 0;
    return GC_excl_table + low;
}
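
/* For example, with exclusions [0x1000,0x2000) and [0x3000,0x4000)    */
/* in the table, GC_next_exclusion((ptr_t)0x2800) returns the          */
/* [0x3000,0x4000) entry, while GC_next_exclusion((ptr_t)0x4000)       */
/* returns 0, since no exclusion ends above that address.              */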

void GC_exclude_static_roots(void *start, void *finish)
{
    struct exclusion * next;
    size_t next_index, i;

    if (0 == GC_excl_table_entries) {
        next = 0;
    } else {
        next = GC_next_exclusion((ptr_t)start);
    }
    if (0 != next) {
        if ((word)(next -> e_start) < (word) finish) {
            /* incomplete error check */
            ABORT("exclusion ranges overlap");
        }
        if ((word)(next -> e_start) == (word) finish) {
            /* extend old range backwards */
            next -> e_start = (ptr_t)start;
            return;
        }
        next_index = next - GC_excl_table;
    } else {
        next_index = GC_excl_table_entries;
    }
    /* Check for room before shifting, so a full table is never        */
    /* written past its end.                                           */
    if (GC_excl_table_entries == MAX_EXCLUSIONS) ABORT("Too many exclusions");
    /* Open a slot at next_index; this loop is a no-op when appending. */
    for (i = GC_excl_table_entries; i > next_index; --i) {
        GC_excl_table[i] = GC_excl_table[i-1];
    }
    GC_excl_table[next_index].e_start = (ptr_t)start;
    GC_excl_table[next_index].e_end = (ptr_t)finish;
    ++GC_excl_table_entries;
}
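
/* Illustrative use (not from this file): a client keeping a large,    */
/* pointer-free lookup table in static data can exempt it from root    */
/* scanning with                                                        */
/*                                                                      */
/*      static char big_table[1 << 20];                                 */
/*      GC_exclude_static_roots(big_table, big_table + sizeof(big_table)); */
/*                                                                      */
/* avoiding both false pointer hits and the cost of scanning it.        */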

/* Invoke push_conditional on ranges that are not excluded. */
void GC_push_conditional_with_exclusions(ptr_t bottom, ptr_t top, GC_bool all)
{
    struct exclusion * next;
    ptr_t excl_start;

    while (bottom < top) {
        next = GC_next_exclusion(bottom);
        if (0 == next || (excl_start = next -> e_start) >= top) {
            GC_push_conditional(bottom, top, all);
            return;
        }
        if (excl_start > bottom) GC_push_conditional(bottom, excl_start, all);
        bottom = next -> e_end;
    }
}
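
/* For example, pushing [0x1000,0x5000) with a single exclusion        */
/* [0x2000,0x3000) registered results in two calls:                    */
/* GC_push_conditional(0x1000,0x2000,all) and                          */
/* GC_push_conditional(0x3000,0x5000,all).                             */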

/*
 * In the absence of threads, push the stack contents.
 * In the presence of threads, push enough of the current stack
 * to ensure that callee-save registers saved in collector frames have been
 * seen.
 * FIXME: Merge with per-thread stuff.
 */
void GC_push_current_stack(ptr_t cold_gc_frame, void * context)
{
#   if defined(THREADS)
      if (0 == cold_gc_frame) return;
#     ifdef STACK_GROWS_DOWN
        GC_push_all_eager(GC_approx_sp(), cold_gc_frame);
        /* For IA64, the register stack backing store is handled */
        /* in the thread-specific code.                          */
#     else
        GC_push_all_eager( cold_gc_frame, GC_approx_sp() );
#     endif
#   else
#     ifdef STACK_GROWS_DOWN
        GC_push_all_stack_partially_eager( GC_approx_sp(), GC_stackbottom,
                                           cold_gc_frame );
#       ifdef IA64
          /* We also need to push the register stack backing store. */
          /* This should really be done in the same way as the      */
          /* regular stack.  For now we fudge it a bit.             */
          /* Note that the backing store grows up, so we can't use  */
          /* GC_push_all_stack_partially_eager.                     */
          {
            extern word GC_save_regs_ret_val;
                /* Previously set to backing store pointer. */
            ptr_t bsp = (ptr_t) GC_save_regs_ret_val;
            ptr_t cold_gc_bs_pointer;
            if (GC_all_interior_pointers) {
              cold_gc_bs_pointer = bsp - 2048;
              if (cold_gc_bs_pointer < BACKING_STORE_BASE) {
                cold_gc_bs_pointer = BACKING_STORE_BASE;
              } else {
                GC_push_all_stack(BACKING_STORE_BASE, cold_gc_bs_pointer);
              }
            } else {
              cold_gc_bs_pointer = BACKING_STORE_BASE;
            }
            GC_push_all_eager(cold_gc_bs_pointer, bsp);
            /* All values should be sufficiently aligned that we */
            /* don't have to worry about the boundary.           */
          }
#       endif
#     else
        GC_push_all_stack_partially_eager( GC_stackbottom, GC_approx_sp(),
                                           cold_gc_frame );
#     endif
#   endif /* !THREADS */
}

/*
 * Push GC internal roots.  Only called if there is some reason to believe
 * these would not otherwise get registered.
 */
void GC_push_gc_structures(void)
{
    GC_push_finalizer_structures();
#   if defined(THREADS)
      GC_push_thread_structures();
#   endif
}

#ifdef THREAD_LOCAL_ALLOC
  void GC_mark_thread_local_free_lists(void);
#endif

void GC_cond_register_dynamic_libraries(void)
{
#   if defined(DYNAMIC_LOADING) || defined(MSWIN32) || defined(MSWINCE) \
       || defined(PCR)
      GC_remove_tmp_roots();
      if (!GC_no_dls) GC_register_dynamic_libraries();
#   else
      GC_no_dls = TRUE;
#   endif
}

/*
 * Call the mark routines (GC_tl_push for a single pointer, GC_push_conditional
 * on groups of pointers) on every top level accessible pointer.
 * If all is FALSE, arrange to push only possibly altered values.
 * Cold_gc_frame is an address inside a GC frame that
 * remains valid until all marking is complete.
 * A zero value indicates that it's OK to miss some
 * register values.
 */
void GC_push_roots(GC_bool all, ptr_t cold_gc_frame)
{
    int i;
    unsigned kind;

    /*
     * Push static data first.  This must happen early on, since it's
     * not robust against mark stack overflow.
     */
    /* Reregister dynamic libraries, in case one got added.            */
    /* There is some argument for doing this as late as possible,      */
    /* especially on win32, where it can change asynchronously.  In    */
    /* those cases, we do it here.  But on other platforms, it's not   */
    /* safe with the world stopped, so we do it earlier.               */
#   if !defined(REGISTER_LIBRARIES_EARLY)
      GC_cond_register_dynamic_libraries();
#   endif

    /* Mark everything in static data areas. */
    for (i = 0; i < n_root_sets; i++) {
        GC_push_conditional_with_exclusions(
            GC_static_roots[i].r_start,
            GC_static_roots[i].r_end, all);
    }

    /* Mark all free list header blocks, if those were allocated from  */
    /* the garbage collected heap.  This makes sure they don't         */
    /* disappear if we are not marking from static data.  It also      */
    /* saves us the trouble of scanning them, and possibly that of     */
    /* marking the freelists.                                          */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        void *base = GC_base(GC_obj_kinds[kind].ok_freelist);
        if (0 != base) {
            GC_set_mark_bit(base);
        }
    }

    /* Mark from GC internal roots if those might otherwise have       */
    /* been excluded.                                                  */
    if (GC_no_dls || roots_were_cleared) {
        GC_push_gc_structures();
    }

    /* Mark thread local free lists, even if their mark                */
    /* descriptor excludes the link field.                             */
    /* If the world is not stopped, this is unsafe.  It is             */
    /* also unnecessary, since we will do this again with the          */
    /* world stopped.                                                  */
#   if defined(THREAD_LOCAL_ALLOC)
      if (GC_world_stopped) GC_mark_thread_local_free_lists();
#   endif

    /*
     * Now traverse stacks, and mark from register contents.
     * These must be done last, since they can legitimately overflow
     * the mark stack.
     * This is usually done by saving the current context on the
     * stack, and then just tracing from the stack.
     */
    GC_push_regs_and_stack(cold_gc_frame);

    if (GC_push_other_roots != 0) (*GC_push_other_roots)();
        /* In the threads case, this also pushes thread stacks. */
        /* Note that without interior pointer recognition lots  */
        /* of stuff may have been pushed already, and this      */
        /* should be careful about mark stack overflows.        */
}