 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
# include "private/gc_priv.h"

# include "atomic_ops.h"
# if defined(LINUX) && !defined(POWERPC)
#   include <linux/version.h>
#   if (LINUX_VERSION_CODE <= 0x10400)
      /* Ugly hack to get struct sigcontext_struct definition.  Required */
      /* for some early 1.3.X releases.  Will hopefully go away soon.    */
      /* In some later Linux releases, asm/sigcontext.h may have to      */
      /* be included instead.                                            */
#     include <asm/signal.h>
#   else
      /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
      /* struct sigcontext.  libc6 (glibc2) uses "struct sigcontext" in     */
      /* prototypes, so we have to include the top-level sigcontext.h to    */
      /* make sure the former gets defined to be the latter if appropriate. */
#     include <features.h>
#     if 2 <= __GLIBC__
#       if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
          /* glibc 2.1 no longer has sigcontext.h.  But signal.h   */
          /* has the right declaration for glibc 2.1.              */
#         include <sigcontext.h>
#       endif /* 0 == __GLIBC_MINOR__ */
#     else /* not 2 <= __GLIBC__ */
        /* libc5 doesn't have <sigcontext.h>: go directly with the kernel   */
        /* one.  Check LINUX_VERSION_CODE to see which we should reference. */
#       include <asm/sigcontext.h>
#     endif /* 2 <= __GLIBC__ */
#   endif
# endif
# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS)
#   include <sys/types.h>
#   if !defined(MSWIN32)
#     include <unistd.h>
#   endif
# endif

# ifdef MACOS
#   define SIGSEGV 0 /* value is irrelevant */
# endif

#if defined(LINUX) || defined(LINUX_STACKBOTTOM)
# include <ctype.h>
#endif
/* Blatantly OS dependent routines, except for those that are related  */
/* to dynamic loading.                                                 */

#if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
  /* It's not clear this is completely kosher under Cygwin.  But it    */
  /* allows us to get a working GC_get_stack_base.                     */
#endif

#ifdef MACOS
# include <Processes.h>
#endif

#ifdef IRIX5
# include <malloc.h>   /* for locking */
#endif
#if defined(LINUX) || defined(FREEBSD) || defined(SOLARIS) || defined(IRIX5) \
    || defined(USE_MMAP) || defined(USE_MUNMAP)
# define MMAP_SUPPORTED
#endif

#if defined(MMAP_SUPPORTED) || defined(ADD_HEAP_GUARD_PAGES)
# if defined(USE_MUNMAP) && !defined(USE_MMAP)
    --> USE_MUNMAP requires USE_MMAP
# endif
# include <sys/types.h>
# include <sys/mman.h>
# include <sys/stat.h>
#endif
#ifdef DARWIN
  /* for get_etext and friends */
# include <mach-o/getsect.h>
#endif

#ifdef DJGPP
  /* Apparently necessary for djgpp 2.01.  May cause problems with */
  /* other versions.                                               */
  typedef long unsigned int caddr_t;
#endif
#ifdef PCR
# include "il/PCR_IL.h"
# include "th/PCR_ThCtl.h"
# include "mm/PCR_MM.h"
#endif

#if !defined(NO_EXECUTE_PERMISSION)
# define OPT_PROT_EXEC PROT_EXEC
#else
# define OPT_PROT_EXEC 0
#endif

#if defined(LINUX) && \
    (defined(USE_PROC_FOR_LIBRARIES) || defined(IA64) || !defined(SMALL_CONFIG))
# define NEED_PROC_MAPS
#endif
#ifdef NEED_PROC_MAPS
/* We need to parse /proc/self/maps, either to find dynamic libraries  */
/* or to find the register backing store base (IA64).  Do it once      */
/* here.                                                               */

/* Repeatedly perform a read call until the buffer is filled or        */
/* we encounter EOF.                                                   */
ssize_t GC_repeat_read(int fd, char *buf, size_t count)
{
    ssize_t num_read = 0;
    ssize_t result;

    while (num_read < count) {
        result = READ(fd, buf + num_read, count - num_read);
        if (result < 0) return result;
        if (result == 0) break;
        num_read += result;
    }
    return num_read;
}
/* Determine the length of a file by incrementally reading it into a   */
/* buffer.  This would be silly to use on a file supporting lseek, but */
/* Linux /proc files usually do not.                                   */
size_t GC_get_file_len(int f)
{
    size_t total = 0;
    ssize_t result;
#   define GET_FILE_LEN_BUF_SZ 500
    char buf[GET_FILE_LEN_BUF_SZ];

    do {
        result = read(f, buf, GET_FILE_LEN_BUF_SZ);
        if (result == -1) return 0;
        total += result;
    } while (result > 0);
    return total;
}
size_t GC_get_maps_len(void)
{
    int f = open("/proc/self/maps", O_RDONLY);
    size_t result = GC_get_file_len(f);
    close(f);
    return result;
}
/*
 * Copy the contents of /proc/self/maps to a buffer in our address space.
 * Return the address of the buffer, or zero on failure.
 * This code could be simplified if we could determine its size ahead
 * of time.
 */
char * GC_get_maps(void)
{
    int f;
    int result;
    static char init_buf[1];
    static char *maps_buf = init_buf;
    static size_t maps_buf_sz = 1;
    size_t maps_size, old_maps_size = 0;

    /* The buffer is essentially static, so there must be a single client. */
    GC_ASSERT(I_HOLD_LOCK());
    /* Note that in the presence of threads, the maps file can     */
    /* essentially shrink asynchronously and unexpectedly as       */
    /* threads that we already think of as dead release their      */
    /* stacks.  And there is no easy way to read the entire        */
    /* file atomically.  This is arguably a misfeature of the      */
    /* /proc/.../maps interface.                                   */

    /* Since we don't believe the file can grow                    */
    /* asynchronously, it should suffice to first determine        */
    /* the size (using lseek or read), and then to reread the      */
    /* file.  If the size is inconsistent we have to retry.        */
    /* This only matters with threads enabled, and if we use       */
    /* this to locate roots (not the default).                     */
    do {
        /* Determine the initial size of /proc/self/maps.          */
        /* Note that lseek doesn't work, at least as of 2.6.15.    */
#       ifdef THREADS
            maps_size = GC_get_maps_len();
            if (0 == maps_size) return 0;
#       else
            maps_size = 4000;   /* Guess */
#       endif

        /* Read /proc/self/maps, growing maps_buf as necessary.    */
        /* Note that we may not allocate conventionally, and       */
        /* thus can't use stdio.                                   */
        while (maps_size >= maps_buf_sz) {
            /* Grow only by powers of 2, since we leak "too small" buffers. */
            while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
            maps_buf = GC_scratch_alloc(maps_buf_sz);
#           ifdef THREADS
                /* Recompute initial length, since we allocated.   */
                /* This can only happen a few times per program    */
                /* execution.                                      */
                maps_size = GC_get_maps_len();
                if (0 == maps_size) return 0;
#           endif
        }
        if (maps_buf == 0) return 0;
        GC_ASSERT(maps_buf_sz >= maps_size + 1);
        f = open("/proc/self/maps", O_RDONLY);
        if (-1 == f) return 0;
#       ifdef THREADS
            old_maps_size = maps_size;
#       endif
        maps_size = 0;
        do {
            result = GC_repeat_read(f, maps_buf, maps_buf_sz-1);
            if (result <= 0) return 0;
            maps_size += result;
        } while (result == maps_buf_sz-1);
        close(f);
#       ifdef THREADS
        if (maps_size > old_maps_size) {
            GC_err_printf("Old maps size = %d, new maps size = %d\n",
                          old_maps_size, maps_size);
            ABORT("Unexpected asynchronous /proc/self/maps growth: "
                  "Unregistered thread?");
        }
#       endif
    } while (maps_size >= maps_buf_sz || maps_size < old_maps_size);
        /* In the single-threaded case, the second clause is false. */
    maps_buf[maps_size] = '\0';

    /* Apply fn to result. */
    return maps_buf;
}
// GC_parse_map_entry parses an entry from /proc/self/maps so we can
// locate all writable data segments that belong to shared libraries.
// The format of one of these entries and the fields we care about
// is as follows:
// XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537     name of mapping...\n
// ^^^^^^^^ ^^^^^^^^ ^^^^          ^^
// start    end      prot          maj_dev
//
// Note that since about August 2003 kernels, the columns no longer have
// fixed offsets on 64-bit kernels.  Hence we no longer rely on fixed offsets
// anywhere, which is safer anyway.
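// Illustrative only: assuming the layout drawn above, a freestanding
// parser for one such line could use sscanf as sketched below.  The
// real GC_parse_map_entry avoids stdio and fixed field widths, so treat
// this purely as documentation of the format, not as collector code.
#if 0
#include <stdio.h>
static int parse_maps_line_sketch(const char *line)
{
    unsigned long start, end, maj_dev;
    char prot[5];
    /* start-end, perms, offset (skipped), major device of "maj:min". */
    if (sscanf(line, "%lx-%lx %4s %*x %lx:", &start, &end, prot, &maj_dev) != 4)
        return 0;
    return prot[1] == 'w' && maj_dev == 0;  /* writable, not file-backed */
}
#endif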
/*
 * Assign various fields of the first line in buf_ptr to *start, *end,
 * *prot, *maj_dev and *mapping_name.  Mapping_name may be NULL.
 * *prot and *mapping_name are assigned pointers into the original
 * buffer.
 */
char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
                         char **prot, unsigned int *maj_dev,
                         char **mapping_name)
{
    char *start_start, *end_start, *maj_dev_start;
    char *p;
    char *endp;
    if (buf_ptr == NULL || *buf_ptr == '\0') {
        return NULL;
    }

    p = buf_ptr;
    while (isspace(*p)) ++p;
    start_start = p;
    GC_ASSERT(isxdigit(*start_start));
    *start = (ptr_t)strtoul(start_start, &endp, 16); p = endp;
    GC_ASSERT(*p == '-');

    ++p;
    end_start = p;
    GC_ASSERT(isxdigit(*end_start));
    *end = (ptr_t)strtoul(end_start, &endp, 16); p = endp;
    GC_ASSERT(isspace(*p));

    while (isspace(*p)) ++p;
    GC_ASSERT(*p == 'r' || *p == '-');
    *prot = p;
    /* Skip past protection field to offset field */
    while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
    GC_ASSERT(isxdigit(*p));
    /* Skip past offset field, which we ignore */
    while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
    maj_dev_start = p;
    GC_ASSERT(isxdigit(*maj_dev_start));
    *maj_dev = strtoul(maj_dev_start, NULL, 16);

    if (mapping_name == 0) {
        while (*p && *p++ != '\n');
    } else {
        while (*p && *p != '\n' && *p != '/' && *p != '[') p++;
        *mapping_name = p;
        while (*p && *p++ != '\n');
    }

    return p;
}
/* Try to read the backing store base from /proc/self/maps.           */
/* Return the bounds of the writable mapping with a 0 major device,   */
/* which includes the address passed as data.                         */
/* Return FALSE if there is no such mapping.                          */
GC_bool GC_enclosing_mapping(ptr_t addr, ptr_t *startp, ptr_t *endp)
{
    char *prot;
    ptr_t my_start, my_end;
    unsigned int maj_dev;
    char *maps = GC_get_maps();
    char *buf_ptr = maps;

    if (0 == maps) return(FALSE);
    for (;;) {
        buf_ptr = GC_parse_map_entry(buf_ptr, &my_start, &my_end,
                                     &prot, &maj_dev, 0);
        if (buf_ptr == NULL) return FALSE;
        if (prot[1] == 'w' && maj_dev == 0) {
            if (my_end > addr && my_start <= addr) {
                *startp = my_start;
                *endp = my_end;
                return TRUE;
            }
        }
    }
    return FALSE;
}
/* Find the text (code) mapping for the library whose name starts with nm. */
GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp)
{
    size_t nm_len = strlen(nm);
    char *prot;
    char *map_path;
    ptr_t my_start, my_end;
    unsigned int maj_dev;
    char *maps = GC_get_maps();
    char *buf_ptr = maps;

    if (0 == maps) return(FALSE);
    for (;;) {
        buf_ptr = GC_parse_map_entry(buf_ptr, &my_start, &my_end,
                                     &prot, &maj_dev, &map_path);
        if (buf_ptr == NULL) return FALSE;
        if (prot[0] == 'r' && prot[1] == '-' && prot[2] == 'x' &&
            strncmp(nm, map_path, nm_len) == 0) {
            *startp = my_start;
            *endp = my_end;
            return TRUE;
        }
    }
    return FALSE;
}
static ptr_t backing_store_base_from_proc(void)
{
    ptr_t my_start, my_end;
    if (!GC_enclosing_mapping(GC_save_regs_in_stack(), &my_start, &my_end)) {
        if (GC_print_stats) {
            GC_log_printf("Failed to find backing store base from /proc\n");
        }
        return 0;
    }
    return my_start;
}

#endif /* NEED_PROC_MAPS */
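/* Illustrative only: a typical caller of the lookup routines above.    */
/* The collector issues calls of roughly this shape when it must locate */
/* a library's code segment; the path here is a hypothetical example.   */
#if 0
static void text_mapping_usage_sketch(void)
{
    ptr_t start, end;
    if (GC_text_mapping("/lib/libpthread", &start, &end)) {
        /* [start, end) covers the library's r-xp (code) mapping. */
    }
}
#endif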
#if defined(SEARCH_FOR_DATA_START)
  /* The I386 case can be handled without a search.  The Alpha case    */
  /* used to be handled differently as well, but the rules changed     */
  /* for recent Linux versions.  This seems to be the easiest way to   */
  /* cover all versions.                                               */

# if defined(LINUX) || defined(HURD)
    /* Some Linux distributions arrange to define __data_start.  Some    */
    /* define data_start as a weak symbol.  The latter is technically    */
    /* broken, since the user program may define data_start, in which    */
    /* case we lose.  Nonetheless, we try both, preferring __data_start. */
    /* We assume gcc-compatible pragmas.                                 */
#   pragma weak __data_start
    extern int __data_start[];
#   pragma weak data_start
    extern int data_start[];
# endif /* LINUX */

  ptr_t GC_data_start;
  void GC_init_linux_data_start()
  {
    extern ptr_t GC_find_limit(ptr_t, GC_bool);

#   if defined(LINUX) || defined(HURD)
      /* Try the easy approaches first: */
      if ((ptr_t)__data_start != 0) {
          GC_data_start = (ptr_t)(__data_start);
          return;
      }
      if ((ptr_t)data_start != 0) {
          GC_data_start = (ptr_t)(data_start);
          return;
      }
#   endif /* LINUX */
    GC_data_start = GC_find_limit((ptr_t)(_end), FALSE);
  }
#endif /* SEARCH_FOR_DATA_START */
#ifdef ECOS

# ifndef ECOS_GC_MEMORY_SIZE
#   define ECOS_GC_MEMORY_SIZE (448 * 1024)
# endif /* ECOS_GC_MEMORY_SIZE */

  // FIXME: This is a simple way of allocating memory which is
  // compatible with ECOS early releases.  Later releases use a more
  // sophisticated means of allocating memory than this simple static
  // allocator, but this method is at least bound to work.
  static char memory[ECOS_GC_MEMORY_SIZE];
  static char *brk = memory;

  static void *tiny_sbrk(ptrdiff_t increment)
  {
    void *p = brk;

    brk += increment;
    if (brk > memory + sizeof memory)
    {
        brk -= increment;
        return NULL;
    }

    return p;
  }
# define sbrk tiny_sbrk
#endif /* ECOS */
#if (defined(NETBSD) || defined(OPENBSD)) && defined(__ELF__)
  ptr_t GC_data_start;

  void GC_init_netbsd_elf(void)
  {
    extern ptr_t GC_find_limit(ptr_t, GC_bool);
    extern char **environ;
    /* This may need to be environ, without the underscore, for   */
    /* some versions.                                             */
    GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
  }
#endif
# if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */

struct exe_hdr {
    unsigned short      magic_number;
    unsigned short      padding[29];
    long                new_exe_offset;
};

#define E_MAGIC(x)      (x).magic_number
#define EMAGIC          0x5A4D
#define E_LFANEW(x)     (x).new_exe_offset

struct e32_exe {
    unsigned char       magic_number[2];
    unsigned char       byte_order;
    unsigned char       word_order;
    unsigned long       exe_format_level;
    unsigned short      cpu;
    unsigned short      os;
    unsigned long       padding1[13];
    unsigned long       object_table_offset;
    unsigned long       object_count;
    unsigned long       padding2[31];
};

#define E32_MAGIC1(x)   (x).magic_number[0]
#define E32MAGIC1       'L'
#define E32_MAGIC2(x)   (x).magic_number[1]
#define E32MAGIC2       'X'
#define E32_BORDER(x)   (x).byte_order  /* 0 == little endian */
#define E32LEBO         0
#define E32_WORDER(x)   (x).word_order  /* 0 == little endian */
#define E32LEWO         0
#define E32_CPU(x)      (x).cpu
#define E32CPU286       1
#define E32_OBJTAB(x)   (x).object_table_offset
#define E32_OBJCNT(x)   (x).object_count

struct o32_obj {
    unsigned long       size;
    unsigned long       base;
    unsigned long       flags;
    unsigned long       pagemap;
    unsigned long       mapsize;
    unsigned long       reserved;
};

#define O32_FLAGS(x)    (x).flags
#define OBJREAD         0x0001L
#define OBJWRITE        0x0002L
#define OBJINVALID      0x0080L
#define O32_SIZE(x)     (x).size
#define O32_BASE(x)     (x).base
# else  /* IBM's compiler */

/* A kludge to get around what appears to be a header file bug */
# ifndef WORD
#   define WORD unsigned short
# endif
# ifndef DWORD
#   define DWORD unsigned long
# endif

# endif /* __IBMC__ */

# define INCL_DOSEXCEPTIONS
# define INCL_DOSPROCESS
# define INCL_DOSERRORS
# define INCL_DOSMODULEMGR
# define INCL_DOSMEMMGR
# include <os2.h>
/* Disable and enable signals during nontrivial allocations */

void GC_disable_signals(void)
{
    ULONG nest;

    DosEnterMustComplete(&nest);
    if (nest != 1) ABORT("nested GC_disable_signals");
}

void GC_enable_signals(void)
{
    ULONG nest;

    DosExitMustComplete(&nest);
    if (nest != 0) ABORT("GC_enable_signals");
}
# if !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
     && !defined(MSWINCE) \
     && !defined(MACOS) && !defined(DJGPP) && !defined(DOS4GW) \
     && !defined(NOSYS) && !defined(ECOS)

# if defined(sigmask) && !defined(UTS4) && !defined(HURD)
    /* Use the traditional BSD interface */
#   define SIGSET_T int
#   define SIG_DEL(set, signal) (set) &= ~(sigmask(signal))
#   define SIG_FILL(set)  (set) = 0x7fffffff
        /* Setting the leading bit appears to provoke a bug in some  */
        /* longjmp implementations.  Most systems appear not to have */
        /* a signal 32.                                               */
#   define SIGSETMASK(old, new) (old) = sigsetmask(new)
# else
    /* Use POSIX/SYSV interface */
#   define SIGSET_T sigset_t
#   define SIG_DEL(set, signal) sigdelset(&(set), (signal))
#   define SIG_FILL(set) sigfillset(&set)
#   define SIGSETMASK(old, new) sigprocmask(SIG_SETMASK, &(new), &(old))
# endif
static GC_bool mask_initialized = FALSE;

static SIGSET_T new_mask;

static SIGSET_T old_mask;

static SIGSET_T dummy;

#if defined(GC_ASSERTIONS) && !defined(THREADS)
# define CHECK_SIGNALS
  int GC_sig_disabled = 0;
#endif
void GC_disable_signals(void)
{
    if (!mask_initialized) {
        SIG_FILL(new_mask);

        SIG_DEL(new_mask, SIGSEGV);
        SIG_DEL(new_mask, SIGILL);
        SIG_DEL(new_mask, SIGQUIT);
#       ifdef SIGBUS
            SIG_DEL(new_mask, SIGBUS);
#       endif
#       ifdef SIGIOT
            SIG_DEL(new_mask, SIGIOT);
#       endif
#       ifdef SIGEMT
            SIG_DEL(new_mask, SIGEMT);
#       endif
#       ifdef SIGTRAP
            SIG_DEL(new_mask, SIGTRAP);
#       endif
        mask_initialized = TRUE;
    }
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 0) ABORT("Nested disables");
        GC_sig_disabled++;
#   endif
    SIGSETMASK(old_mask, new_mask);
}
void GC_enable_signals(void)
{
#   ifdef CHECK_SIGNALS
        if (GC_sig_disabled != 1) ABORT("Unmatched enable");
        GC_sig_disabled--;
#   endif
    SIGSETMASK(dummy, old_mask);
}

# else
    /* Ivan Demakov: simplest way (to me) */
    void GC_disable_signals() { }
    void GC_enable_signals() { }
# endif
/* Find the page size */
word GC_page_size;

# if defined(MSWIN32) || defined(MSWINCE)
  void GC_setpagesize(void)
  {
    GetSystemInfo(&GC_sysinfo);
    GC_page_size = GC_sysinfo.dwPageSize;
  }
# else
#   if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
      void GC_setpagesize(void)
      {
        GC_page_size = GETPAGESIZE();
      }
#   else
      /* It's acceptable to fake it. */
      void GC_setpagesize(void)
      {
        GC_page_size = HBLKSIZE;
      }
#   endif
# endif
/*
 * Find the base of the stack.
 * Used only in single-threaded environment.
 * With threads, GC_mark_roots needs to know how to do this.
 * Called with allocator lock held.
 */
# if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
# define is_writable(prot) ((prot) == PAGE_READWRITE \
                            || (prot) == PAGE_WRITECOPY \
                            || (prot) == PAGE_EXECUTE_READWRITE \
                            || (prot) == PAGE_EXECUTE_WRITECOPY)
/* Return the number of bytes that are writable starting at p.  */
/* The pointer p is assumed to be page aligned.                 */
/* If base is not 0, *base becomes the beginning of the         */
/* allocation region containing p.                              */
word GC_get_writable_length(ptr_t p, ptr_t *base)
{
    MEMORY_BASIC_INFORMATION buf;
    word result;
    word protect;

    result = VirtualQuery(p, &buf, sizeof(buf));
    if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
    if (base != 0) *base = (ptr_t)(buf.AllocationBase);
    protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
    if (!is_writable(protect)) {
        return(0);
    }
    if (buf.State != MEM_COMMIT) return(0);
    return(buf.RegionSize);
}
int GC_get_stack_base(struct GC_stack_base *sb)
{
    int dummy;
    ptr_t sp = (ptr_t)(&dummy);
    ptr_t trunc_sp = (ptr_t)((word)sp & ~(GC_page_size - 1));
    word size = GC_get_writable_length(trunc_sp, 0);

    sb -> mem_base = trunc_sp + size;
    return GC_SUCCESS;
}
#define HAVE_GET_STACK_BASE

/* This is always called from the main thread. */
ptr_t GC_get_main_stack_base(void)
{
    struct GC_stack_base sb;

    GC_get_stack_base(&sb);
    return (ptr_t)sb.mem_base;
}

# endif /* MS Windows */
# ifdef BEOS
#   include <kernel/OS.h>
    ptr_t GC_get_main_stack_base(void){
        thread_info th;
        get_thread_info(find_thread(NULL),&th);
        return th.stack_end;
    }
# endif /* BEOS */

# ifdef HAIKU
#   include <kernel/OS.h>
    ptr_t GC_get_main_stack_base(void)
    {
        thread_info th;
        get_thread_info(find_thread(NULL), &th);
        return th.stack_end;
    }
# endif /* HAIKU */

# ifdef OS2
    ptr_t GC_get_main_stack_base(void)
    {
        PTIB ptib;
        PPIB ppib;

        if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
            GC_err_printf("DosGetInfoBlocks failed\n");
            ABORT("DosGetInfoBlocks failed\n");
        }
        return((ptr_t)(ptib -> tib_pstacklimit));
    }
# endif /* OS2 */

# ifdef AMIGA
#   define GC_AMIGA_SB
#   include "AmigaOS.c"
#   undef GC_AMIGA_SB
# endif /* AMIGA */
# if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)

    typedef void (*handler)(int);

#   if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
       || defined(HURD) || defined(NETBSD)
        static struct sigaction old_segv_act;
#       if defined(_sigargs) /* !Irix6.x */ || defined(HPUX) \
           || defined(HURD) || defined(NETBSD)
            static struct sigaction old_bus_act;
#       endif
#   else
        static handler old_segv_handler, old_bus_handler;
#   endif
    void GC_set_and_save_fault_handler(handler h)
    {
#       if defined(SUNOS5SIGS) || defined(IRIX5) \
           || defined(OSF1) || defined(HURD) || defined(NETBSD)
          struct sigaction act;

          act.sa_handler = h;
#         if 0 /* Was necessary for Solaris 2.3 and very temporary */
            act.sa_flags = SA_RESTART | SA_NODEFER;
#         else
            act.sa_flags = SA_RESTART;
#         endif

          (void) sigemptyset(&act.sa_mask);
#         ifdef GC_IRIX_THREADS
            /* Older versions have a bug related to retrieving and */
            /* setting a handler at the same time.                 */
            (void) sigaction(SIGSEGV, 0, &old_segv_act);
            (void) sigaction(SIGSEGV, &act, 0);
#         else
            (void) sigaction(SIGSEGV, &act, &old_segv_act);
#           if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
               || defined(HPUX) || defined(HURD) || defined(NETBSD)
              /* Under Irix 5.x or HP/UX, we may get SIGBUS.     */
              /* Pthreads doesn't exist under Irix 5.x, so we    */
              /* don't have to worry in the threads case.        */
              (void) sigaction(SIGBUS, &act, &old_bus_act);
#           endif
#         endif /* GC_IRIX_THREADS */
#       else /* use signal */
          old_segv_handler = signal(SIGSEGV, h);
#         ifdef SIGBUS
            old_bus_handler = signal(SIGBUS, h);
#         endif
#       endif
    }
# if defined(NEED_FIND_LIMIT) || \
     defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS)
  /* Some tools to implement HEURISTIC2 */
#   define MIN_PAGE_SIZE 256    /* Smallest conceivable page size, bytes */

    void GC_fault_handler(int sig)
    {
        LONGJMP(GC_jmp_buf, 1);
    }

    void GC_setup_temporary_fault_handler(void)
    {
        /* Handler is process-wide, so this should only happen in  */
        /* one thread at a time.                                   */
        GC_ASSERT(I_HOLD_LOCK());
        GC_set_and_save_fault_handler(GC_fault_handler);
    }
    void GC_reset_fault_handler(void)
    {
#       if defined(SUNOS5SIGS) || defined(IRIX5) \
           || defined(OSF1) || defined(HURD) || defined(NETBSD)
          (void) sigaction(SIGSEGV, &old_segv_act, 0);
#         if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
             || defined(HPUX) || defined(HURD) || defined(NETBSD)
            (void) sigaction(SIGBUS, &old_bus_act, 0);
#         endif
#       else
          (void) signal(SIGSEGV, old_segv_handler);
#         ifdef SIGBUS
            (void) signal(SIGBUS, old_bus_handler);
#         endif
#       endif
    }
    /* Return the first nonaddressable location > p (up) or       */
    /* the smallest location q s.t. [q,p) is addressable (!up).   */
    /* We assume that p (up) or p-1 (!up) is addressable.         */
    /* Requires allocation lock.                                  */
    ptr_t GC_find_limit_with_bound(ptr_t p, GC_bool up, ptr_t bound)
    {
        static volatile ptr_t result;
                /* Safer if static, since otherwise it may not be */
                /* preserved across the longjmp.  Can safely be   */
                /* static since it's only called with the         */
                /* allocation lock held.                          */

        GC_ASSERT(I_HOLD_LOCK());
        GC_setup_temporary_fault_handler();
        if (SETJMP(GC_jmp_buf) == 0) {
            result = (ptr_t)(((word)(p))
                             & ~(MIN_PAGE_SIZE-1));
            for (;;) {
                if (up) {
                    result += MIN_PAGE_SIZE;
                    if (result >= bound) return bound;
                } else {
                    result -= MIN_PAGE_SIZE;
                    if (result <= bound) return bound;
                }
                GC_noop1((word)(*result));
            }
        }
        GC_reset_fault_handler();
        if (!up) {
            result += MIN_PAGE_SIZE;
        }
        return(result);
    }
    ptr_t GC_find_limit(ptr_t p, GC_bool up)
    {
        if (up) {
            return GC_find_limit_with_bound(p, up, (ptr_t)(word)(-1));
        } else {
            return GC_find_limit_with_bound(p, up, 0);
        }
    }
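    /* Illustrative only: how the probing primitives above are typically */
    /* used.  Probing downward from a known-mapped address finds the     */
    /* start of its region; probing upward finds the first unmapped      */
    /* page.  Calls of exactly this shape appear later in this file,     */
    /* with DATAEND supplied by the port's configuration.                */
#if 0
    static void find_limit_usage_sketch(void)
    {
        ptr_t region_end = GC_find_limit((ptr_t)(DATAEND), TRUE);
        ptr_t region_start = GC_find_limit((ptr_t)(DATAEND), FALSE);
        /* [region_start, region_end) brackets the mapped region     */
        /* containing the end of the data segment.                   */
    }
#endif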
#if defined(ECOS) || defined(NOSYS)
  ptr_t GC_get_main_stack_base(void)
  {
    return STACKBOTTOM;
  }
#endif
#ifdef HPUX_STACKBOTTOM

#include <sys/param.h>
#include <sys/pstat.h>

  ptr_t GC_get_register_stack_base(void)
  {
    struct pst_vm_status vm_status;
    int i = 0;

    while (pstat_getprocvm(&vm_status, sizeof(vm_status), 0, i++) == 1) {
        if (vm_status.pst_type == PS_RSESTACK) {
            return (ptr_t) vm_status.pst_vaddr;
        }
    }

    /* old way to get the register stackbottom */
    return (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1)
                   & ~(BACKING_STORE_ALIGNMENT - 1));
  }

#endif /* HPUX_STACKBOTTOM */
#ifdef LINUX_STACKBOTTOM

#include <sys/types.h>
#include <sys/stat.h>

# define STAT_SKIP 27   /* Number of fields preceding startstack  */
                        /* field in /proc/self/stat               */

#ifdef USE_LIBC_PRIVATES
# pragma weak __libc_stack_end
  extern ptr_t __libc_stack_end;
#endif

# ifdef IA64
#   ifdef USE_LIBC_PRIVATES
#     pragma weak __libc_ia64_register_backing_store_base
      extern ptr_t __libc_ia64_register_backing_store_base;
#   endif

    ptr_t GC_get_register_stack_base(void)
    {
      ptr_t result;

#     ifdef USE_LIBC_PRIVATES
        if (0 != &__libc_ia64_register_backing_store_base
            && 0 != __libc_ia64_register_backing_store_base) {
          /* Glibc 2.2.4 has a bug such that for dynamically linked */
          /* executables __libc_ia64_register_backing_store_base is */
          /* defined but uninitialized during constructor calls.    */
          /* Hence we check for both nonzero address and value.     */
          return __libc_ia64_register_backing_store_base;
        }
#     endif
      result = backing_store_base_from_proc();
      if (0 == result) {
          result = GC_find_limit(GC_save_regs_in_stack(), FALSE);
          /* Now seems to work better than constant displacement   */
          /* heuristic used in 6.X versions.  The latter seems to  */
          /* fail for 2.6 kernels.                                 */
      }
      return result;
    }
# endif /* IA64 */
  ptr_t GC_linux_stack_base(void)
  {
    /* We read the stack base value from /proc/self/stat.  We do this */
    /* using direct I/O system calls in order to avoid calling malloc */
    /* in case REDIRECT_MALLOC is defined.                            */
#   define STAT_BUF_SIZE 4096
#   define STAT_READ read
          /* Should probably call the real read, if read is wrapped. */
    char stat_buf[STAT_BUF_SIZE];
    int f;
    char c;
    word result = 0;
    size_t i, buf_offset = 0;

    /* First try the easy way.  This should work for glibc 2.2.     */
    /* This fails in a prelinked ("prelink" command) executable     */
    /* since the correct value of __libc_stack_end never            */
    /* becomes visible to us.  The second test works around         */
    /* this.                                                        */
#   ifdef USE_LIBC_PRIVATES
      if (0 != &__libc_stack_end && 0 != __libc_stack_end) {
#       if defined(IA64)
          /* Some versions of glibc set the address 16 bytes too */
          /* low while the initialization code is running.       */
          if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
            return __libc_stack_end + 0x10;
          } /* Otherwise it's not safe to add 16 bytes and we fall */
            /* back to using /proc.                                */
#       elif defined(SPARC)
          /* Older versions of glibc for 64-bit Sparc do not set
           * this variable correctly, it gets set to either zero
           * or one.
           */
          if (__libc_stack_end != (ptr_t) (unsigned long)0x1)
            return __libc_stack_end;
#       else
          return __libc_stack_end;
#       endif
      }
#   endif
    f = open("/proc/self/stat", O_RDONLY);
    if (f < 0 || STAT_READ(f, stat_buf, STAT_BUF_SIZE) < 2 * STAT_SKIP) {
        ABORT("Couldn't read /proc/self/stat");
    }
    c = stat_buf[buf_offset++];
    /* Skip the required number of fields.  This number is hopefully */
    /* constant across all Linux implementations.                    */
    for (i = 0; i < STAT_SKIP; ++i) {
        while (isspace(c)) c = stat_buf[buf_offset++];
        while (!isspace(c)) c = stat_buf[buf_offset++];
    }
    while (isspace(c)) c = stat_buf[buf_offset++];
    while (isdigit(c)) {
        result *= 10;
        result += c - '0';
        c = stat_buf[buf_offset++];
    }
    close(f);
    if (result < 0x10000000) ABORT("Absurd stack bottom value");
    return (ptr_t)result;
  }

#endif /* LINUX_STACKBOTTOM */
#ifdef FREEBSD_STACKBOTTOM

/* This uses an undocumented sysctl call, but at least one expert  */
/* believes it will stay.                                          */

#include <sys/types.h>
#include <sys/sysctl.h>

  ptr_t GC_freebsd_stack_base(void)
  {
    int nm[2] = {CTL_KERN, KERN_USRSTACK};
    ptr_t base;
    size_t len = sizeof(ptr_t);
    int r = sysctl(nm, 2, &base, &len, NULL, 0);

    if (r) ABORT("Error getting stack base");

    return base;
  }

#endif /* FREEBSD_STACKBOTTOM */
#if !defined(BEOS) && !defined(AMIGA) && !defined(MSWIN32) \
    && !defined(MSWINCE) && !defined(OS2) && !defined(NOSYS) && !defined(ECOS) \
    && !defined(CYGWIN32) && !defined(HAIKU)

ptr_t GC_get_main_stack_base(void)
{
    word dummy;
    ptr_t result;

#   if defined(HEURISTIC1) || defined(HEURISTIC2)
#     define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
#   endif
#   ifdef STACKBOTTOM
      return(STACKBOTTOM);
#   else
#     ifdef HEURISTIC1
#       ifdef STACK_GROWS_DOWN
          result = (ptr_t)((((word)(&dummy))
                            + STACKBOTTOM_ALIGNMENT_M1)
                           & ~STACKBOTTOM_ALIGNMENT_M1);
#       else
          result = (ptr_t)(((word)(&dummy))
                           & ~STACKBOTTOM_ALIGNMENT_M1);
#       endif
#     endif /* HEURISTIC1 */
#     ifdef LINUX_STACKBOTTOM
        result = GC_linux_stack_base();
#     endif
#     ifdef FREEBSD_STACKBOTTOM
        result = GC_freebsd_stack_base();
#     endif
#     ifdef HEURISTIC2
#       ifdef STACK_GROWS_DOWN
          result = GC_find_limit((ptr_t)(&dummy), TRUE);
#         ifdef HEURISTIC2_LIMIT
            if (result > HEURISTIC2_LIMIT
                && (ptr_t)(&dummy) < HEURISTIC2_LIMIT) {
                result = HEURISTIC2_LIMIT;
            }
#         endif
#       else
          result = GC_find_limit((ptr_t)(&dummy), FALSE);
#         ifdef HEURISTIC2_LIMIT
            if (result < HEURISTIC2_LIMIT
                && (ptr_t)(&dummy) > HEURISTIC2_LIMIT) {
                result = HEURISTIC2_LIMIT;
            }
#         endif
#       endif
#     endif /* HEURISTIC2 */
#     ifdef STACK_GROWS_DOWN
        if (result == 0) result = (ptr_t)(signed_word)(-sizeof(ptr_t));
#     endif
      return(result);
#   endif /* STACKBOTTOM */
}

#endif /* !AMIGA, !OS/2, !MS Windows, !BEOS, !NOSYS, !ECOS, !HAIKU */
#if defined(GC_LINUX_THREADS) && !defined(HAVE_GET_STACK_BASE)

#include <pthread.h>

# ifdef IA64
    ptr_t GC_greatest_stack_base_below(ptr_t bound);
        /* From pthread_support.c */
# endif
int GC_get_stack_base(struct GC_stack_base *b)
{
    pthread_attr_t attr;
    size_t size;

    if (pthread_getattr_np(pthread_self(), &attr) != 0) {
        WARN("pthread_getattr_np failed\n", 0);
        return GC_UNIMPLEMENTED;
    }
    if (pthread_attr_getstack(&attr, &(b -> mem_base), &size) != 0) {
        ABORT("pthread_attr_getstack failed");
    }
#   ifdef STACK_GROWS_DOWN
        b -> mem_base = (char *)(b -> mem_base) + size;
#   endif
#   ifdef IA64
      /* We could try backing_store_base_from_proc, but that's safe   */
      /* only if no mappings are being asynchronously created.        */
      /* Subtracting the size from the stack base doesn't work for at */
      /* least the main thread.                                       */
      {
        ptr_t bsp = GC_save_regs_in_stack();
        ptr_t next_stack = GC_greatest_stack_base_below(bsp);
        if (0 == next_stack) {
            b -> reg_base = GC_find_limit(bsp, FALSE);
        } else {
            /* Avoid walking backwards into preceding memory stack and */
            /* growing it.                                             */
            b -> reg_base = GC_find_limit_with_bound(bsp, FALSE, next_stack);
        }
      }
#   endif
    return GC_SUCCESS;
}

#define HAVE_GET_STACK_BASE

#endif /* GC_LINUX_THREADS */
#ifndef HAVE_GET_STACK_BASE
/* Retrieve stack base.                                        */
/* Using the GC_find_limit version is risky.                   */
/* On IA64, for example, there is no guard page between the    */
/* stack of one thread and the register backing store of the   */
/* next.  Thus this is likely to identify way too large a      */
/* "stack" and thus at least result in disastrous performance. */
/* FIXME - Implement better strategies here.                   */
int GC_get_stack_base(struct GC_stack_base *b)
{
    word dummy;

#   ifdef NEED_FIND_LIMIT
#     ifdef STACK_GROWS_DOWN
        b -> mem_base = GC_find_limit((ptr_t)(&dummy), TRUE);
#       ifdef IA64
          b -> reg_base = GC_find_limit(GC_save_regs_in_stack(), FALSE);
#       endif
#     else
        b -> mem_base = GC_find_limit(&dummy, FALSE);
#     endif
      return GC_SUCCESS;
#   else
      return GC_UNIMPLEMENTED;
#   endif
}
#endif /* !HAVE_GET_STACK_BASE */
/*
 * Register static data segment(s) as roots.
 * If more data segments are added later then they need to be registered
 * at that point (as we do with SunOS dynamic loading),
 * or GC_mark_roots needs to check for them (as we do with PCR).
 * Called with allocator lock held.
 */
# ifdef OS2

void GC_register_data_segments(void)
{
    PTIB ptib;
    PPIB ppib;
    HMODULE module_handle;
#   define PBUFSIZ 512
    UCHAR path[PBUFSIZ];
    FILE * myexefile;
    struct exe_hdr hdrdos;      /* MSDOS header.                 */
    struct e32_exe hdr386;      /* Real header for my executable */
    struct o32_obj seg;         /* Current segment               */
    int nsegs;
    int flags;
    if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
        GC_err_printf("DosGetInfoBlocks failed\n");
        ABORT("DosGetInfoBlocks failed\n");
    }
    module_handle = ppib -> pib_hmte;
    if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
        GC_err_printf("DosQueryModuleName failed\n");
        ABORT("DosQueryModuleName failed\n");
    }
    myexefile = fopen(path, "rb");
    if (myexefile == 0) {
        GC_err_puts("Couldn't open executable ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Failed to open executable\n");
    }
    if (fread((char *)(&hdrdos), 1, sizeof hdrdos, myexefile) < sizeof hdrdos) {
        GC_err_puts("Couldn't read MSDOS header from ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Couldn't read MSDOS header");
    }
    if (E_MAGIC(hdrdos) != EMAGIC) {
        GC_err_puts("Executable has wrong DOS magic number: ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Bad DOS magic number");
    }
    if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
        GC_err_puts("Seek to new header failed in ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Seek to new header failed");
    }
    if (fread((char *)(&hdr386), 1, sizeof hdr386, myexefile) < sizeof hdr386) {
        GC_err_puts("Couldn't read OS/2 header from ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Couldn't read OS/2 header");
    }
    if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
        GC_err_puts("Executable has wrong OS/2 magic number: ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Bad OS/2 magic number");
    }
    if (E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
        GC_err_puts("Executable has wrong byte order: ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Bad byte order");
    }
    if (E32_CPU(hdr386) == E32CPU286) {
        GC_err_puts("GC can't handle 80286 executables: ");
        GC_err_puts(path); GC_err_puts("\n");
        EXIT();
    }
    if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
              SEEK_SET) != 0) {
        GC_err_puts("Seek to object table failed: ");
        GC_err_puts(path); GC_err_puts("\n");
        ABORT("Seek to object table failed");
    }
    for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
        if (fread((char *)(&seg), 1, sizeof seg, myexefile) < sizeof seg) {
            GC_err_puts("Couldn't read obj table entry from ");
            GC_err_puts(path); GC_err_puts("\n");
            ABORT("Couldn't read obj table entry");
        }
        flags = O32_FLAGS(seg);
        if (!(flags & OBJWRITE)) continue;
        if (!(flags & OBJREAD)) continue;
        if (flags & OBJINVALID) {
            GC_err_printf("Object with invalid pages?\n");
            continue;
        }
        GC_add_roots_inner(O32_BASE(seg), O32_BASE(seg)+O32_SIZE(seg), FALSE);
    }
}
# else /* !OS2 */

# if defined(MSWIN32) || defined(MSWINCE)

  /* Unfortunately, we have to handle win32s very differently from NT,  */
  /* since VirtualQuery has very different semantics.  In particular,   */
  /* under win32s a VirtualQuery call on an unmapped page returns an    */
  /* invalid result.  Under NT, GC_register_data_segments is a no-op    */
  /* and all real work is done by GC_register_dynamic_libraries.  Under */
  /* win32s, we cannot find the data segments associated with dll's.    */
  /* We register the main data segment here.                            */
  GC_bool GC_no_win32_dlls = FALSE;
        /* This used to be set for gcc, to avoid dealing with         */
        /* the structured exception handling issues.  But we now have */
        /* assembly code to do that right.                            */
# if defined(GWW_VDB)

#   ifndef _BASETSD_H_
      typedef ULONG * PULONG_PTR;
#   endif
    typedef UINT (WINAPI * GetWriteWatch_type)(
        DWORD, PVOID, SIZE_T, PVOID*, PULONG_PTR, PULONG);
    static GetWriteWatch_type GetWriteWatch_func;
    static DWORD GetWriteWatch_alloc_flag;

#   define GC_GWW_AVAILABLE() (GetWriteWatch_func != NULL)
    static void detect_GetWriteWatch(void)
    {
      static GC_bool done;
      if (done)
        return;

      GetWriteWatch_func = (GetWriteWatch_type)
          GetProcAddress(GetModuleHandle("kernel32.dll"), "GetWriteWatch");
      if (GetWriteWatch_func != NULL) {
        /* Also check whether VirtualAlloc accepts MEM_WRITE_WATCH, */
        /* as some versions of kernel32.dll have one but not the    */
        /* other, making the feature completely broken.             */
        void * page = VirtualAlloc(NULL, GC_page_size,
                                   MEM_WRITE_WATCH | MEM_RESERVE,
                                   PAGE_READWRITE);
        if (page != NULL) {
          PVOID pages[16];
          ULONG_PTR count = 16;
          DWORD page_size;
          /* Check that it actually works.  In spite of some  */
          /* documentation it actually seems to exist on W2K. */
          /* This test may be unnecessary, but ...            */
          if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET,
                                 page, GC_page_size,
                                 pages, &count,
                                 &page_size) != 0) {
            /* GetWriteWatch always fails. */
            GetWriteWatch_func = NULL;
          } else {
            GetWriteWatch_alloc_flag = MEM_WRITE_WATCH;
          }
          VirtualFree(page, 0 /* dwSize must be 0 with MEM_RELEASE */,
                      MEM_RELEASE);
        } else {
          /* GetWriteWatch will be useless. */
          GetWriteWatch_func = NULL;
        }
      }
      if (GC_print_stats) {
        if (GetWriteWatch_func == NULL) {
          GC_log_printf("Did not find a usable GetWriteWatch()\n");
        } else {
          GC_log_printf("Using GetWriteWatch()\n");
        }
      }
      done = TRUE;
    }

# endif /* GWW_VDB */
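/* Illustrative only: once detect_GetWriteWatch has succeeded, pages     */
/* written in a MEM_WRITE_WATCH region can be queried (and the watch     */
/* state reset) roughly as below.  region_base/region_size stand for a   */
/* hypothetical region allocated earlier with the MEM_WRITE_WATCH flag;  */
/* the fixed buffer size and missing error handling are placeholders.    */
#if 0
static void gww_query_sketch(void * region_base, SIZE_T region_size)
{
    PVOID dirty_pages[64];
    ULONG_PTR count = 64;
    DWORD granularity;

    if (GC_GWW_AVAILABLE()
        && GetWriteWatch_func(WRITE_WATCH_FLAG_RESET, region_base,
                              region_size, dirty_pages, &count,
                              &granularity) == 0) {
        /* dirty_pages holds up to 64 addresses of pages written since */
        /* the last reset; granularity is the system page size.        */
    }
}
#endif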
  GC_bool GC_wnt = FALSE;
        /* This is a Windows NT derivative, i.e. NT, W2K, XP or later. */

  void GC_init_win32(void)
  {
    /* If we're running under win32s, assume that no DLLs will be loaded. */
    /* I doubt anyone still runs win32s, but ...                          */
    DWORD v = GetVersion();
    GC_wnt = !(v & 0x80000000);
    GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3);
  }
  /* Return the smallest address a such that VirtualQuery            */
  /* returns correct results for all addresses between a and start.  */
  /* Assumes VirtualQuery returns correct information for start.     */
  ptr_t GC_least_described_address(ptr_t start)
  {
    MEMORY_BASIC_INFORMATION buf;
    size_t result;
    LPVOID limit;
    ptr_t p;
    LPVOID q;

    limit = GC_sysinfo.lpMinimumApplicationAddress;
    p = (ptr_t)((word)start & ~(GC_page_size - 1));
    for (;;) {
        q = (LPVOID)(p - GC_page_size);
        if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
        result = VirtualQuery(q, &buf, sizeof(buf));
        if (result != sizeof(buf) || buf.AllocationBase == 0) break;
        p = (ptr_t)(buf.AllocationBase);
    }
    return p;
  }
# ifndef REDIRECT_MALLOC
  /* We maintain a linked list of AllocationBase values that we know    */
  /* correspond to malloc heap sections.  Currently this is only called */
  /* during a GC.  But there is some hope that for long running         */
  /* programs we will eventually see most heap sections.                */

  /* In the long run, it would be more reliable to occasionally walk    */
  /* the malloc heap with HeapWalk on the default heap.  But that       */
  /* apparently works only for NT-based Windows.                        */

  /* In the long run, a better data structure would also be nice ...    */
  struct GC_malloc_heap_list {
    void * allocation_base;
    struct GC_malloc_heap_list *next;
  } *GC_malloc_heap_l = 0;
  /* Is p the base of one of the malloc heap sections we already know */
  /* about?                                                            */
  GC_bool GC_is_malloc_heap_base(ptr_t p)
  {
    struct GC_malloc_heap_list *q = GC_malloc_heap_l;

    while (0 != q) {
      if (q -> allocation_base == p) return TRUE;
      q = q -> next;
    }
    return FALSE;
  }
  void *GC_get_allocation_base(void *p)
  {
    MEMORY_BASIC_INFORMATION buf;
    size_t result = VirtualQuery(p, &buf, sizeof(buf));
    if (result != sizeof(buf)) {
      ABORT("Weird VirtualQuery result");
    }
    return buf.AllocationBase;
  }

  size_t GC_max_root_size = 100000;     /* Approx. largest root size. */
  void GC_add_current_malloc_heap()
  {
    struct GC_malloc_heap_list *new_l =
                 malloc(sizeof(struct GC_malloc_heap_list));
    void * candidate;

    if (new_l == 0) return;
    candidate = GC_get_allocation_base(new_l);
    if (GC_is_malloc_heap_base(candidate)) {
        /* Try a little harder to find malloc heap. */
        size_t req_size = 10000;
        do {
            void *p = malloc(req_size);
            if (0 == p) { free(new_l); return; }
            candidate = GC_get_allocation_base(p);
            free(p);
            req_size *= 2;
        } while (GC_is_malloc_heap_base(candidate)
                 && req_size < GC_max_root_size/10 && req_size < 500000);
        if (GC_is_malloc_heap_base(candidate)) {
            free(new_l); return;
        }
    }
    if (GC_print_stats)
        GC_log_printf("Found new system malloc AllocationBase at %p\n",
                      candidate);
    new_l -> allocation_base = candidate;
    new_l -> next = GC_malloc_heap_l;
    GC_malloc_heap_l = new_l;
  }
# endif /* !REDIRECT_MALLOC */
  /* Is p the start of either the malloc heap, or of one of our */
  /* heap sections?                                             */
  GC_bool GC_is_heap_base(ptr_t p)
  {
     unsigned i;

#    ifndef REDIRECT_MALLOC
       static word last_gc_no = (word)(-1);

       if (last_gc_no != GC_gc_no) {
         GC_add_current_malloc_heap();
         last_gc_no = GC_gc_no;
       }
       if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
       if (GC_is_malloc_heap_base(p)) return TRUE;
#    endif
     for (i = 0; i < GC_n_heap_bases; i++) {
       if (GC_heap_bases[i] == p) return TRUE;
     }
     return FALSE;
  }
  void GC_register_root_section(ptr_t static_root)
  {
      MEMORY_BASIC_INFORMATION buf;
      size_t result;
      DWORD protect;
      LPVOID p;
      char * base;
      char * limit, * new_limit;

      if (!GC_no_win32_dlls) return;
      p = base = limit = GC_least_described_address(static_root);
      while (p < GC_sysinfo.lpMaximumApplicationAddress) {
        result = VirtualQuery(p, &buf, sizeof(buf));
        if (result != sizeof(buf) || buf.AllocationBase == 0
            || GC_is_heap_base(buf.AllocationBase)) break;
        new_limit = (char *)p + buf.RegionSize;
        protect = buf.Protect;
        if (buf.State == MEM_COMMIT
            && is_writable(protect)) {
            if ((char *)p == limit) {
                limit = new_limit;
            } else {
                if (base != limit) GC_add_roots_inner(base, limit, FALSE);
                base = p;
                limit = new_limit;
            }
        }
        if (p > (LPVOID)new_limit /* overflow */) break;
        p = (LPVOID)new_limit;
      }
      if (base != limit) GC_add_roots_inner(base, limit, FALSE);
  }

  void GC_register_data_segments()
  {
      static char dummy;

      GC_register_root_section((ptr_t)(&dummy));
  }

# else /* !OS2 && !Windows */
# if (defined(SVR4) || defined(AUX) || defined(DGUX) \
      || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
  ptr_t GC_SysVGetDataStart(size_t max_page_size, ptr_t etext_addr)
  {
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                    & ~(sizeof(word) - 1);
        /* etext rounded to word boundary */
    word next_page = ((text_end + (word)max_page_size - 1)
                      & ~((word)max_page_size - 1));
    word page_offset = (text_end & ((word)max_page_size - 1));
    volatile char * result = (char *)(next_page + page_offset);
        /* Note that this isn't equivalent to just adding          */
        /* max_page_size to &etext if &etext is at a page boundary */

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        /* Try writing to the address.  */
        *result = *result;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* We got here via a longjmp.  The address is not readable.      */
        /* This is known to happen under Solaris 2.4 + gcc, which places */
        /* string constants in the text segment, but after etext.        */
        /* Use plan B.  Note that we now know there is a gap between     */
        /* text and data segments, so plan A bought us something.        */
        result = (char *)GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return((ptr_t)result);
  }
# endif
# if defined(FREEBSD) && (defined(I386) || defined(X86_64) \
     || defined(powerpc) || defined(__powerpc__)) && !defined(PCR)
/* It's unclear whether this should be identical to the above, or        */
/* whether it should apply to non-X86 architectures.                     */
/* For now we don't assume that there is always an empty page after      */
/* etext.  But in some cases there actually seems to be slightly more.   */
/* This also deals with holes between read-only data and writable data.  */
ptr_t GC_FreeBSDGetDataStart(size_t max_page_size, ptr_t etext_addr)
{
    word text_end = ((word)(etext_addr) + sizeof(word) - 1)
                     & ~(sizeof(word) - 1);
        /* etext rounded to word boundary */
    volatile word next_page = (text_end + (word)max_page_size - 1)
                              & ~((word)max_page_size - 1);
    volatile ptr_t result = (ptr_t)text_end;

    GC_setup_temporary_fault_handler();
    if (SETJMP(GC_jmp_buf) == 0) {
        /* Try reading at the address.                        */
        /* This should happen before there is another thread. */
        for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
            *(volatile char *)next_page;
        GC_reset_fault_handler();
    } else {
        GC_reset_fault_handler();
        /* As above, we go to plan B */
        result = GC_find_limit((ptr_t)(DATAEND), FALSE);
    }
    return(result);
}
# endif
# ifdef AMIGA
#   define GC_AMIGA_DS
#   include "AmigaOS.c"
#   undef GC_AMIGA_DS
# else /* !OS2 && !Windows && !AMIGA */
void GC_register_data_segments(void)
{
#   if !defined(PCR) && !defined(MACOS)
#     if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
        /* As of Solaris 2.3, the Solaris threads implementation     */
        /* allocates the data structure for the initial thread with  */
        /* sbrk at process startup.  It needs to be scanned, so that */
        /* we don't lose some malloc allocated data structures       */
        /* hanging from it.  We're on thin ice here ...              */
        extern caddr_t sbrk();

        GC_add_roots_inner(DATASTART, (ptr_t)sbrk(0), FALSE);
#     else
        GC_add_roots_inner(DATASTART, (ptr_t)(DATAEND), FALSE);
#       if defined(DATASTART2)
          GC_add_roots_inner(DATASTART2, (ptr_t)(DATAEND2), FALSE);
#       endif
#     endif
#   endif
#   if defined(MACOS)
#     if defined(THINK_C)
        extern void* GC_MacGetDataStart(void);
        /* globals begin above stack and end at a5. */
        GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
                           (ptr_t)LMGetCurrentA5(), FALSE);
#     else
#       if defined(__MWERKS__)
#         if !__POWERPC__
            extern void* GC_MacGetDataStart(void);
            /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
#           if __option(far_data)
              extern void* GC_MacGetDataEnd(void);
#           endif
            /* globals begin above stack and end at a5. */
            GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
                               (ptr_t)LMGetCurrentA5(), FALSE);
            /* MATTHEW: Handle Far Globals */
#           if __option(far_data)
              /* Far globals follow the QD globals: */
              GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
                                 (ptr_t)GC_MacGetDataEnd(), FALSE);
#           endif
#         else
            extern char __data_start__[], __data_end__[];
            GC_add_roots_inner((ptr_t)&__data_start__,
                               (ptr_t)&__data_end__, FALSE);
#         endif /* __POWERPC__ */
#       endif /* __MWERKS__ */
#     endif /* !THINK_C */
#   endif /* MACOS */

    /* Dynamic libraries are added at every collection, since they may */
    /* change.                                                         */
}

# endif /* !AMIGA */
# endif /* !MSWIN32 && !MSWINCE */
/*
 * Auxiliary routines for obtaining memory from OS.
 */

# if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
     && !defined(MSWIN32) && !defined(MSWINCE) \
     && !defined(MACOS) && !defined(DOS4GW) && !defined(NONSTOP)

# define SBRK_ARG_T ptrdiff_t
#if defined(MMAP_SUPPORTED)

#ifdef USE_MMAP_FIXED
#   define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
        /* Seems to yield better performance on Solaris 2, but can      */
        /* be unreliable if something is already mapped at the address. */
#else
#   define GC_MMAP_FLAGS MAP_PRIVATE
#endif

#ifdef USE_MMAP_ANON
# define zero_fd -1
# if defined(MAP_ANONYMOUS)
#   define OPT_MAP_ANON MAP_ANONYMOUS
# else
#   define OPT_MAP_ANON MAP_ANON
# endif
#else
  static int zero_fd;
# define OPT_MAP_ANON 0
#endif

#ifndef HEAP_START
#   define HEAP_START 0
#endif
ptr_t GC_unix_mmap_get_mem(word bytes)
{
    void *result;
    static ptr_t last_addr = HEAP_START;

#   ifndef USE_MMAP_ANON
      static GC_bool initialized = FALSE;

      if (!initialized) {
          zero_fd = open("/dev/zero", O_RDONLY);
          fcntl(zero_fd, F_SETFD, FD_CLOEXEC);
          initialized = TRUE;
      }
#   endif

    if (bytes & (GC_page_size-1)) ABORT("Bad GET_MEM arg");
    result = mmap(last_addr, bytes, PROT_READ | PROT_WRITE | OPT_PROT_EXEC,
                  GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
    if (result == MAP_FAILED) return(0);
    last_addr = (ptr_t)result + bytes + GC_page_size - 1;
    last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
#   if !defined(LINUX)
      if (last_addr == 0) {
        /* Oops.  We got the end of the address space.  This isn't */
        /* usable by arbitrary C code, since one-past-end pointers */
        /* don't work, so we discard it and try again.             */
        munmap(result, (size_t)(-GC_page_size) - (size_t)result);
                        /* Leave last page mapped, so we can't repeat. */
        return GC_unix_mmap_get_mem(bytes);
      }
#   else
      GC_ASSERT(last_addr != 0);
#   endif
    return((ptr_t)result);
}

# endif  /* MMAP_SUPPORTED */
#if defined(USE_MMAP)

ptr_t GC_unix_get_mem(word bytes)
{
    return GC_unix_mmap_get_mem(bytes);
}

#else /* Not USE_MMAP */
ptr_t GC_unix_sbrk_get_mem(word bytes)
{
  ptr_t result;
# ifdef IRIX5
    /* Bare sbrk isn't thread safe.  Play by malloc rules.    */
    /* The equivalent may be needed on other systems as well. */
    __LOCK_MALLOC();
# endif
  {
    ptr_t cur_brk = (ptr_t)sbrk(0);
    SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);

    if ((SBRK_ARG_T)bytes < 0) {
        result = 0; /* too big */
        goto out;
    }
    if (lsbs != 0) {
        if((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) {
            result = 0;
            goto out;
        }
    }
#   ifdef ADD_HEAP_GUARD_PAGES
      /* This is useful for catching severe memory overwrite problems that */
      /* span heap sections.  It shouldn't otherwise be turned on.         */
      {
        ptr_t guard = (ptr_t)sbrk((SBRK_ARG_T)GC_page_size);
        if (mprotect(guard, GC_page_size, PROT_NONE) != 0)
            ABORT("ADD_HEAP_GUARD_PAGES: mprotect failed");
      }
#   endif /* ADD_HEAP_GUARD_PAGES */
    result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
    if (result == (ptr_t)(-1)) result = 0;
  }
 out:
# ifdef IRIX5
    __UNLOCK_MALLOC();
# endif
  return(result);
}
#if defined(MMAP_SUPPORTED)

/* By default, we try both sbrk and mmap, in that order. */
ptr_t GC_unix_get_mem(word bytes)
{
    static GC_bool sbrk_failed = FALSE;
    ptr_t result = 0;

    if (!sbrk_failed) result = GC_unix_sbrk_get_mem(bytes);
    if (0 == result) {
        sbrk_failed = TRUE;
        result = GC_unix_mmap_get_mem(bytes);
    }
    if (0 == result) {
        /* Try sbrk again, in case sbrk memory became available. */
        result = GC_unix_sbrk_get_mem(bytes);
    }
    return result;
}

#else /* !MMAP_SUPPORTED */

ptr_t GC_unix_get_mem(word bytes)
{
    return GC_unix_sbrk_get_mem(bytes);
}

#endif

#endif /* Not USE_MMAP */
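/* Illustrative only: callers of GC_unix_get_mem must pass a multiple of */
/* GC_page_size (see the "Bad GET_MEM arg" check in GC_unix_mmap_get_mem */
/* above).  A hypothetical caller would round first, as sketched here;   */
/* request_size is not a real collector variable.                        */
#if 0
static ptr_t get_mem_usage_sketch(word request_size)
{
    word bytes = (request_size + GC_page_size - 1) & ~(GC_page_size - 1);
    ptr_t space = GC_unix_get_mem(bytes);
    if (0 == space) ABORT("Out of memory");
    return space;
}
#endif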
# ifdef OS2

void * os2_alloc(size_t bytes)
{
    void * result;

    if (DosAllocMem(&result, bytes, PAG_EXECUTE | PAG_READ |
                                    PAG_WRITE | PAG_COMMIT)
                    != NO_ERROR) {
        return(0);
    }
    if (result == 0) return(os2_alloc(bytes));
    return(result);
}

# endif /* OS2 */
# if defined(MSWIN32) || defined(MSWINCE)
SYSTEM_INFO GC_sysinfo;
# endif

# ifdef MSWIN32

# ifdef USE_GLOBAL_ALLOC
#   define GLOBAL_ALLOC_TEST 1
# else
#   define GLOBAL_ALLOC_TEST GC_no_win32_dlls
# endif

word GC_n_heap_bases = 0;

word GC_mem_top_down = 0; /* Change to MEM_TOP_DOWN for better 64-bit  */
                          /* testing.  Otherwise all addresses tend to */
                          /* end up in first 4GB, hiding bugs.         */
ptr_t GC_win32_get_mem(word bytes)
{
    ptr_t result;

    if (GLOBAL_ALLOC_TEST) {
        /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
        /* There are also unconfirmed rumors of other        */
        /* problems, so we dodge the issue.                  */
        result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
        result = (ptr_t)(((word)result + HBLKSIZE - 1) & ~(HBLKSIZE-1));
    } else {
        /* VirtualProtect only works on regions returned by a   */
        /* single VirtualAlloc call.  Thus we allocate one      */
        /* extra page, which will prevent merging of blocks     */
        /* in separate regions, and eliminate any temptation    */
        /* to call VirtualProtect on a range spanning regions.  */
        /* This wastes a small amount of memory, and risks      */
        /* increased fragmentation.  But better alternatives    */
        /* would require effort.                                */
        /* Pass the MEM_WRITE_WATCH only if GetWriteWatch-based */
        /* VDBs are enabled and the GetWriteWatch function is   */
        /* available.  Otherwise we waste resources or possibly */
        /* cause VirtualAlloc to fail (observed in Windows 2000 */
        /* SP2).                                                */
        result = (ptr_t) VirtualAlloc(NULL, bytes + 1,
#                                     ifdef GWW_VDB
                                        GetWriteWatch_alloc_flag |
#                                     endif
                                      MEM_COMMIT | MEM_RESERVE
                                      | GC_mem_top_down,
                                      PAGE_EXECUTE_READWRITE);
    }
    if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
        /* If I read the documentation correctly, this can    */
        /* only happen if HBLKSIZE > 64k or not a power of 2. */
    if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
    GC_heap_bases[GC_n_heap_bases++] = result;
    return(result);
}
void GC_win32_free_heap(void)
{
    if (GC_no_win32_dlls) {
        while (GC_n_heap_bases > 0) {
            GlobalFree (GC_heap_bases[--GC_n_heap_bases]);
            GC_heap_bases[GC_n_heap_bases] = 0;
        }
    }
}
# endif /* MSWIN32 */
# ifdef AMIGA
#   define GC_AMIGA_AM
#   include "AmigaOS.c"
#   undef GC_AMIGA_AM
# endif
# ifdef MSWINCE
word GC_n_heap_bases = 0;

ptr_t GC_wince_get_mem(word bytes)
{
    ptr_t result;
    word i;

    /* Round up allocation size to multiple of page size */
    bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);

    /* Try to find reserved, uncommitted pages */
    for (i = 0; i < GC_n_heap_bases; i++) {
        if (((word)(-(signed_word)GC_heap_lengths[i])
             & (GC_sysinfo.dwAllocationGranularity-1))
            >= bytes) {
            result = GC_heap_bases[i] + GC_heap_lengths[i];
            break;
        }
    }

    if (i == GC_n_heap_bases) {
        /* Reserve more pages */
        word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
                         & ~(GC_sysinfo.dwAllocationGranularity-1);
        /* If we ever support MPROTECT_VDB here, we will probably need to    */
        /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
        /* never spans regions.  It seems to be OK for a VirtualFree         */
        /* argument to span regions, so we should be OK for now.             */
        result = (ptr_t) VirtualAlloc(NULL, res_bytes,
                                      MEM_RESERVE | MEM_TOP_DOWN,
                                      PAGE_EXECUTE_READWRITE);
        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
            /* If I read the documentation correctly, this can    */
            /* only happen if HBLKSIZE > 64k or not a power of 2. */
        if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
        GC_heap_bases[GC_n_heap_bases] = result;
        GC_heap_lengths[GC_n_heap_bases] = 0;
        GC_n_heap_bases++;
    }

    /* Commit pages */
    result = (ptr_t) VirtualAlloc(result, bytes,
                                  MEM_COMMIT,
                                  PAGE_EXECUTE_READWRITE);
    if (result != NULL) {
        if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
        GC_heap_lengths[i] += bytes;
    }

    return(result);
}
# endif /* MSWINCE */
#ifdef USE_MUNMAP

/* For now, this only works on Win32/WinCE and some Unix-like */
/* systems.  If you have something else, don't define         */
/* USE_MUNMAP.                                                */
/* We assume ANSI C to support this feature.                  */

#if !defined(MSWIN32) && !defined(MSWINCE)

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>

#endif
/* Compute a page aligned starting address for the unmap       */
/* operation on a block of size bytes starting at start.       */
/* Return 0 if the block is too small to make this feasible.   */
ptr_t GC_unmap_start(ptr_t start, size_t bytes)
{
    ptr_t result = start;
    /* Round start to next page boundary. */
    result += GC_page_size - 1;
    result = (ptr_t)((word)result & ~(GC_page_size - 1));
    if (result + GC_page_size > start + bytes) return 0;
    return result;
}

/* Compute end address for an unmap operation on the indicated */
/* block.                                                       */
ptr_t GC_unmap_end(ptr_t start, size_t bytes)
{
    ptr_t end_addr = start + bytes;
    end_addr = (ptr_t)((word)end_addr & ~(GC_page_size - 1));
    return end_addr;
}
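/* Worked example (illustrative): with GC_page_size = 4096, a block at  */
/* start = 0x10123 with bytes = 0x5000 (so ending at 0x15123) gives     */
/*   GC_unmap_start -> 0x11000 (first page boundary at or above start), */
/*   GC_unmap_end   -> 0x15000 (last page boundary at or below the end),*/
/* so only the fully contained pages [0x11000, 0x15000) are unmapped.   */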
/* Under Win32/WinCE we commit (map) and decommit (unmap)      */
/* memory using VirtualAlloc and VirtualFree.  These functions */
/* work on individual allocations of virtual memory, made      */
/* previously using VirtualAlloc with the MEM_RESERVE flag.    */
/* The ranges we need to (de)commit may span several of these  */
/* allocations; therefore we use VirtualQuery to check         */
/* allocation lengths, and split up the range as necessary.    */

/* We assume that GC_remap is called on exactly the same range */
/* as a previous call to GC_unmap.  It is safe to consistently */
/* round the endpoints in both places.                         */
void GC_unmap(ptr_t start, size_t bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;
    if (0 == start_addr) return;
#   if defined(MSWIN32) || defined(MSWINCE)
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          word free_len;
          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
              ABORT("VirtualFree failed");
          GC_unmapped_bytes += free_len;
          start_addr += free_len;
          len -= free_len;
      }
#   else
      /* We immediately remap it to prevent an intervening mmap from */
      /* accidentally grabbing the same address space.               */
      {
        void * result;
        result = mmap(start_addr, len, PROT_NONE,
                      MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
                      zero_fd, 0/* offset */);
        if (result != (void *)start_addr) ABORT("mmap(...PROT_NONE...) failed");
      }
      GC_unmapped_bytes += len;
#   endif
}
void GC_remap(ptr_t start, size_t bytes)
{
    ptr_t start_addr = GC_unmap_start(start, bytes);
    ptr_t end_addr = GC_unmap_end(start, bytes);
    word len = end_addr - start_addr;

#   if defined(MSWIN32) || defined(MSWINCE)
      ptr_t result;

      if (0 == start_addr) return;
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          word alloc_len;
          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          result = VirtualAlloc(start_addr, alloc_len,
                                MEM_COMMIT,
                                PAGE_EXECUTE_READWRITE);
          if (result != start_addr) {
              ABORT("VirtualAlloc remapping failed");
          }
          GC_unmapped_bytes -= alloc_len;
          start_addr += alloc_len;
          len -= alloc_len;
      }
#   else
      /* It was already remapped with PROT_NONE. */
      int result;

      if (0 == start_addr) return;
      result = mprotect(start_addr, len,
                        PROT_READ | PROT_WRITE | OPT_PROT_EXEC);
      if (result != 0) {
          GC_err_printf(
                "Mprotect failed at %p (length %ld) with errno %d\n",
                start_addr, (unsigned long)len, errno);
          ABORT("Mprotect remapping failed");
      }
      GC_unmapped_bytes -= len;
#   endif
}
/* Two adjacent blocks have already been unmapped and are about to      */
/* be merged.  Unmap the whole block.  This typically requires          */
/* that we unmap a small section in the middle that was not previously  */
/* unmapped due to alignment constraints.                               */
void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2, size_t bytes2)
{
    ptr_t start1_addr = GC_unmap_start(start1, bytes1);
    ptr_t end1_addr = GC_unmap_end(start1, bytes1);
    ptr_t start2_addr = GC_unmap_start(start2, bytes2);
    ptr_t end2_addr = GC_unmap_end(start2, bytes2);
    ptr_t start_addr = end1_addr;
    ptr_t end_addr = start2_addr;
    word len;

    GC_ASSERT(start1 + bytes1 == start2);
    if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
    if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
    if (0 == start_addr) return;
    len = end_addr - start_addr;
#   if defined(MSWIN32) || defined(MSWINCE)
      while (len != 0) {
          MEMORY_BASIC_INFORMATION mem_info;
          GC_word free_len;

          if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
              != sizeof(mem_info))
              ABORT("Weird VirtualQuery result");
          free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
          if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
              ABORT("VirtualFree failed");
          GC_unmapped_bytes += free_len;
          start_addr += free_len;
          len -= free_len;
      }
#   else
      if (len != 0 && munmap(start_addr, len) != 0) ABORT("munmap failed");
      GC_unmapped_bytes += len;
#   endif
}

#endif /* USE_MUNMAP */
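/* A minimal standalone sketch (not compiled into the collector) of the */
/* inward page rounding performed by GC_unmap_start/GC_unmap_end above. */
/* The 4096-byte page size and the plain size_t arithmetic here are     */
/* assumptions for illustration only.                                   */
#if 0
#include <stdio.h>

int main(void)
{
    size_t page = 4096;                     /* assumed page size */
    size_t start = 10000, bytes = 20000;
    /* Round the start up and the end down to page boundaries, so we    */
    /* never unmap memory outside [start, start+bytes).                 */
    size_t ustart = (start + page - 1) & ~(page - 1);   /* 12288 */
    size_t uend = (start + bytes) & ~(page - 1);        /* 28672 */

    if (ustart + page > start + bytes) {
        printf("range too small to unmap even one page\n");
    } else {
        printf("unmap [%zu, %zu)\n", ustart, uend);
    }
    return 0;
}
#endif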
/* Routine for pushing any additional roots.  In THREADS        */
/* environment, this is also responsible for marking from       */
/* thread stacks.                                               */
#ifndef THREADS
void (*GC_push_other_roots)(void) = 0;
#else /* THREADS */

# ifdef PCR
PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
{
    struct PCR_ThCtl_TInfoRep info;
    PCR_ERes result;

    info.ti_stkLow = info.ti_stkHi = 0;
    result = PCR_ThCtl_GetInfo(t, &info);
    GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
    return(result);
}

/* Push the contents of an old object.  We treat this as stack  */
/* data only because that makes it robust against mark stack    */
/* overflow.                                                    */
PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
{
    GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
    return(PCR_ERes_okay);
}

void GC_default_push_other_roots(void)
{
    /* Traverse data allocated by previous memory managers.     */
        {
          extern struct PCR_MM_ProcsRep * GC_old_allocator;

          if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
                                                   GC_push_old_obj, 0)
              != PCR_ERes_okay) {
              ABORT("Old object enumeration failed");
          }
        }
    /* Traverse all thread stacks. */
        if (PCR_ERes_IsErr(
                PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
            || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
            ABORT("Thread stack marking failed\n");
        }
}

# endif /* PCR */

# if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)

extern void GC_push_all_stacks(void);

void GC_default_push_other_roots(void)
{
    GC_push_all_stacks();
}

# endif /* GC_WIN32_THREADS || GC_PTHREADS */

void (*GC_push_other_roots)(void) = GC_default_push_other_roots;

#endif /* THREADS */
/*
 * Routines for accessing dirty bits on virtual pages.
 * There are six ways to maintain this information:
 * DEFAULT_VDB: A simple dummy implementation that treats every page
 *              as possibly dirty.  This makes incremental collection
 *              useless, but the implementation is still correct.
 * MANUAL_VDB:  Stacks and static data are always considered dirty.
 *              Heap pages are considered dirty if GC_dirty(p) has been
 *              called on some pointer p pointing to somewhere inside
 *              an object on that page.  A GC_dirty() call on a large
 *              object directly dirties only a single page, but for
 *              MANUAL_VDB we are careful to treat an object with a dirty
 *              page as completely dirty.
 *              In order to avoid races, an object must be marked dirty
 *              after it is written, and a reference to the object
 *              must be kept on a stack or in a register in the interim.
 *              With threads enabled, an object directly reachable from the
 *              stack at the time of a collection is treated as dirty.
 *              In single-threaded mode, it suffices to ensure that no
 *              collection can take place between the pointer assignment
 *              and the GC_dirty() call.
 * PCR_VDB:     Use PPCR's virtual dirty bit facility.
 * PROC_VDB:    Use the /proc facility for reading dirty bits.  Only
 *              works under some SVR4 variants.  Even then, it may be
 *              too slow to be entirely satisfactory.  Requires reading
 *              dirty bits for entire address space.  Implementations tend
 *              to assume that the client is a (slow) debugger.
 * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
 *              dirtied pages.  The implementation (and implementability)
 *              is highly system dependent.  This usually fails when system
 *              calls write to a protected page.  We prevent the read system
 *              call from doing so.  It is the client's responsibility to
 *              make sure that other system calls are similarly protected
 *              or write only to the stack.
 * GWW_VDB:     Use the Win32 GetWriteWatch functions, if available, to
 *              read dirty bits.  In case it is not available (because we
 *              are running on Windows 95, Windows 2000 or earlier),
 *              MPROTECT_VDB may be defined as a fallback strategy.
 */
GC_bool GC_dirty_maintained = FALSE;
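/* A hedged client-side sketch of the MANUAL_VDB protocol described     */
/* above: write first, then call GC_dirty() while a reference to the    */
/* object is still live in a local.  "struct client_obj" and "field"    */
/* are hypothetical client names, not part of this file.                */
#if 0
  struct client_obj { ptr_t field; };

  void client_store(struct client_obj *obj, ptr_t value)
  {
      obj -> field = value;   /* the write happens first ...            */
      GC_dirty((ptr_t)obj);   /* ... then the page is marked dirty,     */
                              /* with obj still held in a register or   */
                              /* on the stack, avoiding the race.       */
  }
#endif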
#if defined(PROC_VDB) || defined(GWW_VDB)

/* Add all pages in pht2 to pht1 */
void GC_or_pages(page_hash_table pht1, page_hash_table pht2)
{
    register int i;

    for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
}

#endif
#ifdef GWW_VDB

# define GC_GWW_BUF_LEN 1024
  static PVOID gww_buf[GC_GWW_BUF_LEN];

# ifdef MPROTECT_VDB
    GC_bool GC_gww_dirty_init(void)
    {
      detect_GetWriteWatch();
      return GC_GWW_AVAILABLE();
    }
# else
    void GC_dirty_init(void)
    {
      detect_GetWriteWatch();
      GC_dirty_maintained = GC_GWW_AVAILABLE();
    }
# endif
# ifdef MPROTECT_VDB
    static void GC_gww_read_dirty(void)
# else
    void GC_read_dirty(void)
# endif
  {
    word i;

    BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));

    for (i = 0; i != GC_n_heap_sects; ++i) {
      ULONG_PTR count;

      do {
        PVOID * pages, * pages_end;
        DWORD page_size;

        pages = gww_buf;
        count = GC_GWW_BUF_LEN;
        /*
         * GetWriteWatch is documented as returning non-zero when it fails,
         * but the documentation doesn't explicitly say why it would fail or
         * what its behaviour will be if it fails.
         * It does appear to fail, at least on recent W2K instances, if
         * the underlying memory was not allocated with the appropriate
         * flag.  This is common if GC_enable_incremental is called
         * shortly after GC initialization.  To avoid modifying the
         * interface, we silently work around such a failure, if it only
         * affects the initial (small) heap allocation.
         * If there are more dirty
         * pages than will fit in the buffer, this is not treated as a
         * failure; we must check the page count in the loop condition.
         * Since each partial call will reset the status of some
         * pages, this should eventually terminate even in the overflow
         * case.
         */
        if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET,
                               GC_heap_sects[i].hs_start,
                               GC_heap_sects[i].hs_bytes,
                               pages,
                               &count,
                               &page_size) != 0) {
          static int warn_count = 0;
          unsigned j;
          struct hblk * start = (struct hblk *)GC_heap_sects[i].hs_start;
          static struct hblk *last_warned = 0;
          size_t nblocks = divHBLKSZ(GC_heap_sects[i].hs_bytes);

          if ( i != 0 && last_warned != start && warn_count++ < 5) {
            last_warned = start;
            WARN(
              "GC_gww_read_dirty unexpectedly failed at %ld: "
              "Falling back to marking all pages dirty\n", start);
          }
          for (j = 0; j < nblocks; ++j) {
              word hash = PHT_HASH(start + j);
              set_pht_entry_from_index(GC_grungy_pages, hash);
          }
          count = 1;  /* Done with this section. */
        } else /* succeeded */ {
          pages_end = pages + count;
          while (pages != pages_end) {
            struct hblk * h = (struct hblk *) *pages++;
            struct hblk * h_end = (struct hblk *) ((char *) h + page_size);
            do
              set_pht_entry_from_index(GC_grungy_pages, PHT_HASH(h));
            while (++h < h_end);
          }
        }
      } while (count == GC_GWW_BUF_LEN);
    }

    GC_or_pages(GC_written_pages, GC_grungy_pages);
  }
# ifdef MPROTECT_VDB
    static GC_bool GC_gww_page_was_dirty(struct hblk * h)
# else
    GC_bool GC_page_was_dirty(struct hblk * h)
# endif
  {
    return HDR(h) == 0
           || get_pht_entry_from_index(GC_grungy_pages, PHT_HASH(h));
  }

# ifdef MPROTECT_VDB
    static GC_bool GC_gww_page_was_ever_dirty(struct hblk * h)
# else
    GC_bool GC_page_was_ever_dirty(struct hblk * h)
# endif
  {
    return HDR(h) == 0
           || get_pht_entry_from_index(GC_written_pages, PHT_HASH(h));
  }

# ifndef MPROTECT_VDB
    void GC_remove_protection(struct hblk *h, word nblocks, GC_bool is_ptrfree)
    {}
# endif

# endif /* GWW_VDB */
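/* A minimal standalone sketch of the GetWriteWatch protocol the code   */
/* above relies on: memory must be allocated with MEM_WRITE_WATCH, and  */
/* each query can atomically reset the watch state.  Illustrative only; */
/* not part of the collector.                                           */
#if 0
#include <windows.h>

static void demo(void)
{
    SIZE_T size = 64 * 1024;
    PVOID base = VirtualAlloc(NULL, size,
                              MEM_RESERVE | MEM_COMMIT | MEM_WRITE_WATCH,
                              PAGE_READWRITE);
    PVOID pages[64];
    ULONG_PTR count = 64;
    DWORD page_size;

    ((char *)base)[100] = 1;    /* dirty the first page */
    /* Returns 0 on success; "pages" receives the dirtied page bases.   */
    if (GetWriteWatch(WRITE_WATCH_FLAG_RESET, base, size,
                      pages, &count, &page_size) == 0) {
        /* count now holds the number of dirtied pages (1 here).        */
    }
    VirtualFree(base, 0, MEM_RELEASE);
}
#endif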
# ifdef DEFAULT_VDB

/* All of the following assume the allocation lock is held, and  */
/* signals are disabled.                                         */

/* The client asserts that unallocated pages in the heap are never */
/* written.                                                        */

/* Initialize virtual dirty bit implementation.                  */
void GC_dirty_init(void)
{
    if (GC_print_stats == VERBOSE)
      GC_log_printf("Initializing DEFAULT_VDB...\n");
    GC_dirty_maintained = TRUE;
}

/* Retrieve system dirty bits for heap to a local buffer.        */
/* Restore the system's notion of which pages are dirty.         */
void GC_read_dirty(void)
{}

/* Is the HBLKSIZE sized page at h marked dirty in the local buffer?  */
/* If the actual page size is different, this returns TRUE if any     */
/* of the pages overlapping h are dirty.  This routine may err on the */
/* side of labelling pages as dirty (and this implementation does).   */
GC_bool GC_page_was_dirty(struct hblk *h)
{
    return(TRUE);
}

/*
 * The following two routines are typically less crucial.  They matter
 * most with large dynamic libraries, or if we can't accurately identify
 * stacks, e.g. under Solaris 2.X.  Otherwise the following default
 * versions are adequate.
 */

/* Could any valid GC heap pointer ever have been written to this page? */
GC_bool GC_page_was_ever_dirty(struct hblk *h)
{
    return(TRUE);
}

/* A call that:                                                  */
/* I) hints that [h, h+nblocks) is about to be written.          */
/* II) guarantees that protection is removed.                    */
/* (I) may speed up some dirty bit implementations.              */
/* (II) may be essential if we need to ensure that               */
/* pointer-free system call buffers in the heap are              */
/* not protected.                                                */
void GC_remove_protection(struct hblk *h, word nblocks, GC_bool is_ptrfree)
{
}

# endif /* DEFAULT_VDB */
# ifdef MANUAL_VDB

/* Initialize virtual dirty bit implementation.                  */
void GC_dirty_init(void)
{
    if (GC_print_stats == VERBOSE)
      GC_log_printf("Initializing MANUAL_VDB...\n");
    /* GC_dirty_pages and GC_grungy_pages are already cleared. */
    GC_dirty_maintained = TRUE;
}

/* Retrieve system dirty bits for heap to a local buffer.        */
/* Restore the system's notion of which pages are dirty.         */
void GC_read_dirty(void)
{
    BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
          (sizeof GC_dirty_pages));
    BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
}

/* Is the HBLKSIZE sized page at h marked dirty in the local buffer?  */
/* If the actual page size is different, this returns TRUE if any     */
/* of the pages overlapping h are dirty.  This routine may err on the */
/* side of labelling pages as dirty (and this implementation does).   */
GC_bool GC_page_was_dirty(struct hblk *h)
{
    register word index;

    index = PHT_HASH(h);
    return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
}

/* Could any valid GC heap pointer ever have been written to this page? */
GC_bool GC_page_was_ever_dirty(struct hblk *h)
{
    /* FIXME - implement me. */
    return(TRUE);
}

/* Mark the page containing p as dirty.  Logically, this dirties the  */
/* entire object.                                                     */
void GC_dirty(ptr_t p)
{
    word index = PHT_HASH(p);

    async_set_pht_entry_from_index(GC_dirty_pages, index);
}

void GC_remove_protection(struct hblk *h, word nblocks, GC_bool is_ptrfree)
{
}

# endif /* MANUAL_VDB */
# ifdef MPROTECT_VDB

/*
 * See DEFAULT_VDB for interface descriptions.
 */

/*
 * This implementation maintains dirty bits itself by catching write
 * faults and keeping track of them.  We assume nobody else catches
 * SIGBUS or SIGSEGV.  We assume no write faults occur in system calls.
 * This means that clients must ensure that system calls don't write
 * to the write-protected heap.  Probably the best way to do this is to
 * ensure that system calls write at most to POINTERFREE objects in the
 * heap, and do even that only if we are on a platform on which those
 * are not protected.  Another alternative is to wrap system calls
 * (see example for read below), but the current implementation holds
 * the allocation lock across such wrapped calls, which is a problem
 * for blocking calls in multithreaded programs.
 * We assume the page size is a multiple of HBLKSIZE.
 * We prefer them to be the same.  We avoid protecting POINTERFREE
 * objects only if they are the same.
 */

# if !defined(MSWIN32) && !defined(MSWINCE) && !defined(DARWIN)

#   include <sys/mman.h>
#   include <signal.h>
#   include <sys/syscall.h>

#   define PROTECT(addr, len) \
          if (mprotect((caddr_t)(addr), (size_t)(len), \
                       PROT_READ | OPT_PROT_EXEC) < 0) { \
            ABORT("mprotect failed"); \
          }
#   define UNPROTECT(addr, len) \
          if (mprotect((caddr_t)(addr), (size_t)(len), \
                       PROT_WRITE | PROT_READ | OPT_PROT_EXEC ) < 0) { \
            ABORT("un-mprotect failed"); \
          }

# else

#   ifdef DARWIN
      /* Using vm_protect (mach syscall) over mprotect (BSD syscall) seems to
         decrease the likelihood of some of the problems described below. */
#     include <mach/vm_map.h>
      static mach_port_t GC_task_self = 0;
#     define PROTECT(addr,len) \
        if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
                FALSE,VM_PROT_READ) != KERN_SUCCESS) { \
            ABORT("vm_protect (PROTECT) failed"); \
        }
#     define UNPROTECT(addr,len) \
        if(vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
                FALSE,VM_PROT_READ|VM_PROT_WRITE) != KERN_SUCCESS) { \
            ABORT("vm_protect (UNPROTECT) failed"); \
        }
#   else

#     include <signal.h>

      static DWORD protect_junk;
#     define PROTECT(addr, len) \
          if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READ, \
                              &protect_junk)) { \
            DWORD last_error = GetLastError(); \
            GC_printf("Last error code: %lx\n", last_error); \
            ABORT("VirtualProtect failed"); \
          }
#     define UNPROTECT(addr, len) \
          if (!VirtualProtect((addr), (len), PAGE_EXECUTE_READWRITE, \
                              &protect_junk)) { \
            ABORT("un-VirtualProtect failed"); \
          }
#   endif /* !DARWIN */
# endif /* MSWIN32 || MSWINCE || DARWIN */
#if defined(MSWIN32)
    typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_HNDLR_PTR;
#   undef SIG_DFL
#   define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER) (-1)
#elif defined(MSWINCE)
    typedef LONG (WINAPI *SIG_HNDLR_PTR)(struct _EXCEPTION_POINTERS *);
#   undef SIG_DFL
#   define SIG_DFL (SIG_HNDLR_PTR) (-1)
#elif defined(DARWIN)
    typedef void (* SIG_HNDLR_PTR)();
#else
    typedef void (* SIG_HNDLR_PTR)(int, siginfo_t *, void *);
    typedef void (* PLAIN_HNDLR_PTR)(int);
#endif

#if defined(__GLIBC__)
#   if __GLIBC__ < 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ < 2
#       error glibc too old?
#   endif
#endif

#ifndef DARWIN
SIG_HNDLR_PTR GC_old_bus_handler;
GC_bool GC_old_bus_handler_used_si;
SIG_HNDLR_PTR GC_old_segv_handler;
                        /* Also old MSWIN32 ACCESS_VIOLATION filter */
GC_bool GC_old_segv_handler_used_si;
#endif /* !DARWIN */
#if defined(THREADS)
/* We need to lock around the bitmap update in the write fault handler */
/* in order to avoid the risk of losing a bit.  We do this with a      */
/* test-and-set spin lock if we know how to do that.  Otherwise we     */
/* check whether we are already in the handler and use the dumb but    */
/* safe fallback algorithm of setting all bits in the word.            */
/* Contention should be very rare, so we do the minimum to handle it   */
/* correctly.                                                          */
#ifdef AO_HAVE_test_and_set_acquire
  static volatile AO_TS_t fault_handler_lock = 0;
  void async_set_pht_entry_from_index(volatile page_hash_table db,
                                      size_t index) {
    while (AO_test_and_set_acquire(&fault_handler_lock) == AO_TS_SET) {}
    /* Could also revert to set_pht_entry_from_index_safe if initial   */
    /* GC_test_and_set fails.                                          */
    set_pht_entry_from_index(db, index);
    AO_CLEAR(&fault_handler_lock);
  }
#else /* !AO_HAVE_test_and_set_acquire */
# error No test_and_set operation: Introduces a race.
  /* THIS WOULD BE INCORRECT!                                          */
  /* The dirty bit vector may be temporarily wrong,                    */
  /* just before we notice the conflict and correct it.  We may end up */
  /* looking at it while it's wrong.  But this requires contention     */
  /* exactly when a GC is triggered, which seems far less likely to    */
  /* fail than the old code, which had no reported failures.  Thus we  */
  /* leave it this way while we think of something better, or support  */
  /* GC_test_and_set on the remaining platforms.                       */
  static volatile word currently_updating = 0;
  void async_set_pht_entry_from_index(volatile page_hash_table db,
                                      size_t index) {
    unsigned int update_dummy;
    currently_updating = (word)(&update_dummy);
    set_pht_entry_from_index(db, index);
    /* If we get contention in the 10 or so instruction window here,   */
    /* and we get stopped by a GC between the two updates, we lose!    */
    if (currently_updating != (word)(&update_dummy)) {
        set_pht_entry_from_index_safe(db, index);
        /* We claim that if two threads concurrently try to update the */
        /* dirty bit vector, the first one to execute UPDATE_START     */
        /* will see it changed when UPDATE_END is executed.  (Note that */
        /* &update_dummy must differ in two distinct threads.)  It     */
        /* will then execute set_pht_entry_from_index_safe, thus       */
        /* returning us to a safe state, though not soon enough.       */
    }
  }
#endif /* !AO_HAVE_test_and_set_acquire */
#else /* !THREADS */
# define async_set_pht_entry_from_index(db, index) \
        set_pht_entry_from_index(db, index)
#endif /* !THREADS */
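/* The spin lock pattern used above, reduced to a minimal standalone   */
/* sketch for readers unfamiliar with libatomic_ops.  "my_lock" and    */
/* "critical" are illustrative names; the real lock protects only the  */
/* dirty-page hash table updates.                                      */
#if 0
#include "atomic_ops.h"

static volatile AO_TS_t my_lock = AO_TS_INITIALIZER;

static void critical(void)
{
    /* Spin until we atomically change the flag from clear to set.     */
    while (AO_test_and_set_acquire(&my_lock) == AO_TS_SET) {}
    /* ... update shared state; must not fault or block here ...       */
    AO_CLEAR(&my_lock);   /* release, making the updates visible       */
}
#endif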
#if !defined(DARWIN)

# if defined(FREEBSD)
#   define SIG_OK TRUE
#   define CODE_OK (code == BUS_PAGE_FAULT)
# elif defined(OSF1)
#   define SIG_OK (sig == SIGSEGV)
#   define CODE_OK (code == 2 /* experimentally determined */)
# elif defined(IRIX5)
#   define SIG_OK (sig == SIGSEGV)
#   define CODE_OK (code == EACCES)
# elif defined(HURD)
#   define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
#   define CODE_OK TRUE
# elif defined(LINUX)
#   define SIG_OK (sig == SIGSEGV)
#   define CODE_OK TRUE
    /* Empirically c.trapno == 14, on IA32, but is that useful?     */
    /* Should probably consider alignment issues on other           */
    /* architectures.                                               */
# elif defined(HPUX)
#   define SIG_OK (sig == SIGSEGV || sig == SIGBUS)
#   define CODE_OK (si -> si_code == SEGV_ACCERR) \
                   || (si -> si_code == BUS_ADRERR) \
                   || (si -> si_code == BUS_UNKNOWN) \
                   || (si -> si_code == SEGV_UNKNOWN) \
                   || (si -> si_code == BUS_OBJERR)
# elif defined(FREEBSD)
#   define SIG_OK (sig == SIGBUS)
#   define CODE_OK (si -> si_code == BUS_PAGE_FAULT)
# elif defined(SUNOS5SIGS)
#   define SIG_OK (sig == SIGSEGV)
#   define CODE_OK (si -> si_code == SEGV_ACCERR)
# elif defined(MSWIN32) || defined(MSWINCE)
#   define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode \
                     == STATUS_ACCESS_VIOLATION)
#   define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] \
                      == 1) /* Write fault */
# endif
# if defined(MSWIN32) || defined(MSWINCE)
    LONG WINAPI GC_write_fault_handler(struct _EXCEPTION_POINTERS *exc_info)
# else
#   include <ucontext.h>
    /*ARGSUSED*/
    void GC_write_fault_handler(int sig, siginfo_t *si, void *raw_sc)
# endif /* MSWIN32 || MSWINCE */
{
#   if !defined(MSWIN32) && !defined(MSWINCE)
      int code = si -> si_code;  /* Ignore gcc unused var. warning. */
      ucontext_t * scp = (ucontext_t *)raw_sc;
                                 /* Ignore gcc unused var. warning. */
      char *addr = si -> si_addr;
#   endif
#   if defined(MSWIN32) || defined(MSWINCE)
      char * addr = (char *) (exc_info -> ExceptionRecord
                              -> ExceptionInformation[1]);
#     define sig SIGSEGV
#   endif
    unsigned i;

    if (SIG_OK && CODE_OK) {
        register struct hblk * h =
                        (struct hblk *)((word)addr & ~(GC_page_size-1));
        GC_bool in_allocd_block;

#       ifdef SUNOS5SIGS
          /* Address is only within the correct physical page.      */
          in_allocd_block = FALSE;
          for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
            if (HDR(h+i) != 0) {
              in_allocd_block = TRUE;
              break;
            }
          }
#       else
          in_allocd_block = (HDR(addr) != 0);
#       endif
        if (!in_allocd_block) {
            /* FIXME - We should make sure that we invoke the       */
            /* old handler with the appropriate calling             */
            /* sequence, which often depends on SA_SIGINFO.         */

            /* Heap blocks now begin and end on page boundaries. */
            SIG_HNDLR_PTR old_handler;
            GC_bool used_si;

            if (sig == SIGSEGV) {
                old_handler = GC_old_segv_handler;
                used_si = GC_old_segv_handler_used_si;
            } else {
                old_handler = GC_old_bus_handler;
                used_si = GC_old_bus_handler_used_si;
            }
            if (old_handler == (SIG_HNDLR_PTR)SIG_DFL) {
#               if !defined(MSWIN32) && !defined(MSWINCE)
                  GC_err_printf("Segfault at %p\n", addr);
                  ABORT("Unexpected bus error or segmentation fault");
#               else
                  return(EXCEPTION_CONTINUE_SEARCH);
#               endif
            } else {
                /*
                 * FIXME: This code should probably check if the
                 * old signal handler used the traditional style and
                 * if so call it using that style.
                 */
#               if defined(MSWIN32) || defined(MSWINCE)
                  return((*old_handler)(exc_info));
#               else
                  if (used_si)
                    ((SIG_HNDLR_PTR)old_handler) (sig, si, raw_sc);
                  else
                    /* FIXME: should pass nonstandard args as well. */
                    ((PLAIN_HNDLR_PTR)old_handler) (sig);
                  return;
#               endif
            }
        }
        UNPROTECT(h, GC_page_size);
        /* We need to make sure that no collection occurs between       */
        /* the UNPROTECT and the setting of the dirty bit.  Otherwise   */
        /* a write by a third thread might go unnoticed.  Reversing     */
        /* the order is just as bad, since we would end up unprotecting */
        /* a page in a GC cycle during which it's not marked.           */
        /* Currently we do this by disabling the thread stopping        */
        /* signals while this handler is running.  An alternative might */
        /* be to record the fact that we're about to unprotect, or      */
        /* have just unprotected a page in the GC's thread structure,   */
        /* and then to have the thread stopping code set the dirty      */
        /* flag, if necessary.                                          */
        for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
            size_t index = PHT_HASH(h+i);

            async_set_pht_entry_from_index(GC_dirty_pages, index);
        }
        /* The write may not take place before dirty bits are read.     */
        /* But then we'll fault again ...                               */
#       if defined(MSWIN32) || defined(MSWINCE)
          return(EXCEPTION_CONTINUE_EXECUTION);
#       else
          return;
#       endif
    }
#   if defined(MSWIN32) || defined(MSWINCE)
      return EXCEPTION_CONTINUE_SEARCH;
#   else
      GC_err_printf("Segfault at %p\n", addr);
      ABORT("Unexpected bus error or segmentation fault");
#   endif
}
#endif /* !DARWIN */
/*
 * We hold the allocation lock.  We expect block h to be written
 * shortly.  Ensure that all pages containing any part of the n hblks
 * starting at h are no longer protected.  If is_ptrfree is false,
 * also ensure that they will subsequently appear to be dirty.
 */
void GC_remove_protection(struct hblk *h, word nblocks, GC_bool is_ptrfree)
{
    struct hblk * h_trunc;  /* Truncated to page boundary */
    struct hblk * h_end;    /* Page boundary following block end */
    struct hblk * current;
    GC_bool found_clean;

#   if defined(GWW_VDB)
      if (GC_GWW_AVAILABLE()) return;
#   endif
    if (!GC_dirty_maintained) return;
    h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
    h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1)
                            & ~(GC_page_size-1));
    found_clean = FALSE;
    for (current = h_trunc; current < h_end; ++current) {
        size_t index = PHT_HASH(current);

        if (!is_ptrfree || current < h || current >= h + nblocks) {
            async_set_pht_entry_from_index(GC_dirty_pages, index);
        }
    }
    UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
}
#if !defined(DARWIN)
void GC_dirty_init(void)
{
#   if !defined(MSWIN32) && !defined(MSWINCE)
      struct sigaction act, oldact;
      act.sa_flags = SA_RESTART | SA_SIGINFO;
      act.sa_sigaction = GC_write_fault_handler;
      (void)sigemptyset(&act.sa_mask);
#     ifdef SIG_SUSPEND
        /* Arrange to postpone SIG_SUSPEND while we're in a write fault */
        /* handler.  This effectively makes the handler atomic w.r.t.   */
        /* stopping the world for GC.                                   */
        (void)sigaddset(&act.sa_mask, SIG_SUSPEND);
#     endif /* SIG_SUSPEND */
#   endif
    if (GC_print_stats == VERBOSE)
        GC_log_printf(
                "Initializing mprotect virtual dirty bit implementation\n");
    GC_dirty_maintained = TRUE;
    if (GC_page_size % HBLKSIZE != 0) {
        GC_err_printf("Page size not multiple of HBLKSIZE\n");
        ABORT("Page size not multiple of HBLKSIZE");
    }
#   if !defined(MSWIN32) && !defined(MSWINCE)
#     if defined(GC_IRIX_THREADS)
        sigaction(SIGSEGV, 0, &oldact);
        sigaction(SIGSEGV, &act, 0);
#     else
        {
          int res = sigaction(SIGSEGV, &act, &oldact);
          if (res != 0) ABORT("Sigaction failed");
        }
#     endif
      if (oldact.sa_flags & SA_SIGINFO) {
          GC_old_segv_handler = oldact.sa_sigaction;
          GC_old_segv_handler_used_si = TRUE;
      } else {
          GC_old_segv_handler = (SIG_HNDLR_PTR)oldact.sa_handler;
          GC_old_segv_handler_used_si = FALSE;
      }
      if (GC_old_segv_handler == (SIG_HNDLR_PTR)SIG_IGN) {
          GC_err_printf("Previously ignored segmentation violation!?");
          GC_old_segv_handler = (SIG_HNDLR_PTR)SIG_DFL;
      }
      if (GC_old_segv_handler != (SIG_HNDLR_PTR)SIG_DFL) {
          if (GC_print_stats == VERBOSE)
            GC_log_printf("Replaced other SIGSEGV handler\n");
      }
#   endif /* ! MS windows */
#   if defined(HPUX) || defined(LINUX) || defined(HURD) \
       || (defined(FREEBSD) && defined(SUNOS5SIGS))
      sigaction(SIGBUS, &act, &oldact);
      if (oldact.sa_flags & SA_SIGINFO) {
          GC_old_bus_handler = oldact.sa_sigaction;
          GC_old_bus_handler_used_si = TRUE;
      } else {
          GC_old_bus_handler = (SIG_HNDLR_PTR)oldact.sa_handler;
          GC_old_bus_handler_used_si = FALSE;
      }
      if (GC_old_bus_handler == (SIG_HNDLR_PTR)SIG_IGN) {
          GC_err_printf("Previously ignored bus error!?");
          GC_old_bus_handler = (SIG_HNDLR_PTR)SIG_DFL;
      }
      if (GC_old_bus_handler != (SIG_HNDLR_PTR)SIG_DFL) {
          if (GC_print_stats == VERBOSE)
            GC_log_printf("Replaced other SIGBUS handler\n");
      }
#   endif /* HPUX || LINUX || HURD || (FREEBSD && SUNOS5SIGS) */
#   if defined(MSWIN32)
#     if defined(GWW_VDB)
        if (GC_gww_dirty_init())
          return;
#     endif
      GC_old_segv_handler =
                SetUnhandledExceptionFilter(GC_write_fault_handler);
      if (GC_old_segv_handler != NULL) {
        if (GC_print_stats)
          GC_log_printf("Replaced other UnhandledExceptionFilter\n");
      } else {
          GC_old_segv_handler = SIG_DFL;
      }
#   endif
}
#endif /* !DARWIN */
int GC_incremental_protection_needs(void)
{
    if (GC_page_size == HBLKSIZE) {
        return GC_PROTECTS_POINTER_HEAP;
    } else {
        return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
    }
}
#define HAVE_INCREMENTAL_PROTECTION_NEEDS
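/* A sketch of how a client might consult GC_incremental_protection_needs() */
/* before letting a system call write into the heap.  The buffer-           */
/* allocation strategy shown is illustrative, not prescribed by this        */
/* interface; "alloc_syscall_buffer" is a hypothetical client helper.       */
#if 0
#include <stdlib.h>

void *alloc_syscall_buffer(size_t len)
{
    if (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP) {
        /* Even pointer-free heap pages may be mprotect-ed: take the    */
        /* buffer from outside the GC heap.                             */
        return malloc(len);
    }
    /* Pointer-free objects are left unprotected: a GC_malloc_atomic    */
    /* buffer is safe for the kernel to write into.                     */
    return GC_malloc_atomic(len);
}
#endif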
#define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)

#define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
void GC_protect_heap(void)
{
    ptr_t start;
    size_t len;
    struct hblk * current;
    struct hblk * current_start;  /* Start of block to be protected. */
    struct hblk * limit;
    unsigned i;
    GC_bool protect_all =
          (0 != (GC_incremental_protection_needs()
                 & GC_PROTECTS_PTRFREE_HEAP));
    for (i = 0; i < GC_n_heap_sects; i++) {
        start = GC_heap_sects[i].hs_start;
        len = GC_heap_sects[i].hs_bytes;
        if (protect_all) {
          PROTECT(start, len);
        } else {
          GC_ASSERT(PAGE_ALIGNED(len))
          GC_ASSERT(PAGE_ALIGNED(start))
          current_start = current = (struct hblk *)start;
          limit = (struct hblk *)(start + len);
          while (current < limit) {
            hdr * hhdr;
            word nhblks;
            GC_bool is_ptrfree;

            GC_ASSERT(PAGE_ALIGNED(current));
            GET_HDR(current, hhdr);
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
              /* This can happen only if we're at the beginning of a    */
              /* heap segment, and a block spans heap segments.         */
              /* We will handle that block as part of the preceding     */
              /* segment.                                               */
              GC_ASSERT(current_start == current);
              current_start = ++current;
              continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
              GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
              nhblks = divHBLKSZ(hhdr -> hb_sz);
              is_ptrfree = TRUE;        /* dirty on alloc */
            } else {
              nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
              is_ptrfree = IS_PTRFREE(hhdr);
            }
            if (is_ptrfree) {
              if (current_start < current) {
                PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
              }
              current_start = (current += nhblks);
            } else {
              current += nhblks;
            }
          }
          if (current_start < current) {
            PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
          }
        }
    }
}
/* We assume that either the world is stopped or it's OK to lose dirty */
/* bits while this is happening (as in GC_enable_incremental).         */
void GC_read_dirty(void)
{
#   if defined(GWW_VDB)
      if (GC_GWW_AVAILABLE()) {
        GC_gww_read_dirty();
        return;
      }
#   endif
    BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
          (sizeof GC_dirty_pages));
    BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
    GC_protect_heap();
}

GC_bool GC_page_was_dirty(struct hblk *h)
{
    register word index;

#   if defined(GWW_VDB)
      if (GC_GWW_AVAILABLE())
        return GC_gww_page_was_dirty(h);
#   endif

    index = PHT_HASH(h);
    return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
}
/*
 * Acquiring the allocation lock here is dangerous, since this
 * can be called from within GC_call_with_alloc_lock, and the cord
 * package does so.  On systems that allow nested lock acquisition, this
 * happens to work.
 * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
 */

static GC_bool syscall_acquired_lock = FALSE;   /* Protected by GC lock. */

void GC_begin_syscall(void)
{
    /* FIXME: Resurrecting this code would require fixing the   */
    /* test, which can spuriously return TRUE.                  */
    if (!I_HOLD_LOCK()) {
        LOCK();
        syscall_acquired_lock = TRUE;
    }
}

void GC_end_syscall(void)
{
    if (syscall_acquired_lock) {
        syscall_acquired_lock = FALSE;
        UNLOCK();
    }
}
void GC_unprotect_range(ptr_t addr, word len)
{
    struct hblk * start_block;
    struct hblk * end_block;
    register struct hblk *h;
    ptr_t obj_start;

    if (!GC_dirty_maintained) return;
    obj_start = GC_base(addr);
    if (obj_start == 0) return;
    if (GC_base(addr + len - 1) != obj_start) {
        ABORT("GC_unprotect_range(range bigger than object)");
    }
    start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
    end_block = (struct hblk *)((word)(addr + len - 1)
                                & ~(GC_page_size - 1));
    end_block += GC_page_size/HBLKSIZE - 1;
    for (h = start_block; h <= end_block; h++) {
        register word index = PHT_HASH(h);

        async_set_pht_entry_from_index(GC_dirty_pages, index);
    }
    UNPROTECT(start_block,
              ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
}
/* We no longer wrap read by default, since that was causing too many   */
/* problems.  It is preferred that the client instead avoids writing    */
/* to the write-protected heap with a system call.                      */
/* This still serves as sample code if you do want to wrap system calls.*/

#if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_USE_LD_WRAP)
/* Replacement for UNIX system call.                                     */
/* Other calls that write to the heap should be handled similarly.       */
/* Note that this doesn't work well for blocking reads:  It will hold    */
/* the allocation lock for the entire duration of the call. Multithreaded*/
/* clients should really ensure that it won't block, either by setting   */
/* the descriptor nonblocking, or by calling select or poll first, to    */
/* make sure that input is available.                                    */
/* Another, preferred alternative is to ensure that system calls never   */
/* write to the protected heap (see above).                              */
# include <unistd.h>
# include <sys/uio.h>
ssize_t read(int fd, void *buf, size_t nbyte)
{
    int result;

    GC_begin_syscall();
    GC_unprotect_range(buf, (word)nbyte);
#   if defined(IRIX5) || defined(GC_LINUX_THREADS)
        /* Indirect system call may not always be easily available.    */
        /* We could call _read, but that would interfere with the      */
        /* libpthread interception of read.                            */
        /* On Linux, we have to be careful with the linuxthreads       */
        /* read interception.                                          */
        {
            struct iovec iov;

            iov.iov_base = buf;
            iov.iov_len = nbyte;
            result = readv(fd, &iov, 1);
        }
#   else
#     if defined(HURD)
        result = __read(fd, buf, nbyte);
#     else
        /* The two zero args at the end of this list are because one
           IA-64 syscall() implementation actually requires six args
           to be passed, even though they aren't always used. */
        result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
#     endif /* !HURD */
#   endif
    GC_end_syscall();
    return(result);
}
#endif /* !MSWIN32 && !MSWINCE && !GC_USE_LD_WRAP */
#if defined(GC_USE_LD_WRAP) && !defined(THREADS)
    /* We use the GNU ld call wrapping facility.                        */
    /* This requires that the linker be invoked with "--wrap read".     */
    /* This can be done by passing -Wl,"--wrap read" to gcc.            */
    /* I'm not sure that this actually wraps whatever version of read   */
    /* is called by stdio.  That code also mentions __read.             */
#   include <unistd.h>
    ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
    {
        int result;

        GC_begin_syscall();
        GC_unprotect_range(buf, (word)nbyte);
        result = __real_read(fd, buf, nbyte);
        GC_end_syscall();
        return(result);
    }

    /* We should probably also do this for __read, or whatever stdio    */
    /* actually calls.                                                  */
#endif
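/* For reference, one plausible link line for the wrapping scheme above */
/* (the file names are placeholders, not taken from this project):      */
/*                                                                      */
/*      gcc -o app app.o -Wl,--wrap -Wl,read -lgc                       */
/*                                                                      */
/* With this, calls to read() resolve to __wrap_read above, and the     */
/* __real_read symbol refers to the original libc entry point.          */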
GC_bool GC_page_was_ever_dirty(struct hblk *h)
{
#   if defined(GWW_VDB)
      if (GC_GWW_AVAILABLE())
        return GC_gww_page_was_ever_dirty(h);
#   endif
    return(TRUE);
}

# endif /* MPROTECT_VDB */
# ifdef PROC_VDB

/*
 * See DEFAULT_VDB for interface descriptions.
 */

/*
 * This implementation assumes a Solaris 2.X like /proc pseudo-file-system
 * from which we can read page modified bits.  This facility is far from
 * optimal (e.g. we would like to get the info for only some of the
 * address space), but it avoids intercepting system calls.
 */

#include <sys/types.h>
#include <sys/signal.h>
#include <sys/fault.h>
#include <sys/syscall.h>
#include <sys/procfs.h>
#include <sys/stat.h>

#define INITIAL_BUF_SZ 16384
word GC_proc_buf_size = INITIAL_BUF_SZ;
char *GC_proc_buf;

int GC_proc_fd;
void GC_dirty_init(void)
{
    int fd;
    char buf[30];

    GC_dirty_maintained = TRUE;
    if (GC_bytes_allocd != 0 || GC_bytes_allocd_before_gc != 0) {
        register int i;

        for (i = 0; i < PHT_SIZE; i++) GC_written_pages[i] = (word)(-1);
        if (GC_print_stats == VERBOSE)
            GC_log_printf(
                      "Allocated bytes:%lu:all pages may have been written\n",
                      (unsigned long)
                                (GC_bytes_allocd + GC_bytes_allocd_before_gc));
    }
    sprintf(buf, "/proc/%d", getpid());
    fd = open(buf, O_RDONLY);
    if (fd < 0) {
        ABORT("/proc open failed");
    }
    GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
    close(fd);
    syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC);
    if (GC_proc_fd < 0) {
        ABORT("/proc ioctl failed");
    }
    GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
}

/* Ignore write hints.  They don't help us here. */
/*ARGSUSED*/
void GC_remove_protection(struct hblk *h, word nblocks, GC_bool is_ptrfree)
{
}
# define READ(fd,buf,nbytes) read(fd, buf, nbytes)

void GC_read_dirty(void)
{
    unsigned long ps, np;
    int nmaps;
    ptr_t vaddr;
    struct prasmap * map;
    char * bufp;
    ptr_t current_addr, limit;
    int i;

    BZERO(GC_grungy_pages, (sizeof GC_grungy_pages));

    bufp = GC_proc_buf;
    if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
        if (GC_print_stats)
            GC_log_printf("/proc read failed: GC_proc_buf_size = %lu\n",
                          (unsigned long)GC_proc_buf_size);
        {
            /* Retry with larger buffer. */
            word new_size = 2 * GC_proc_buf_size;
            char * new_buf = GC_scratch_alloc(new_size);

            if (new_buf != 0) {
                GC_proc_buf = bufp = new_buf;
                GC_proc_buf_size = new_size;
            }
            if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
                WARN("Insufficient space for /proc read\n", 0);
                /* Punt: mark everything dirty.  */
                memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
                memset(GC_written_pages, 0xff, sizeof(page_hash_table));
                return;
            }
        }
    }
    /* Copy dirty bits into GC_grungy_pages */
    nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
    /* printf( "nmaps = %d, PG_REFERENCED = %d, PG_MODIFIED = %d\n",
               nmaps, PG_REFERENCED, PG_MODIFIED); */
    bufp = bufp + sizeof(struct prpageheader);
    for (i = 0; i < nmaps; i++) {
        map = (struct prasmap *)bufp;
        vaddr = (ptr_t)(map -> pr_vaddr);
        ps = map -> pr_pagesize;
        np = map -> pr_npage;
        /* printf("vaddr = 0x%X, ps = 0x%X, np = 0x%X\n", vaddr, ps, np); */
        limit = vaddr + ps * np;
        bufp += sizeof (struct prasmap);
        for (current_addr = vaddr;
             current_addr < limit; current_addr += ps){
            if ((*bufp++) & PG_MODIFIED) {
                register struct hblk * h = (struct hblk *) current_addr;

                while ((ptr_t)h < current_addr + ps) {
                    register word index = PHT_HASH(h);

                    set_pht_entry_from_index(GC_grungy_pages, index);
                    h++;
                }
            }
        }
        bufp += sizeof(long) - 1;
        bufp = (char *)((unsigned long)bufp & ~(sizeof(long)-1));
    }
    /* Update GC_written_pages. */
    GC_or_pages(GC_written_pages, GC_grungy_pages);
}
GC_bool GC_page_was_dirty(struct hblk *h)
{
    register word index = PHT_HASH(h);
    register GC_bool result;

    result = get_pht_entry_from_index(GC_grungy_pages, index);
    return(result);
}

GC_bool GC_page_was_ever_dirty(struct hblk *h)
{
    register word index = PHT_HASH(h);
    register GC_bool result;

    result = get_pht_entry_from_index(GC_written_pages, index);
    return(result);
}

# endif /* PROC_VDB */
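/* The pointer-alignment idiom used at the end of each map iteration in */
/* GC_read_dirty above, isolated: round "p" up to the next sizeof(long) */
/* boundary after a variable-length run of per-page bytes.  Standalone  */
/* and illustrative only.                                               */
#if 0
static char *align_up(char *p)
{
    p += sizeof(long) - 1;                              /* overshoot    */
    return (char *)((unsigned long)p & ~(sizeof(long) - 1)); /* truncate */
}
#endif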
# ifdef PCR_VDB

# include "vd/PCR_VD.h"

# define NPAGES (32*1024)       /* 128 MB */

PCR_VD_DB GC_grungy_bits[NPAGES];

ptr_t GC_vd_base;       /* Address corresponding to GC_grungy_bits[0]  */
                        /* HBLKSIZE aligned.                            */

void GC_dirty_init(void)
{
    GC_dirty_maintained = TRUE;
    /* For the time being, we assume the heap generally grows up */
    GC_vd_base = GC_heap_sects[0].hs_start;
    if (GC_vd_base == 0) {
        ABORT("Bad initial heap segment");
    }
    if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
        != PCR_ERes_okay) {
        ABORT("dirty bit initialization failed");
    }
}

void GC_read_dirty(void)
{
    /* lazily enable dirty bits on newly added heap sects */
    {
        static int onhs = 0;
        int nhs = GC_n_heap_sects;
        for( ; onhs < nhs; onhs++ ) {
            PCR_VD_WriteProtectEnable(
                    GC_heap_sects[onhs].hs_start,
                    GC_heap_sects[onhs].hs_bytes );
        }
    }

    if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
        != PCR_ERes_okay) {
        ABORT("dirty bit read failed");
    }
}

GC_bool GC_page_was_dirty(struct hblk *h)
{
    if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
        return(TRUE);
    }
    return(GC_grungy_bits[h - (struct hblk *)GC_vd_base]
           & PCR_VD_DB_dirtyBit);
}

/*ARGSUSED*/
void GC_remove_protection(struct hblk *h, word nblocks, GC_bool is_ptrfree)
{
    PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
    PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
}

# endif /* PCR_VDB */
#if defined(MPROTECT_VDB) && defined(DARWIN)
/* The following sources were used as a *reference* for this exception
   handling code:
      1. Apple's mach/xnu documentation
      2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
         omnigroup's macosx-dev list.
         www.omnigroup.com/mailman/archive/macosx-dev/2000-June/014178.html
      3. macosx-nat.c from Apple's GDB source code.
*/

/* The bug that caused all this trouble should now be fixed. This should
   eventually be removed if all goes well. */

/* #define BROKEN_EXCEPTION_HANDLING */
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <mach/thread_status.h>
#include <mach/exception.h>
#include <mach/task.h>
#include <pthread.h>

extern void GC_darwin_register_mach_handler_thread(mach_port_t);

/* These are not defined in any header, although they are documented */
extern boolean_t
exc_server(mach_msg_header_t *, mach_msg_header_t *);

extern kern_return_t
exception_raise(mach_port_t, mach_port_t, mach_port_t, exception_type_t,
                exception_data_t, mach_msg_type_number_t);

extern kern_return_t
exception_raise_state(mach_port_t, mach_port_t, mach_port_t, exception_type_t,
                      exception_data_t, mach_msg_type_number_t,
                      thread_state_flavor_t*, thread_state_t,
                      mach_msg_type_number_t, thread_state_t,
                      mach_msg_type_number_t*);

extern kern_return_t
exception_raise_state_identity(mach_port_t, mach_port_t, mach_port_t,
                               exception_type_t, exception_data_t,
                               mach_msg_type_number_t, thread_state_flavor_t*,
                               thread_state_t, mach_msg_type_number_t,
                               thread_state_t, mach_msg_type_number_t*);

#define MAX_EXCEPTION_PORTS 16
static struct {
  mach_msg_type_number_t count;
  exception_mask_t      masks[MAX_EXCEPTION_PORTS];
  exception_handler_t   ports[MAX_EXCEPTION_PORTS];
  exception_behavior_t  behaviors[MAX_EXCEPTION_PORTS];
  thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS];
} GC_old_exc_ports;

static struct {
  mach_port_t exception;
# if defined(THREADS)
    mach_port_t reply;
# endif
} GC_ports;

typedef struct {
    mach_msg_header_t head;
} GC_msg_t;

typedef enum {
    GC_MP_NORMAL, GC_MP_DISCARDING, GC_MP_STOPPED
} GC_mprotect_state_t;

/* FIXME: 1 and 2 seem to be safe to use in the msgh_id field,
   but it isn't documented. Use the source and see if they
   should be OK. */
#define ID_STOP 1
#define ID_RESUME 2

/* These values are only used on the reply port */
#define ID_ACK 3

#if defined(THREADS)

GC_mprotect_state_t GC_mprotect_state;
/* The following should ONLY be called when the world is stopped */
static void GC_mprotect_thread_notify(mach_msg_id_t id)
{
  struct {
    GC_msg_t msg;
    mach_msg_trailer_t trailer;
  } buf;

  mach_msg_return_t r;
  /* remote, local */
  buf.msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
  buf.msg.head.msgh_size = sizeof(buf.msg);
  buf.msg.head.msgh_remote_port = GC_ports.exception;
  buf.msg.head.msgh_local_port = MACH_PORT_NULL;
  buf.msg.head.msgh_id = id;

  r = mach_msg(&buf.msg.head, MACH_SEND_MSG | MACH_RCV_MSG | MACH_RCV_LARGE,
               sizeof(buf.msg), sizeof(buf), GC_ports.reply,
               MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
  if(r != MACH_MSG_SUCCESS)
    ABORT("mach_msg failed in GC_mprotect_thread_notify");
  if(buf.msg.head.msgh_id != ID_ACK)
    ABORT("invalid ack in GC_mprotect_thread_notify");
}

/* Should only be called by the mprotect thread */
static void GC_mprotect_thread_reply(void)
{
  GC_msg_t msg;
  mach_msg_return_t r;
  /* remote, local */
  msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
  msg.head.msgh_size = sizeof(msg);
  msg.head.msgh_remote_port = GC_ports.reply;
  msg.head.msgh_local_port = MACH_PORT_NULL;
  msg.head.msgh_id = ID_ACK;

  r = mach_msg(&msg.head, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
               MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
  if(r != MACH_MSG_SUCCESS)
    ABORT("mach_msg failed in GC_mprotect_thread_reply");
}

void GC_mprotect_stop(void)
{
  GC_mprotect_thread_notify(ID_STOP);
}
void GC_mprotect_resume(void)
{
  GC_mprotect_thread_notify(ID_RESUME);
}

#else /* !THREADS */
/* The compiler should optimize away any GC_mprotect_state computations */
#define GC_mprotect_state GC_MP_NORMAL
#endif
static void *GC_mprotect_thread(void *arg)
{
  mach_msg_return_t r;
  /* These two structures contain some private kernel data. We don't need to
     access any of it so we don't bother defining a proper struct. The
     correct definitions are in the xnu source code. */
  struct {
    mach_msg_header_t head;
    char data[256];
  } reply;
  struct {
    mach_msg_header_t head;
    mach_msg_body_t msgh_body;
    char data[1024];
  } msg;

  mach_msg_id_t id;

  GC_darwin_register_mach_handler_thread(mach_thread_self());

  for(;;) {
    r = mach_msg(&msg.head, MACH_RCV_MSG | MACH_RCV_LARGE |
                 (GC_mprotect_state == GC_MP_DISCARDING ? MACH_RCV_TIMEOUT
                                                        : 0),
                 0, sizeof(msg), GC_ports.exception,
                 GC_mprotect_state == GC_MP_DISCARDING ? 0
                 : MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);

    id = r == MACH_MSG_SUCCESS ? msg.head.msgh_id : -1;

#   if defined(THREADS)
      if(GC_mprotect_state == GC_MP_DISCARDING) {
        if(r == MACH_RCV_TIMED_OUT) {
          GC_mprotect_state = GC_MP_STOPPED;
          GC_mprotect_thread_reply();
          continue;
        }
        if(r == MACH_MSG_SUCCESS && (id == ID_STOP || id == ID_RESUME))
          ABORT("out of order mprotect thread request");
      }
#   endif /* THREADS */

    if(r != MACH_MSG_SUCCESS) {
      GC_err_printf("mach_msg failed with %d %s\n", (int)r,
                    mach_error_string(r));
      ABORT("mach_msg failed");
    }

    switch(id) {
#     if defined(THREADS)
        case ID_STOP:
          if(GC_mprotect_state != GC_MP_NORMAL)
            ABORT("Called mprotect_stop when state wasn't normal");
          GC_mprotect_state = GC_MP_DISCARDING;
          break;
        case ID_RESUME:
          if(GC_mprotect_state != GC_MP_STOPPED)
            ABORT("Called mprotect_resume when state wasn't stopped");
          GC_mprotect_state = GC_MP_NORMAL;
          GC_mprotect_thread_reply();
          break;
#     endif /* THREADS */
        default:
          /* Handle the message (calls catch_exception_raise) */
          if(!exc_server(&msg.head, &reply.head))
            ABORT("exc_server failed");
          /* Send the reply */
          r = mach_msg(&reply.head, MACH_SEND_MSG, reply.head.msgh_size, 0,
                       MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
                       MACH_PORT_NULL);
          if(r != MACH_MSG_SUCCESS) {
            /* This will fail if the thread dies, but the thread */
            /* shouldn't die...                                  */
#           ifdef BROKEN_EXCEPTION_HANDLING
              GC_err_printf("mach_msg failed with %d %s while sending "
                            "exc reply\n", (int)r, mach_error_string(r));
#           else
              ABORT("mach_msg failed while sending exception reply");
#           endif
          }
    } /* switch */
  } /* for(;;) */
    /* NOT REACHED */
  return NULL;
}
/* All this SIGBUS code shouldn't be necessary. All protection faults should
   be going through the mach exception handler. However, it seems a SIGBUS is
   occasionally sent for some unknown reason. Even more odd, it seems to be
   meaningless and safe to ignore. */
#ifdef BROKEN_EXCEPTION_HANDLING

static SIG_HNDLR_PTR GC_old_bus_handler;

/* Updates to this aren't atomic, but the SIGBUSs seem pretty rare.
   Even if this doesn't get updated properly, it isn't really a problem */
static int GC_sigbus_count;

static void GC_darwin_sigbus(int num, siginfo_t *sip, void *context)
{
  if(num != SIGBUS)
    ABORT("Got a non-sigbus signal in the sigbus handler");

  /* Ugh... some seem safe to ignore, but too many in a row probably means
     trouble. GC_sigbus_count is reset for each mach exception that is
     handled */
  if(GC_sigbus_count >= 8) {
    ABORT("Got more than 8 SIGBUSs in a row!");
  } else {
    GC_sigbus_count++;
    WARN("Ignoring SIGBUS.\n", 0);
  }
}
#endif /* BROKEN_EXCEPTION_HANDLING */
void GC_dirty_init(void)
{
  kern_return_t r;
  mach_port_t me;
  pthread_t thread;
  pthread_attr_t attr;
  exception_mask_t mask;

  if (GC_print_stats == VERBOSE)
    GC_log_printf("Initializing mach/darwin mprotect virtual dirty bit "
                  "implementation\n");
# ifdef BROKEN_EXCEPTION_HANDLING
    WARN("Enabling workarounds for various darwin "
         "exception handling bugs.\n", 0);
# endif
  GC_dirty_maintained = TRUE;
  if (GC_page_size % HBLKSIZE != 0) {
    GC_err_printf("Page size not multiple of HBLKSIZE\n");
    ABORT("Page size not multiple of HBLKSIZE");
  }

  GC_task_self = me = mach_task_self();

  r = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &GC_ports.exception);
  if(r != KERN_SUCCESS)
    ABORT("mach_port_allocate failed (exception port)");

  r = mach_port_insert_right(me, GC_ports.exception, GC_ports.exception,
                             MACH_MSG_TYPE_MAKE_SEND);
  if(r != KERN_SUCCESS)
    ABORT("mach_port_insert_right failed (exception port)");

# if defined(THREADS)
    r = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &GC_ports.reply);
    if(r != KERN_SUCCESS)
      ABORT("mach_port_allocate failed (reply port)");
# endif

  /* The exceptions we want to catch */
  mask = EXC_MASK_BAD_ACCESS;

  r = task_get_exception_ports(me, mask, GC_old_exc_ports.masks,
                               &GC_old_exc_ports.count,
                               GC_old_exc_ports.ports,
                               GC_old_exc_ports.behaviors,
                               GC_old_exc_ports.flavors);
  if(r != KERN_SUCCESS)
    ABORT("task_get_exception_ports failed");

  r = task_set_exception_ports(me, mask, GC_ports.exception,
                               EXCEPTION_DEFAULT,
                               GC_MACH_THREAD_STATE);
  if(r != KERN_SUCCESS)
    ABORT("task_set_exception_ports failed");
  if(pthread_attr_init(&attr) != 0)
    ABORT("pthread_attr_init failed");
  if(pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
    ABORT("pthread_attr_setdetachstate failed");

# undef pthread_create
  /* This will call the real pthread function, not our wrapper */
  if(pthread_create(&thread, &attr, GC_mprotect_thread, NULL) != 0)
    ABORT("pthread_create failed");
  pthread_attr_destroy(&attr);

  /* Setup the sigbus handler for ignoring the meaningless SIGBUSs */
# ifdef BROKEN_EXCEPTION_HANDLING
    {
      struct sigaction sa, oldsa;
      sa.sa_handler = (SIG_HNDLR_PTR)GC_darwin_sigbus;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = SA_RESTART|SA_SIGINFO;
      if(sigaction(SIGBUS, &sa, &oldsa) < 0)
        ABORT("sigaction failed");
      GC_old_bus_handler = (SIG_HNDLR_PTR)oldsa.sa_handler;
      if (GC_old_bus_handler != SIG_DFL) {
        if (GC_print_stats == VERBOSE)
          GC_err_printf("Replaced other SIGBUS handler\n");
      }
    }
# endif /* BROKEN_EXCEPTION_HANDLING */
}
/* The source code for Apple's GDB was used as a reference for the exception
   forwarding code. This code is similar to the GDB code only because there
   is only one way to do it. */
static kern_return_t GC_forward_exception(mach_port_t thread,
                                          mach_port_t task,
                                          exception_type_t exception,
                                          exception_data_t data,
                                          mach_msg_type_number_t data_count)
{
  unsigned int i;
  kern_return_t r;
  mach_port_t port;
  exception_behavior_t behavior;
  thread_state_flavor_t flavor;

  thread_state_data_t thread_state;
  mach_msg_type_number_t thread_state_count = THREAD_STATE_MAX;

  for(i=0; i < GC_old_exc_ports.count; i++)
    if(GC_old_exc_ports.masks[i] & (1 << exception))
      break;
  if(i==GC_old_exc_ports.count)
    ABORT("No handler for exception!");

  port = GC_old_exc_ports.ports[i];
  behavior = GC_old_exc_ports.behaviors[i];
  flavor = GC_old_exc_ports.flavors[i];

  if(behavior != EXCEPTION_DEFAULT) {
    r = thread_get_state(thread, flavor, thread_state, &thread_state_count);
    if(r != KERN_SUCCESS)
      ABORT("thread_get_state failed in forward_exception");
  }

  switch(behavior) {
    case EXCEPTION_DEFAULT:
      r = exception_raise(port, thread, task, exception, data, data_count);
      break;
    case EXCEPTION_STATE:
      r = exception_raise_state(port, thread, task, exception, data,
                                data_count, &flavor, thread_state,
                                thread_state_count,
                                thread_state, &thread_state_count);
      break;
    case EXCEPTION_STATE_IDENTITY:
      r = exception_raise_state_identity(port, thread, task, exception, data,
                                         data_count, &flavor, thread_state,
                                         thread_state_count, thread_state,
                                         &thread_state_count);
      break;
    default:
      r = KERN_FAILURE; /* make gcc happy */
      ABORT("forward_exception: unknown behavior");
      break;
  }

  if(behavior != EXCEPTION_DEFAULT) {
    r = thread_set_state(thread, flavor, thread_state, thread_state_count);
    if(r != KERN_SUCCESS)
      ABORT("thread_set_state failed in forward_exception");
  }

  return r;
}
#define FWD() GC_forward_exception(thread, task, exception, code, code_count)

/* This violates the namespace rules but there isn't anything that can be
   done about it. The exception handling stuff is hard coded to call this. */
kern_return_t
catch_exception_raise(mach_port_t exception_port, mach_port_t thread,
                      mach_port_t task, exception_type_t exception,
                      exception_data_t code, mach_msg_type_number_t code_count)
{
  kern_return_t r;
  char *addr;
  struct hblk *h;
  unsigned int i;
# if defined(POWERPC)
#   if CPP_WORDSZ == 32
      thread_state_flavor_t flavor = PPC_EXCEPTION_STATE;
      mach_msg_type_number_t exc_state_count = PPC_EXCEPTION_STATE_COUNT;
      ppc_exception_state_t exc_state;
#   else
      thread_state_flavor_t flavor = PPC_EXCEPTION_STATE64;
      mach_msg_type_number_t exc_state_count = PPC_EXCEPTION_STATE64_COUNT;
      ppc_exception_state64_t exc_state;
#   endif
# elif defined(I386) || defined(X86_64)
#   if CPP_WORDSZ == 32
      thread_state_flavor_t flavor = x86_EXCEPTION_STATE32;
      mach_msg_type_number_t exc_state_count = x86_EXCEPTION_STATE32_COUNT;
      x86_exception_state32_t exc_state;
#   else
      thread_state_flavor_t flavor = x86_EXCEPTION_STATE64;
      mach_msg_type_number_t exc_state_count = x86_EXCEPTION_STATE64_COUNT;
      x86_exception_state64_t exc_state;
#   endif
# else
#   error FIXME for non-ppc/x86 darwin
# endif

  if(exception != EXC_BAD_ACCESS || code[0] != KERN_PROTECTION_FAILURE) {
#   ifdef DEBUG_EXCEPTION_HANDLING
      /* We aren't interested, pass it on to the old handler */
      GC_printf("Exception: 0x%x Code: 0x%x 0x%x in catch....\n", exception,
                code_count > 0 ? code[0] : -1, code_count > 1 ? code[1] : -1);
#   endif
    return FWD();
  }

  r = thread_get_state(thread, flavor, (natural_t*)&exc_state,
                       &exc_state_count);
  if(r != KERN_SUCCESS) {
    /* The thread is supposed to be suspended while the exception handler
       is called. This shouldn't fail. */
#   ifdef BROKEN_EXCEPTION_HANDLING
      GC_err_printf("thread_get_state failed in catch_exception_raise\n");
      return KERN_SUCCESS;
#   else
      ABORT("thread_get_state failed in catch_exception_raise");
#   endif
  }

  /* This is the address that caused the fault */
# if defined(POWERPC)
    addr = (char*) exc_state. THREAD_FLD(dar);
# elif defined (I386) || defined (X86_64)
    addr = (char*) exc_state. THREAD_FLD(faultvaddr);
# else
#   error FIXME for non POWERPC/I386
# endif

  if((HDR(addr)) == 0) {
    /* Ugh... just like the SIGBUS problem above, it seems we get a bogus
       KERN_PROTECTION_FAILURE every once in a while. We wait till we get
       a bunch in a row before doing anything about it. If a "real" fault
       ever occurs it'll just keep faulting over and over and we'll hit
       the limit pretty quickly. */
#   ifdef BROKEN_EXCEPTION_HANDLING
      static char *last_fault;
      static int last_fault_count;

      if(addr != last_fault) {
        last_fault = addr;
        last_fault_count = 0;
      }
      if(++last_fault_count < 32) {
        if(last_fault_count == 1)
          WARN("Ignoring KERN_PROTECTION_FAILURE at %lx\n", (GC_word)addr);
        return KERN_SUCCESS;
      }

      GC_err_printf("Unexpected KERN_PROTECTION_FAILURE at %p\n",addr);
      /* Can't pass it along to the signal handler because that is
         ignoring SIGBUS signals. We also shouldn't call ABORT here as
         signals don't always work too well from the exception handler. */
      GC_err_printf("Aborting\n");
      exit(EXIT_FAILURE);
#   else /* BROKEN_EXCEPTION_HANDLING */
      /* Pass it along to the next exception handler
         (which should call SIGBUS/SIGSEGV) */
      return FWD();
#   endif /* !BROKEN_EXCEPTION_HANDLING */
  }

# ifdef BROKEN_EXCEPTION_HANDLING
    /* Reset the number of consecutive SIGBUSs */
    GC_sigbus_count = 0;
# endif

  if(GC_mprotect_state == GC_MP_NORMAL) { /* common case */
    h = (struct hblk*)((word)addr & ~(GC_page_size-1));
    UNPROTECT(h, GC_page_size);
    for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
      register int index = PHT_HASH(h+i);
      async_set_pht_entry_from_index(GC_dirty_pages, index);
    }
  } else if(GC_mprotect_state == GC_MP_DISCARDING) {
    /* Lie to the thread for now. No sense UNPROTECT()ing the memory
       when we're just going to PROTECT() it again later. The thread
       will just fault again once it resumes */
  } else {
    /* Shouldn't happen, I don't think */
    GC_printf("KERN_PROTECTION_FAILURE while world is stopped\n");
    return FWD();
  }
  return KERN_SUCCESS;
}
#undef FWD
/* These should never be called, but just in case...  */
kern_return_t
catch_exception_raise_state(mach_port_name_t exception_port, int exception,
                            exception_data_t code,
                            mach_msg_type_number_t codeCnt, int flavor,
                            thread_state_t old_state, int old_stateCnt,
                            thread_state_t new_state, int new_stateCnt)
{
  ABORT("catch_exception_raise_state");
  return(KERN_INVALID_ARGUMENT);
}

kern_return_t
catch_exception_raise_state_identity(mach_port_name_t exception_port,
                                     mach_port_t thread, mach_port_t task,
                                     int exception, exception_data_t code,
                                     mach_msg_type_number_t codeCnt,
                                     int flavor,
                                     thread_state_t old_state,
                                     int old_stateCnt,
                                     thread_state_t new_state,
                                     int new_stateCnt)
{
  ABORT("catch_exception_raise_state_identity");
  return(KERN_INVALID_ARGUMENT);
}

#endif /* DARWIN && MPROTECT_VDB */
# ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
  int GC_incremental_protection_needs(void)
  {
    return GC_PROTECTS_NONE;
  }
# endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
/*
 * Call stack save code for debugging.
 * Should probably be in mach_dep.c, but that requires reorganization.
 */

/* I suspect the following works for most X86 *nix variants, so        */
/* long as the frame pointer is explicitly stored.  In the case of gcc,*/
/* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
#if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
#   include <features.h>

    struct frame {
        struct frame *fr_savfp;
        long    fr_savpc;
        long    fr_arg[NARGS];  /* All the arguments go here.   */
    };
#endif

#if defined(SPARC)
#  if defined(LINUX)
#    include <features.h>

     struct frame {
        long    fr_local[8];
        long    fr_arg[6];
        struct frame *fr_savfp;
        long    fr_savpc;
#       ifndef __arch64__
          char  *fr_stret;
#       endif
        long    fr_argd[6];
        long    fr_argx[0];
     };
#  elif defined (DRSNX)
#    include <sys/sparc/frame.h>
#  elif defined(OPENBSD)
#    include <frame.h>
#  elif defined(FREEBSD) || defined(NETBSD)
#    include <machine/frame.h>
#  else
#    include <sys/frame.h>
#  endif
#  if NARGS > 6
#    error We only know how to get the first 6 arguments
#  endif
#endif /* SPARC */
#ifdef NEED_CALLINFO
/* Fill in the pc and argument information for up to NFRAMES of my */
/* callers.  Ignore my frame and my caller's frame.                */

#ifdef LINUX
#   include <unistd.h>
#endif

#endif /* NEED_CALLINFO */
#if defined(GC_HAVE_BUILTIN_BACKTRACE)
# ifdef _MSC_VER
#   include "private/msvc_dbg.h"
# else
#   include <execinfo.h>
# endif
#endif
#ifdef SAVE_CALL_CHAIN

#if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
    && defined(GC_HAVE_BUILTIN_BACKTRACE)

#ifdef REDIRECT_MALLOC
  /* Deal with possible malloc calls in backtrace by omitting */
  /* the infinitely recursing backtrace.                      */
# ifdef THREADS
    __thread      /* If your compiler doesn't understand this,    */
                  /* you could use something like                 */
                  /* pthread_getspecific.                         */
# endif
  GC_bool GC_in_save_callers = FALSE;
#endif
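/* Rationale: with REDIRECT_MALLOC, malloc is the collector's own     */
/* allocator, and backtrace() may call malloc internally.  Without    */
/* the flag above, that allocation could re-enter GC_save_callers     */
/* and recurse forever; instead, the nested call records a dummy      */
/* trace and returns immediately.                                     */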
void GC_save_callers (struct callinfo info[NFRAMES])
{
  void * tmp_info[NFRAMES + 1];
  int npcs, i;
# define IGNORE_FRAMES 1

  /* We retrieve NFRAMES+1 pc values, but discard the first, since it */
  /* points to our own frame.                                         */
# ifdef REDIRECT_MALLOC
    if (GC_in_save_callers) {
      info[0].ci_pc = (word)(&GC_save_callers);
      for (i = 1; i < NFRAMES; ++i) info[i].ci_pc = 0;
      return;
    }
    GC_in_save_callers = TRUE;
# endif
  GC_ASSERT(sizeof(struct callinfo) == sizeof(void *));
  npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES);
  BCOPY(tmp_info+IGNORE_FRAMES, info, (npcs - IGNORE_FRAMES) * sizeof(void *));
  for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0;
# ifdef REDIRECT_MALLOC
    GC_in_save_callers = FALSE;
# endif
}
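/* Illustrative: backtrace() records return addresses innermost       */
/* first.  In a chain main -> f -> g -> GC_save_callers it yields     */
/* roughly { pc inside GC_save_callers, pc in g, pc in f, ... };      */
/* discarding the first IGNORE_FRAMES entries leaves just the         */
/* callers that are actually of interest.                             */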
#else /* No builtin backtrace; do it ourselves */

#if (defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD)) && defined(SPARC)
#  define FR_SAVFP fr_fp
#  define FR_SAVPC fr_pc
#else
#  define FR_SAVFP fr_savfp
#  define FR_SAVPC fr_savpc
#endif

#if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
#   define BIAS 2047
#else
#   define BIAS 0
#endif
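/* On 64-bit SPARC (the V9 ABI) the in-register stack and frame       */
/* pointers point 2047 bytes below the actual save area (the "stack   */
/* bias"), so every saved fp must be adjusted by BIAS before being    */
/* dereferenced; elsewhere BIAS is 0 and the adjustment is a no-op.   */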
void GC_save_callers (struct callinfo info[NFRAMES])
{
  struct frame *frame;
  struct frame *fp;
  int nframes = 0;
# ifdef I386
    /* We assume this is turned on only with gcc as the compiler. */
    asm("movl %%ebp,%0" : "=r"(frame));
    fp = frame;
# else
    frame = (struct frame *) GC_save_regs_in_stack ();
    fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
# endif
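  /* The walk below terminates when the next frame would be hotter    */
  /* than the current one, when it would pass GC_stackbottom (the     */
  /* cold end of the stack), or after NFRAMES entries; the first two  */
  /* checks guard against corrupt or foreign frames.                  */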
  for (; (!(fp HOTTER_THAN frame) && !(GC_stackbottom HOTTER_THAN (ptr_t)fp)
          && (nframes < NFRAMES));
       fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
      register int i;

      info[nframes].ci_pc = fp->FR_SAVPC;
#     if NARGS > 0
        for (i = 0; i < NARGS; i++) {
          info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
        }
#     endif /* NARGS > 0 */
  }
  if (nframes < NFRAMES) info[nframes].ci_pc = 0;
}

#endif /* No builtin backtrace */

#endif /* SAVE_CALL_CHAIN */
#ifdef NEED_CALLINFO

/* Print info to stderr.  We do NOT hold the allocation lock. */
void GC_print_callers (struct callinfo info[NFRAMES])
{
    register int i;
    static int reentry_count = 0;
    GC_bool stop = FALSE;

    /* FIXME: This should probably use a different lock, so that we  */
    /* become callable with or without the allocation lock.          */
    LOCK();
      ++reentry_count;
    UNLOCK();

#   if NFRAMES == 1
      GC_err_printf("\tCaller at allocation:\n");
#   else
      GC_err_printf("\tCall chain at allocation:\n");
#   endif
    for (i = 0; i < NFRAMES && !stop; i++) {
        if (info[i].ci_pc == 0) break;
#       if NARGS > 0
        {
          int j;

          GC_err_printf("\t\targs: ");
          for (j = 0; j < NARGS; j++) {
            if (j != 0) GC_err_printf(", ");
            GC_err_printf("%d (0x%X)", ~(info[i].ci_arg[j]),
                                       ~(info[i].ci_arg[j]));
          }
          GC_err_printf("\n");
        }
#       endif
        if (reentry_count > 1) {
            /* We were called during an allocation during a previous */
            /* GC_print_callers call; punt.                          */
            GC_err_printf("\t\t##PC##= 0x%lx\n", info[i].ci_pc);
            continue;
        }
        {
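          /* Everything in this block may allocate (backtrace_symbols */
          /* and the popen machinery below both can), which is how    */
          /* the reentrant calls punted above arise in the first      */
          /* place.                                                   */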
#         ifdef LINUX
            FILE *pipe;
#         endif
#         if defined(GC_HAVE_BUILTIN_BACKTRACE) \
             && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
            char **sym_name =
              backtrace_symbols((void **)(&(info[i].ci_pc)), 1);
            char *name = sym_name[0];
#         else
            char buf[40];
            char *name = buf;

            sprintf(buf, "##PC##= 0x%lx", info[i].ci_pc);
#         endif
#         if defined(LINUX) && !defined(SMALL_CONFIG)
            /* Try for a line number. */
            {
#             define EXE_SZ 100
              static char exe_name[EXE_SZ];
#             define CMD_SZ 200
              char cmd_buf[CMD_SZ];
#             define RESULT_SZ 200
              static char result_buf[RESULT_SZ];
              size_t result_len;
              char *old_preload;
#             define PRELOAD_SZ 200
              char preload_buf[PRELOAD_SZ];
              static GC_bool found_exe_name = FALSE;
              static GC_bool will_fail = FALSE;
              int ret_code;
              /* Try to get it via a hairy and expensive scheme. */
              /* First we get the name of the executable:        */
              if (will_fail) goto out;
              if (!found_exe_name) {
                ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ);
                if (ret_code < 0 || ret_code >= EXE_SZ
                    || exe_name[0] != '/') {
                  will_fail = TRUE;    /* Don't try again. */
                  goto out;
                }
                exe_name[ret_code] = '\0';
                found_exe_name = TRUE;
              }
              /* Then we use popen to start addr2line -e <exe> <addr>. */
              /* There are faster ways to do this, but hopefully this  */
              /* isn't time critical.                                  */
              sprintf(cmd_buf, "/usr/bin/addr2line -f -e %s 0x%lx", exe_name,
                               (unsigned long)info[i].ci_pc);
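              /* LD_PRELOAD is saved and cleared around popen() below */
              /* so that the child shell and addr2line do not start   */
              /* with the collector (or anything else) preloaded into */
              /* them; it is restored immediately after the fork.     */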
              old_preload = getenv ("LD_PRELOAD");
              if (0 != old_preload) {
                if (strlen (old_preload) >= PRELOAD_SZ) {
                  will_fail = TRUE;
                  goto out;
                }
                strcpy (preload_buf, old_preload);
                unsetenv ("LD_PRELOAD");
              }
              pipe = popen(cmd_buf, "r");
              if (0 != old_preload
                  && 0 != setenv ("LD_PRELOAD", preload_buf, 0)) {
                WARN("Failed to reset LD_PRELOAD\n", 0);
              }
              if (pipe == NULL
                  || (result_len = fread(result_buf, 1, RESULT_SZ - 1, pipe))
                     == 0) {
                if (pipe != NULL) pclose(pipe);
                will_fail = TRUE;
                goto out;
              }
              if (result_buf[result_len - 1] == '\n') --result_len;
              result_buf[result_len] = 0;
              if (result_buf[0] == '?'
                  || (result_len >= 2 && result_buf[result_len-2] == ':'
                      && result_buf[result_len-1] == '0')) {
                pclose(pipe);
                goto out;
              }
              /* Get rid of embedded newline, if any.  Test for "main". */
              {
                char * nl = strchr(result_buf, '\n');
                if (nl != NULL && nl < result_buf + result_len) {
                  *nl = ':';
                }
                /* Guard against nl == NULL before using the offset. */
                if (strncmp(result_buf, "main",
                            nl != NULL ? (size_t)(nl - result_buf)
                                       : result_len) == 0) {
                  stop = TRUE;
                }
              }
              if (result_len < RESULT_SZ - 25) {
                /* Add in hex address. */
                sprintf(result_buf + result_len, " [0x%lx]",
                        (unsigned long)info[i].ci_pc);
              }
              name = result_buf;
              pclose(pipe);
              out:;
            }
#         endif /* LINUX */
          GC_err_printf("\t\t%s\n", name);
#         if defined(GC_HAVE_BUILTIN_BACKTRACE) \
             && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
            free(sym_name);    /* May call GC_free; that's OK. */
#         endif
        }
    }
    LOCK();
      --reentry_count;
    UNLOCK();
}
#endif /* NEED_CALLINFO */

#if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)

/* Dump /proc/self/maps to GC_stderr, to enable looking up names for */
/* addresses in FIND_LEAK output.                                    */
static word dump_maps(char *maps)
{
    GC_err_write(maps, strlen(maps));
    return 1;
}
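/* Each line of the dumped map has the usual /proc/self/maps form,    */
/*   <start>-<end> <perms> <offset> <dev> <inode> <pathname>          */
/* e.g. "08048000-080b0000 r-xp 00000000 03:01 8204 /bin/sh", which   */
/* is what lets FIND_LEAK addresses be attributed to a mapping.       */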
void GC_print_address_map(void)
{
    GC_err_printf("---------- Begin address map ----------\n");
    dump_maps(GC_get_maps());
    GC_err_printf("---------- End address map ----------\n");