/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* These are extra allocation routines which are likely to be less     */
/* frequently used than those in malloc.c.  They are separate in the   */
/* hope that the .o file will be excluded from statically linked       */
/* executables.  We should probably break this up further.             */
#include "private/gc_priv.h"
/* extern ptr_t GC_clear_stack(); /* in misc.c, behaves like identity */
void GC_extend_size_map();              /* in misc.c. */
GC_bool GC_alloc_reclaim_list();        /* in malloc.c */
/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
void ** const GC_objfreelist_ptr = GC_objfreelist;
void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
void * GC_generic_or_special_malloc(size_t lb, int knd)
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
        case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#       endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb, knd));
    }
}
/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old.              */
/* Shrinking of large blocks is not implemented well.                  */
void * GC_realloc(void * p, size_t lb)
{
    size_t sz;          /* Current size in bytes  */
    size_t orig_sz;     /* Original sz in bytes   */
    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    obj_kind = hhdr -> hb_obj_kind;
    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
          sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
          descr = GC_obj_kinds[obj_kind].ok_descriptor;
          if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
          hhdr -> hb_descr = descr;
#         ifdef MARK_BIT_PER_OBJ
            GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
#         else
            GC_ASSERT(hhdr -> hb_large_block &&
                      hhdr -> hb_map[ANY_INDEX] == 1);
#         endif
          if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
          /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
                if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            /* Clear unneeded part of object to avoid bogus pointer */
            /* tracing.                                             */
            /* Safe for stubborn objects.                           */
              BZERO(((ptr_t)p) + lb, orig_sz - lb);
            void * result =
                      GC_generic_or_special_malloc((word)lb, obj_kind);

            if (result == 0) return(0);
                /* Could also return original object.  But this     */
                /* gives the client warning of imminent disaster.   */
            BCOPY(p, result, lb);
          void * result =
                    GC_generic_or_special_malloc((word)lb, obj_kind);

          if (result == 0) return(0);
          BCOPY(p, result, sz);
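/* Illustrative (not part of the collector): a minimal sketch of how a    */
/* client might grow a collectable buffer with GC_realloc.  The helper    */
/* name grow_ints is hypothetical; it assumes the client includes <gc.h>. */
#if 0
  static int *grow_ints(int *buf, size_t new_count)
  {
    /* Like realloc, but the object stays under collector control and the */
    /* old block is reclaimed automatically once it becomes unreachable.  */
    int *p = (int *)GC_realloc(buf, new_count * sizeof(int));

    return p != 0 ? p : buf;    /* on failure, keep the old block */
  }
#endif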
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC
/* As with malloc, avoid two levels of extra calls here.       */
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, RA "unknown", 0)
void * realloc(void * p, size_t lb)
{
    return(REDIRECT_REALLOC(p, lb));
}
# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */
/* Allocate memory such that only pointers to near the         */
/* beginning of the object are considered.                     */
/* We avoid holding allocation lock while we clear memory.     */
void * GC_generic_malloc_ignore_off_page(size_t lb, int k)
{
    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    lb_rounded = WORDS_TO_BYTES(lw);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
    if (GC_debugging_started) {
        BZERO(result, n_blocks * HBLKSIZE);
    } else {
        /* Clear any memory that might be used for GC descriptors */
        /* before we release the lock.                             */
          ((word *)result)[0] = 0;
          ((word *)result)[1] = 0;
          ((word *)result)[lw-1] = 0;
          ((word *)result)[lw-2] = 0;
    }
    GC_bytes_allocd += lb_rounded;
        return((*GC_oom_fn)(lb));
    if (init && !GC_debugging_started) {
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return(result);
}
void * GC_malloc_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}
void * GC_malloc_atomic_ignore_off_page(size_t lb)
{
    return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
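/* Illustrative (not part of the collector): GC_malloc_ignore_off_page is */
/* meant for large objects where the client promises to keep a pointer to */
/* (or near) the start of the object; interior pointers far past the      */
/* beginning need not keep it alive.  The names below are hypothetical    */
/* and assume the client includes <gc.h>.                                  */
#if 0
  struct big_vec {
    size_t len;
    double data[1];                    /* allocated with trailing space */
  };

  static struct big_vec *make_big_vec(size_t n)
  {
    /* Keep this base pointer reachable for the lifetime of the object. */
    struct big_vec *v = (struct big_vec *)
        GC_malloc_ignore_off_page(sizeof(struct big_vec)
                                  + (n - 1) * sizeof(double));

    if (v != 0) v -> len = n;
    return v;
  }
#endif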
/* Increment GC_bytes_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                        */
void GC_incr_bytes_allocd(size_t n)
{
    GC_bytes_allocd += n;
}
/* The same for GC_bytes_freed.                         */
void GC_incr_bytes_freed(size_t n)
{
    GC_bytes_freed += n;
}
extern signed_word GC_bytes_found;      /* Protected by GC lock.  */
volatile signed_word GC_bytes_allocd_tmp = 0;
                        /* Number of bytes of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                   */
#endif /* PARALLEL_MARK */
/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                       */
/* We assume that the size is a multiple of GRANULE_BYTES.              */
/* We return the free list by assigning it to *result, since it is      */
/* not safe to return, e.g. a linked list of pointer-free objects,      */
/* since the collector would not retain the entire list if it were      */
/* invoked just as we were returning.                                   */
/* Note that the client should usually clear the link field.            */
void GC_generic_malloc_many(size_t lb, int k, void **result)
{
    size_t lw;                  /* Length in words.     */
    size_t lg;                  /* Length in granules.  */
    signed_word my_bytes_allocd = 0;
    struct obj_kind * ok = &(GC_obj_kinds[k]);
    GC_ASSERT((lb & (GRANULE_BYTES-1)) == 0);
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = BYTES_TO_WORDS(lb);
    lg = BYTES_TO_GRANULES(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
      if (GC_incremental && !GC_dont_gc) {
        GC_collect_a_little_inner(1);
      }
    /* First see if we can reclaim a page of objects waiting to be */
    /* reclaimed.                                                   */
        struct hblk ** rlh = ok -> ok_reclaim_list;
        while ((hbp = *rlh) != 0) {
            *rlh = hhdr -> hb_next;
            GC_ASSERT(hhdr -> hb_sz == lb);
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
                signed_word my_bytes_allocd_tmp = GC_bytes_allocd_tmp;

                GC_ASSERT(my_bytes_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.   */
                /* Thus we can't accidentally adjust it down in more */
                /* than one thread simultaneously.                   */
                if (my_bytes_allocd_tmp != 0) {
                  (void)AO_fetch_and_add(
                        (volatile AO_t *)(&GC_bytes_allocd_tmp),
                        (AO_t)(-my_bytes_allocd_tmp));
                  GC_bytes_allocd += my_bytes_allocd_tmp;
                }
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              GC_release_mark_lock();
            op = GC_reclaim_generic(hbp, hhdr, lb,
                                    ok -> ok_init, 0, &my_bytes_allocd);
            /* We also reclaimed memory, so we need to adjust   */
            /* that count.                                      */
            /* This should be atomic, so the results may be     */
            /* inaccurate.                                      */
            GC_bytes_found += my_bytes_allocd;
#             ifdef PARALLEL_MARK
                (void)AO_fetch_and_add(
                        (volatile AO_t *)(&GC_bytes_allocd_tmp),
                        (AO_t)(my_bytes_allocd));
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                (void) GC_clear_stack(0);
            GC_bytes_allocd += my_bytes_allocd;
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
#           endif
            /* GC lock is needed for reclaim list access.  We        */
            /* must decrement fl_builder_count before reacquiring GC */
            /* lock.  Hopefully this path is rare.                   */
    /* Next try to use prefix of global free list if there is one.    */
    /* We don't refill it, but we need to use it up before allocating */
    /* a new block ourselves.                                          */
      opp = &(GC_obj_kinds[k].ok_freelist[lg]);
    if ( (op = *opp) != 0 ) {
        for (p = op; p != 0; p = obj_link(p)) {
          my_bytes_allocd += lb;
          if (my_bytes_allocd >= HBLKSIZE) {
        GC_bytes_allocd += my_bytes_allocd;
    /* Next try to allocate a new block worth of objects of this size. */
        struct hblk *h = GC_allochblk(lb, k, 0);
        if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
        GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
#       ifdef PARALLEL_MARK
          GC_acquire_mark_lock();
          ++ GC_fl_builder_count;
          GC_release_mark_lock();
        op = GC_build_fl(h, lw, ok -> ok_init, 0);
#       ifdef PARALLEL_MARK
          GC_acquire_mark_lock();
          -- GC_fl_builder_count;
          if (GC_fl_builder_count == 0) GC_notify_all_builder();
          GC_release_mark_lock();
          (void) GC_clear_stack(0);
    /* As a last attempt, try allocating a single object.  Note that */
    /* this may trigger a collection or expand the heap.             */
      op = GC_generic_malloc_inner(lb, k);
      if (0 != op) obj_link(op) = 0;
    (void) GC_clear_stack(0);
}
void * GC_malloc_many(size_t lb)
{
    void *result;

    GC_generic_malloc_many(((lb + EXTRA_BYTES + GRANULE_BYTES-1)
                           & ~(GRANULE_BYTES-1)),
                           NORMAL, &result);
    return result;
}
/* Note that the "atomic" version of this would be unsafe, since the */
/* links would not be seen by the collector.                         */
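/* Illustrative (not part of the collector): the intended usage pattern    */
/* described above -- a per-thread free list replenished in one call.      */
/* The identifiers my_free_list and my_alloc32 are hypothetical; per the   */
/* comment above, the returned objects are linked through their first      */
/* word, so the link is read and cleared via a void** cast.                */
#if 0
  static void *my_free_list = 0;        /* thread-local in real code */

  static void *my_alloc32(void)         /* hand out one 32-byte object */
  {
    void *p = my_free_list;

    if (p == 0) {
      /* One lock acquisition returns a whole list of 32-byte objects. */
      my_free_list = GC_malloc_many(32);
      p = my_free_list;
      if (p == 0) return 0;             /* out of memory */
    }
    my_free_list = *(void **)p;         /* unlink from the local list */
    *(void **)p = 0;                    /* clear the link field, as advised */
    return p;
  }
#endif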
/* Allocate lb bytes of pointerful, traced, but not collectable data. */
void * GC_malloc_uncollectable(size_t lb)
{
    if( SMALL_OBJ(lb) ) {
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway.                                 */
        lg = GC_size_map[lb];
        opp = &(GC_uobjfreelist[lg]);
        if( (op = *opp) != 0 ) {
            /* See above comment on signals. */
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set on free list.  It will be     */
            /* cleared only temporarily during a collection, as a     */
            /* result of the normal free list mark bit clearing.      */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
        /* For small objects, the free lists are completely marked. */
        GC_ASSERT(0 == op || GC_is_marked(op));
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
        if (0 == op) return(0);
        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
        hhdr = HDR((struct hblk *)op);
        /* We don't need the lock here, since we have an undisguised */
        /* pointer.  We do need to hold the lock while we adjust     */
        /* mark bits.                                                 */
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
        GC_ASSERT(hhdr -> hb_n_marks == 0);
        hhdr -> hb_n_marks = 1;
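/* Illustrative (not part of the collector): uncollectable objects are     */
/* traced for pointers but never reclaimed automatically, so they must be  */
/* released with GC_free.  A hypothetical explicitly-managed root:         */
#if 0
  struct registry { void *entries; size_t count; };

  static struct registry *registry_create(void)
  {
    /* The registry stays alive until GC_free; anything it points to is   */
    /* kept alive because its contents are traced.                        */
    return (struct registry *)GC_malloc_uncollectable(sizeof(struct registry));
  }

  static void registry_destroy(struct registry *r)
  {
    GC_free(r);                 /* explicit deallocation is required */
  }
#endif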
/* Not well tested nor integrated.                 */
/* Debug version is tricky and currently missing. */
#include <limits.h>

void * GC_memalign(size_t align, size_t lb)
{
    if (align <= GRANULE_BYTES) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
        return GC_malloc(lb <= HBLKSIZE ? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned. */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (void *) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
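/* Illustrative (not part of the collector): a hypothetical client that    */
/* wants a cache-line-aligned, collectable buffer.  Assumes <gc.h> is      */
/* included and that GC_memalign is exported there.                        */
#if 0
  static float *alloc_aligned_floats(size_t n)
  {
    /* Collectable like any GC_malloc'ed object, but the address is a     */
    /* multiple of 64.                                                     */
    return (float *)GC_memalign(64, n * sizeof(float));
  }
#endif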
# ifdef ATOMIC_UNCOLLECTABLE

/* Allocate lb bytes of pointer-free, untraced, uncollectable data.    */
/* This is normally roughly equivalent to the system malloc.           */
/* But it may be useful if malloc is redefined.                        */
void * GC_malloc_atomic_uncollectable(size_t lb)
{
    if( SMALL_OBJ(lb) ) {
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway.                                 */
        lg = GC_size_map[lb];
        opp = &(GC_auobjfreelist[lg]);
        if( (op = *opp) != 0 ) {
            /* See above comment on signals. */
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
        op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        GC_ASSERT(0 == op || GC_is_marked(op));
        op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
        if (0 == op) return(0);
        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
        hhdr = HDR((struct hblk *)op);
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
        GC_ASSERT(hhdr -> hb_n_marks == 0);
        hhdr -> hb_n_marks = 1;
#endif /* ATOMIC_UNCOLLECTABLE */