/*
 *  Mark-and-sweep garbage collection.
 */

#include "duk_internal.h"

#ifdef DUK_USE_MARK_AND_SWEEP

DUK_LOCAL_DECL void duk__mark_heaphdr(duk_heap *heap, duk_heaphdr *h);
DUK_LOCAL_DECL void duk__mark_tval(duk_heap *heap, duk_tval *tv);

/*
 *  Misc
 */

/* Select a thread for mark-and-sweep use.
 *
 * XXX: This needs to change later.
 */
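/* Note: both curr_thread and heap_thread may be NULL very early in heap
 * initialization (or late in teardown), so callers must tolerate a NULL
 * result; see the NULL check in duk_heap_mark_and_sweep() below.
 */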
DUK_LOCAL duk_hthread *duk__get_temp_hthread(duk_heap *heap) {
	if (heap->curr_thread) {
		return heap->curr_thread;
	}
	return heap->heap_thread;  /* may be NULL, too */
}

/*
 *  Marking functions for heap types: mark children recursively
 */

DUK_LOCAL void duk__mark_hstring(duk_heap *heap, duk_hstring *h) {
	DUK_UNREF(heap);
	DUK_UNREF(h);

	DUK_DDD(DUK_DDDPRINT("duk__mark_hstring: %p", (void *) h));
	DUK_ASSERT(h);

	/* nothing to process */
}

DUK_LOCAL void duk__mark_hobject(duk_heap *heap, duk_hobject *h) {
	duk_uint_fast32_t i;

	DUK_DDD(DUK_DDDPRINT("duk__mark_hobject: %p", (void *) h));

	DUK_ASSERT(h);

	/* XXX: use advancing pointers instead of index macros -> faster and smaller? */

	for (i = 0; i < (duk_uint_fast32_t) DUK_HOBJECT_GET_ENEXT(h); i++) {
		duk_hstring *key = DUK_HOBJECT_E_GET_KEY(heap, h, i);
		if (!key) {
			continue;
		}
		duk__mark_heaphdr(heap, (duk_heaphdr *) key);
		if (DUK_HOBJECT_E_SLOT_IS_ACCESSOR(heap, h, i)) {
			duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HOBJECT_E_GET_VALUE_PTR(heap, h, i)->a.get);
			duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HOBJECT_E_GET_VALUE_PTR(heap, h, i)->a.set);
		} else {
			duk__mark_tval(heap, &DUK_HOBJECT_E_GET_VALUE_PTR(heap, h, i)->v);
		}
	}

	for (i = 0; i < (duk_uint_fast32_t) DUK_HOBJECT_GET_ASIZE(h); i++) {
		duk__mark_tval(heap, DUK_HOBJECT_A_GET_VALUE_PTR(heap, h, i));
	}

	/* hash part is a 'weak reference' and does not contribute */

	duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HOBJECT_GET_PROTOTYPE(heap, h));

	if (DUK_HOBJECT_IS_COMPILEDFUNCTION(h)) {
		duk_hcompiledfunction *f = (duk_hcompiledfunction *) h;
		duk_tval *tv, *tv_end;
		duk_hobject **fn, **fn_end;

		/* 'data' is reachable through every compiled function which
		 * contains a reference.
		 */

		duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_HCOMPILEDFUNCTION_GET_DATA(heap, f));

		tv = DUK_HCOMPILEDFUNCTION_GET_CONSTS_BASE(heap, f);
		tv_end = DUK_HCOMPILEDFUNCTION_GET_CONSTS_END(heap, f);
		while (tv < tv_end) {
			duk__mark_tval(heap, tv);
			tv++;
		}

		fn = DUK_HCOMPILEDFUNCTION_GET_FUNCS_BASE(heap, f);
		fn_end = DUK_HCOMPILEDFUNCTION_GET_FUNCS_END(heap, f);
		while (fn < fn_end) {
			duk__mark_heaphdr(heap, (duk_heaphdr *) *fn);
			fn++;
		}
	} else if (DUK_HOBJECT_IS_NATIVEFUNCTION(h)) {
		duk_hnativefunction *f = (duk_hnativefunction *) h;
		DUK_UNREF(f);
		/* nothing to mark */
	} else if (DUK_HOBJECT_IS_BUFFEROBJECT(h)) {
		duk_hbufferobject *b = (duk_hbufferobject *) h;
		duk__mark_heaphdr(heap, (duk_heaphdr *) b->buf);
	} else if (DUK_HOBJECT_IS_THREAD(h)) {
		duk_hthread *t = (duk_hthread *) h;
		duk_tval *tv;

		tv = t->valstack;
		while (tv < t->valstack_end) {
			duk__mark_tval(heap, tv);
			tv++;
		}

		for (i = 0; i < (duk_uint_fast32_t) t->callstack_top; i++) {
			duk_activation *act = t->callstack + i;
			duk__mark_heaphdr(heap, (duk_heaphdr *) DUK_ACT_GET_FUNC(act));
			duk__mark_heaphdr(heap, (duk_heaphdr *) act->var_env);
			duk__mark_heaphdr(heap, (duk_heaphdr *) act->lex_env);
#ifdef DUK_USE_NONSTD_FUNC_CALLER_PROPERTY
			duk__mark_heaphdr(heap, (duk_heaphdr *) act->prev_caller);
#endif
		}

#if 0  /* nothing now */
		for (i = 0; i < (duk_uint_fast32_t) t->catchstack_top; i++) {
			duk_catcher *cat = t->catchstack + i;
		}
#endif

		duk__mark_heaphdr(heap, (duk_heaphdr *) t->resumer);

		/* XXX: duk_small_uint_t would be enough for this loop */
		for (i = 0; i < DUK_NUM_BUILTINS; i++) {
			duk__mark_heaphdr(heap, (duk_heaphdr *) t->builtins[i]);
		}
	}
}

/* Recursion tracking happens in this function only; the type-specific
 * helpers above call back into duk__mark_heaphdr() for their children.
 */
DUK_LOCAL void duk__mark_heaphdr(duk_heap *heap, duk_heaphdr *h) {
	DUK_DDD(DUK_DDDPRINT("duk__mark_heaphdr %p, type %ld",
	                     (void *) h,
	                     (h != NULL ? (long) DUK_HEAPHDR_GET_TYPE(h) : (long) -1)));
	if (!h) {
		return;
	}

	if (DUK_HEAPHDR_HAS_REACHABLE(h)) {
		DUK_DDD(DUK_DDDPRINT("already marked reachable, skip"));
		return;
	}
	DUK_HEAPHDR_SET_REACHABLE(h);

	if (heap->mark_and_sweep_recursion_depth >= DUK_USE_MARK_AND_SWEEP_RECLIMIT) {
		/* log this with a normal debug level because this should be relatively rare */
		DUK_D(DUK_DPRINT("mark-and-sweep recursion limit reached, marking as temproot: %p", (void *) h));
		DUK_HEAP_SET_MARKANDSWEEP_RECLIMIT_REACHED(heap);
		DUK_HEAPHDR_SET_TEMPROOT(h);
		return;
	}

	heap->mark_and_sweep_recursion_depth++;

	switch ((int) DUK_HEAPHDR_GET_TYPE(h)) {
	case DUK_HTYPE_STRING:
		duk__mark_hstring(heap, (duk_hstring *) h);
		break;
	case DUK_HTYPE_OBJECT:
		duk__mark_hobject(heap, (duk_hobject *) h);
		break;
	case DUK_HTYPE_BUFFER:
		/* nothing to mark */
		break;
	default:
		DUK_D(DUK_DPRINT("attempt to mark heaphdr %p with invalid htype %ld", (void *) h, (long) DUK_HEAPHDR_GET_TYPE(h)));
		DUK_UNREACHABLE();
	}

	heap->mark_and_sweep_recursion_depth--;
}

DUK_LOCAL void duk__mark_tval(duk_heap *heap, duk_tval *tv) {
	DUK_DDD(DUK_DDDPRINT("duk__mark_tval %p", (void *) tv));
	if (!tv) {
		return;
	}
	if (DUK_TVAL_IS_HEAP_ALLOCATED(tv)) {
		duk__mark_heaphdr(heap, DUK_TVAL_GET_HEAPHDR(tv));
	}
}

/*
 *  Mark the heap.
 */

DUK_LOCAL void duk__mark_roots_heap(duk_heap *heap) {
	duk_small_uint_t i;

	DUK_DD(DUK_DDPRINT("duk__mark_roots_heap: %p", (void *) heap));

	duk__mark_heaphdr(heap, (duk_heaphdr *) heap->heap_thread);
	duk__mark_heaphdr(heap, (duk_heaphdr *) heap->heap_object);

	for (i = 0; i < DUK_HEAP_NUM_STRINGS; i++) {
		duk_hstring *h = DUK_HEAP_GET_STRING(heap, i);
		duk__mark_heaphdr(heap, (duk_heaphdr *) h);
	}

	duk__mark_tval(heap, &heap->lj.value1);
	duk__mark_tval(heap, &heap->lj.value2);

#if defined(DUK_USE_DEBUGGER_SUPPORT)
	for (i = 0; i < heap->dbg_breakpoint_count; i++) {
		duk__mark_heaphdr(heap, (duk_heaphdr *) heap->dbg_breakpoints[i].filename);
	}
#endif
}

/*
 *  Mark refzero_list objects.
 *
 *  Objects on the refzero_list have no inbound references.  They might have
 *  outbound references to objects that we might free, which would invalidate
 *  any references held by the refzero objects.  A refzero object might also
 *  be rescued by refcount finalization.  Refzero objects are treated as
 *  reachability roots to ensure they (or anything they point to) are not
 *  freed in mark-and-sweep.
 */

#ifdef DUK_USE_REFERENCE_COUNTING
DUK_LOCAL void duk__mark_refzero_list(duk_heap *heap) {
	duk_heaphdr *hdr;

	DUK_DD(DUK_DDPRINT("duk__mark_refzero_list: %p", (void *) heap));

	hdr = heap->refzero_list;
	while (hdr) {
		duk__mark_heaphdr(heap, hdr);
		hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
	}
}
#endif

/*
 *  Mark unreachable, finalizable objects.
 *
 *  Such objects will be moved aside and their finalizers run later.  They
 *  have to be treated as reachability roots for their properties etc to
 *  remain allocated.  This marking is only done for unreachable values which
 *  would be swept later (refzero_list is thus excluded).
 *
 *  Objects are first marked FINALIZABLE and only then marked as reachability
 *  roots; otherwise circular references might be handled inconsistently.
 */
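/* For context (not part of the algorithm here): a finalizer is typically
 * attached from script via Duktape.fin() or from C via duk_set_finalizer(),
 * which is what makes the internal finalizer property lookup below succeed.
 * Illustrative script sketch:
 *
 *     Duktape.fin(obj, function (o) {
 *         // release native resources held by 'o'
 *     });
 */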

DUK_LOCAL void duk__mark_finalizable(duk_heap *heap) {
	duk_hthread *thr;
	duk_heaphdr *hdr;
	duk_size_t count_finalizable = 0;

	DUK_DD(DUK_DDPRINT("duk__mark_finalizable: %p", (void *) heap));

	thr = duk__get_temp_hthread(heap);
	DUK_ASSERT(thr != NULL);

	hdr = heap->heap_allocated;
	while (hdr) {
		/* A finalizer is looked up from the object and up its prototype chain
		 * (which allows inherited finalizers).  A prototype loop must not cause
		 * an error to be thrown here; duk_hobject_hasprop_raw() will ignore a
		 * prototype loop silently and indicate that the property doesn't exist.
		 */

		if (!DUK_HEAPHDR_HAS_REACHABLE(hdr) &&
		    DUK_HEAPHDR_GET_TYPE(hdr) == DUK_HTYPE_OBJECT &&
		    !DUK_HEAPHDR_HAS_FINALIZED(hdr) &&
		    duk_hobject_hasprop_raw(thr, (duk_hobject *) hdr, DUK_HTHREAD_STRING_INT_FINALIZER(thr))) {

			/* heaphdr:
			 *  - is not reachable
			 *  - is an object
			 *  - is not a finalized object
			 *  - has a finalizer
			 */

			DUK_DD(DUK_DDPRINT("unreachable heap object will be "
			                   "finalized -> mark as finalizable "
			                   "and treat as a reachability root: %p",
			                   (void *) hdr));
			DUK_HEAPHDR_SET_FINALIZABLE(hdr);
			count_finalizable++;
		}

		hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
	}

	if (count_finalizable == 0) {
		return;
	}

	DUK_DD(DUK_DDPRINT("marked %ld heap objects as finalizable, now mark them reachable",
	                   (long) count_finalizable));

	hdr = heap->heap_allocated;
	while (hdr) {
		if (DUK_HEAPHDR_HAS_FINALIZABLE(hdr)) {
			duk__mark_heaphdr(heap, hdr);
		}

		hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
	}

	/* Caller will finish the marking process if we hit a recursion limit. */
}

/*
 *  Mark objects on finalize_list.
 */

DUK_LOCAL void duk__mark_finalize_list(duk_heap *heap) {
	duk_heaphdr *hdr;
#ifdef DUK_USE_DEBUG
	duk_size_t count_finalize_list = 0;
#endif

	DUK_DD(DUK_DDPRINT("duk__mark_finalize_list: %p", (void *) heap));

	hdr = heap->finalize_list;
	while (hdr) {
		duk__mark_heaphdr(heap, hdr);
		hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
#ifdef DUK_USE_DEBUG
		count_finalize_list++;
#endif
	}

#ifdef DUK_USE_DEBUG
	if (count_finalize_list > 0) {
		DUK_D(DUK_DPRINT("marked %ld objects on the finalize_list as reachable (previous finalizer run skipped)",
		                 (long) count_finalize_list));
	}
#endif
}

/*
 *  Fallback marking handler if recursion limit is reached.
 *
 *  Iterates 'temproots' until the recursion limit is no longer hit.  Note
 *  that temproots may reside either in the heap allocated list or the
 *  refzero work list.  This is a slow scan, but guarantees that we finish
 *  with a bounded C stack.
 *
 *  Note that nodes may have been marked as temproots before this scan
 *  began, OR they may have been marked during the scan (as we process
 *  nodes recursively also during the scan).  This is intended behavior.
 */
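/* A rough sketch of the fixpoint loop implemented below:
 *
 *     while (RECLIMIT_REACHED flag is set):
 *         clear RECLIMIT_REACHED
 *         for each hdr on heap_allocated (and refzero_list):
 *             if hdr has TEMPROOT:
 *                 clear TEMPROOT and REACHABLE, re-mark from hdr
 *
 * Each pass may push marking past the recursion limit again and set new
 * TEMPROOTs, so the scan repeats until a pass completes without the
 * limit being hit.
 */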

#ifdef DUK_USE_DEBUG
DUK_LOCAL void duk__handle_temproot(duk_heap *heap, duk_heaphdr *hdr, duk_size_t *count) {
#else
DUK_LOCAL void duk__handle_temproot(duk_heap *heap, duk_heaphdr *hdr) {
#endif
	if (!DUK_HEAPHDR_HAS_TEMPROOT(hdr)) {
		DUK_DDD(DUK_DDDPRINT("not a temp root: %p", (void *) hdr));
		return;
	}

	DUK_DDD(DUK_DDDPRINT("found a temp root: %p", (void *) hdr));
	DUK_HEAPHDR_CLEAR_TEMPROOT(hdr);
	DUK_HEAPHDR_CLEAR_REACHABLE(hdr);  /* done so that duk__mark_heaphdr() works correctly */
	duk__mark_heaphdr(heap, hdr);

#ifdef DUK_USE_DEBUG
	(*count)++;
#endif
}

DUK_LOCAL void duk__mark_temproots_by_heap_scan(duk_heap *heap) {
	duk_heaphdr *hdr;
#ifdef DUK_USE_DEBUG
	duk_size_t count;
#endif

	DUK_DD(DUK_DDPRINT("duk__mark_temproots_by_heap_scan: %p", (void *) heap));

	while (DUK_HEAP_HAS_MARKANDSWEEP_RECLIMIT_REACHED(heap)) {
		DUK_DD(DUK_DDPRINT("recursion limit reached, doing heap scan to continue from temproots"));

#ifdef DUK_USE_DEBUG
		count = 0;
#endif
		DUK_HEAP_CLEAR_MARKANDSWEEP_RECLIMIT_REACHED(heap);

		hdr = heap->heap_allocated;
		while (hdr) {
#ifdef DUK_USE_DEBUG
			duk__handle_temproot(heap, hdr, &count);
#else
			duk__handle_temproot(heap, hdr);
#endif
			hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
		}

		/* must also check refzero_list */
#ifdef DUK_USE_REFERENCE_COUNTING
		hdr = heap->refzero_list;
		while (hdr) {
#ifdef DUK_USE_DEBUG
			duk__handle_temproot(heap, hdr, &count);
#else
			duk__handle_temproot(heap, hdr);
#endif
			hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
		}
#endif  /* DUK_USE_REFERENCE_COUNTING */

#ifdef DUK_USE_DEBUG
		DUK_DD(DUK_DDPRINT("temproot mark heap scan processed %ld temp roots", (long) count));
#endif
	}
}

/*
 *  Finalize refcounts for heap elements just about to be freed.  This must
 *  be done for all objects before freeing to avoid any stale pointer
 *  dereferences.
 *
 *  Note that this must deduce the set of objects to be freed identically
 *  to duk__sweep_heap().
 */
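/* Concretely: if unreachable object A references B and C, finalizing A's
 * refcounts decrefs B and C without triggering refzero processing, so that
 * freeing A later cannot leave refcount bookkeeping pointing at freed
 * memory.  (A sketch of the intent; the actual decref walk lives in
 * duk_heaphdr_refcount_finalize().)
 */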

#ifdef DUK_USE_REFERENCE_COUNTING
DUK_LOCAL void duk__finalize_refcounts(duk_heap *heap) {
	duk_hthread *thr;
	duk_heaphdr *hdr;

	thr = duk__get_temp_hthread(heap);
	DUK_ASSERT(thr != NULL);

	DUK_DD(DUK_DDPRINT("duk__finalize_refcounts: heap=%p, hthread=%p",
	                   (void *) heap, (void *) thr));

	hdr = heap->heap_allocated;
	while (hdr) {
		if (!DUK_HEAPHDR_HAS_REACHABLE(hdr)) {
			/*
			 *  Unreachable object about to be swept.  Finalize target refcounts
			 *  (objects which the unreachable object points to) without doing
			 *  refzero processing.  Recursive decrefs are also prevented when
			 *  refzero processing is disabled.
			 *
			 *  Value cannot be a finalizable object, as they have been made
			 *  temporarily reachable for this round.
			 */

			DUK_DDD(DUK_DDDPRINT("unreachable object, refcount finalize before sweeping: %p", (void *) hdr));
			duk_heaphdr_refcount_finalize(thr, hdr);
		}

		hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
	}
}
#endif  /* DUK_USE_REFERENCE_COUNTING */

/*
 *  Clear (reachable) flags of refzero work list.
 */

#ifdef DUK_USE_REFERENCE_COUNTING
DUK_LOCAL void duk__clear_refzero_list_flags(duk_heap *heap) {
	duk_heaphdr *hdr;

	DUK_DD(DUK_DDPRINT("duk__clear_refzero_list_flags: %p", (void *) heap));

	hdr = heap->refzero_list;
	while (hdr) {
		DUK_HEAPHDR_CLEAR_REACHABLE(hdr);
		DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(hdr));
		DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(hdr));
		DUK_ASSERT(!DUK_HEAPHDR_HAS_TEMPROOT(hdr));
		hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
	}
}
#endif  /* DUK_USE_REFERENCE_COUNTING */

/*
 *  Clear (reachable) flags of finalize_list.
 *
 *  We could do most of this in the sweep phase, when objects are moved
 *  from the heap into the finalize_list.  However, if a finalizer run is
 *  skipped during a mark-and-sweep, the objects on the finalize_list will
 *  be marked reachable during the next mark-and-sweep.  Since they're
 *  already on the finalize_list, no one would otherwise clear their
 *  REACHABLE flag, so we do it here.  (This now overlaps with the sweep
 *  handling in a harmless way.)
 */

DUK_LOCAL void duk__clear_finalize_list_flags(duk_heap *heap) {
	duk_heaphdr *hdr;

	DUK_DD(DUK_DDPRINT("duk__clear_finalize_list_flags: %p", (void *) heap));

	hdr = heap->finalize_list;
	while (hdr) {
		DUK_HEAPHDR_CLEAR_REACHABLE(hdr);
		DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(hdr));
		DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(hdr));
		DUK_ASSERT(!DUK_HEAPHDR_HAS_TEMPROOT(hdr));
		hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
	}
}

/*
 *  Sweep stringtable.
 */
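/* Two string table implementations exist: a chained variant
 * (DUK_USE_STRTAB_CHAIN) and an open-addressing variant
 * (DUK_USE_STRTAB_PROBE).  Both sweeps below follow the same pattern: a
 * REACHABLE string is kept and its flag cleared; anything else is removed
 * from the string cache, unlinked from the table, and freed.
 */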

#if defined(DUK_USE_STRTAB_CHAIN)

/* XXX: skip count_free w/o debug? */
#if defined(DUK_USE_HEAPPTR16)
DUK_LOCAL void duk__sweep_string_chain16(duk_heap *heap, duk_uint16_t *slot, duk_size_t *count_keep, duk_size_t *count_free) {
	duk_uint16_t h16 = *slot;
	duk_hstring *h;
	duk_uint16_t null16 = heap->heapptr_null16;

	if (h16 == null16) {
		/* nop */
		return;
	}
	h = (duk_hstring *) DUK_USE_HEAPPTR_DEC16(heap->heap_udata, h16);
	DUK_ASSERT(h != NULL);

	if (DUK_HEAPHDR_HAS_REACHABLE((duk_heaphdr *) h)) {
		DUK_HEAPHDR_CLEAR_REACHABLE((duk_heaphdr *) h);
		(*count_keep)++;
	} else {
#if defined(DUK_USE_REFERENCE_COUNTING)
		DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT((duk_heaphdr *) h) == 0);
#endif
		/* deal with weak references first */
		duk_heap_strcache_string_remove(heap, (duk_hstring *) h);
		*slot = null16;

		/* free inner references (these exist e.g. when external
		 * strings are enabled)
		 */
		duk_free_hstring_inner(heap, h);
		DUK_FREE(heap, h);
		(*count_free)++;
	}
}
#else  /* DUK_USE_HEAPPTR16 */
DUK_LOCAL void duk__sweep_string_chain(duk_heap *heap, duk_hstring **slot, duk_size_t *count_keep, duk_size_t *count_free) {
	duk_hstring *h = *slot;

	if (h == NULL) {
		/* nop */
		return;
	}

	if (DUK_HEAPHDR_HAS_REACHABLE((duk_heaphdr *) h)) {
		DUK_HEAPHDR_CLEAR_REACHABLE((duk_heaphdr *) h);
		(*count_keep)++;
	} else {
#if defined(DUK_USE_REFERENCE_COUNTING)
		DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT((duk_heaphdr *) h) == 0);
#endif
		/* deal with weak references first */
		duk_heap_strcache_string_remove(heap, (duk_hstring *) h);
		*slot = NULL;

		/* free inner references (these exist e.g. when external
		 * strings are enabled)
		 */
		duk_free_hstring_inner(heap, h);
		DUK_FREE(heap, h);
		(*count_free)++;
	}
}
#endif  /* DUK_USE_HEAPPTR16 */

DUK_LOCAL void duk__sweep_stringtable_chain(duk_heap *heap, duk_size_t *out_count_keep) {
	duk_strtab_entry *e;
	duk_uint_fast32_t i;
	duk_size_t count_free = 0;
	duk_size_t count_keep = 0;
	duk_size_t j, n;
#if defined(DUK_USE_HEAPPTR16)
	duk_uint16_t *lst;
#else
	duk_hstring **lst;
#endif

	DUK_DD(DUK_DDPRINT("duk__sweep_stringtable: %p", (void *) heap));

	/* Non-zero refcounts should not happen for unreachable strings,
	 * because we refcount finalize all unreachable objects which
	 * should have decreased unreachable string refcounts to zero
	 * (even for cycles).
	 */

	for (i = 0; i < DUK_STRTAB_CHAIN_SIZE; i++) {
		e = heap->strtable + i;
		if (e->listlen == 0) {
#if defined(DUK_USE_HEAPPTR16)
			duk__sweep_string_chain16(heap, &e->u.str16, &count_keep, &count_free);
#else
			duk__sweep_string_chain(heap, &e->u.str, &count_keep, &count_free);
#endif
		} else {
#if defined(DUK_USE_HEAPPTR16)
			lst = (duk_uint16_t *) DUK_USE_HEAPPTR_DEC16(heap->heap_udata, e->u.strlist16);
#else
			lst = e->u.strlist;
#endif
			for (j = 0, n = e->listlen; j < n; j++) {
#if defined(DUK_USE_HEAPPTR16)
				duk__sweep_string_chain16(heap, lst + j, &count_keep, &count_free);
#else
				duk__sweep_string_chain(heap, lst + j, &count_keep, &count_free);
#endif
			}
		}
	}

	DUK_D(DUK_DPRINT("mark-and-sweep sweep stringtable: %ld freed, %ld kept",
	                 (long) count_free, (long) count_keep));
	*out_count_keep = count_keep;
}
#endif  /* DUK_USE_STRTAB_CHAIN */

#if defined(DUK_USE_STRTAB_PROBE)
DUK_LOCAL void duk__sweep_stringtable_probe(duk_heap *heap, duk_size_t *out_count_keep) {
	duk_hstring *h;
	duk_uint_fast32_t i;
#ifdef DUK_USE_DEBUG
	duk_size_t count_free = 0;
#endif
	duk_size_t count_keep = 0;

	DUK_DD(DUK_DDPRINT("duk__sweep_stringtable: %p", (void *) heap));

	for (i = 0; i < heap->st_size; i++) {
#if defined(DUK_USE_HEAPPTR16)
		h = (duk_hstring *) DUK_USE_HEAPPTR_DEC16(heap->strtable16[i]);
#else
		h = heap->strtable[i];
#endif
		if (h == NULL || h == DUK_STRTAB_DELETED_MARKER(heap)) {
			continue;
		} else if (DUK_HEAPHDR_HAS_REACHABLE((duk_heaphdr *) h)) {
			DUK_HEAPHDR_CLEAR_REACHABLE((duk_heaphdr *) h);
			count_keep++;
			continue;
		}

#ifdef DUK_USE_DEBUG
		count_free++;
#endif

#if defined(DUK_USE_REFERENCE_COUNTING)
		/* Non-zero refcounts should not happen for unreachable strings,
		 * because we refcount finalize all unreachable objects which
		 * should have decreased unreachable string refcounts to zero
		 * (even for cycles).
		 */
		DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT((duk_heaphdr *) h) == 0);
#endif

		DUK_DDD(DUK_DDDPRINT("sweep string, not reachable: %p", (void *) h));

		/* deal with weak references first */
		duk_heap_strcache_string_remove(heap, (duk_hstring *) h);

		/* remove the string (mark DELETED), could also call
		 * duk_heap_string_remove() but that would be slow and
		 * pointless because we already know the slot.
		 */
#if defined(DUK_USE_HEAPPTR16)
		heap->strtable16[i] = heap->heapptr_deleted16;
#else
		heap->strtable[i] = DUK_STRTAB_DELETED_MARKER(heap);
#endif

		/* free inner references (these exist e.g. when external
		 * strings are enabled)
		 */
		duk_free_hstring_inner(heap, (duk_hstring *) h);

		/* finally free the struct itself */
		DUK_FREE(heap, h);
	}

#ifdef DUK_USE_DEBUG
	DUK_D(DUK_DPRINT("mark-and-sweep sweep stringtable: %ld freed, %ld kept",
	                 (long) count_free, (long) count_keep));
#endif
	*out_count_keep = count_keep;
}
#endif  /* DUK_USE_STRTAB_PROBE */

/*
 *  Sweep heap.
 */
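/* Each object on heap_allocated ends up in one of four cases:
 *
 *   1. reachable, FINALIZABLE      -> moved to finalize_list
 *   2. reachable, FINALIZED        -> kept (finalizer rescued it)
 *   3. reachable, plain            -> kept
 *   4. not reachable               -> freed (refcounts already finalized)
 *
 * Marking flags are cleared for all kept objects so the next
 * mark-and-sweep starts from a clean state.
 */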

DUK_LOCAL void duk__sweep_heap(duk_heap *heap, duk_int_t flags, duk_size_t *out_count_keep) {
	duk_heaphdr *prev;  /* last element that was left in the heap */
	duk_heaphdr *curr;
	duk_heaphdr *next;
#ifdef DUK_USE_DEBUG
	duk_size_t count_free = 0;
	duk_size_t count_finalize = 0;
	duk_size_t count_rescue = 0;
#endif
	duk_size_t count_keep = 0;

	DUK_UNREF(flags);
	DUK_DD(DUK_DDPRINT("duk__sweep_heap: %p", (void *) heap));

	prev = NULL;
	curr = heap->heap_allocated;
	heap->heap_allocated = NULL;
	while (curr) {
		/* strings are never placed on the heap allocated list */
		DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(curr) != DUK_HTYPE_STRING);

		next = DUK_HEAPHDR_GET_NEXT(heap, curr);

		if (DUK_HEAPHDR_HAS_REACHABLE(curr)) {
			/*
			 *  Reachable object, keep
			 */

			DUK_DDD(DUK_DDDPRINT("sweep, reachable: %p", (void *) curr));

			if (DUK_HEAPHDR_HAS_FINALIZABLE(curr)) {
				/*
				 *  If object has been marked finalizable, move it to the
				 *  "to be finalized" work list.  It will be collected on
				 *  the next mark-and-sweep if it is still unreachable
				 *  after running the finalizer.
				 */

				DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(curr));
				DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(curr) == DUK_HTYPE_OBJECT);
				DUK_DDD(DUK_DDDPRINT("object has finalizer, move to finalization work list: %p", (void *) curr));

#ifdef DUK_USE_DOUBLE_LINKED_HEAP
				if (heap->finalize_list) {
					DUK_HEAPHDR_SET_PREV(heap, heap->finalize_list, curr);
				}
				DUK_HEAPHDR_SET_PREV(heap, curr, NULL);
#endif
				DUK_HEAPHDR_SET_NEXT(heap, curr, heap->finalize_list);
				heap->finalize_list = curr;
#ifdef DUK_USE_DEBUG
				count_finalize++;
#endif
			} else {
				/*
				 *  Object will be kept; queue object back to heap_allocated (to tail)
				 */

				if (DUK_HEAPHDR_HAS_FINALIZED(curr)) {
					/*
					 *  Object's finalizer was executed on last round, and
					 *  object has been happily rescued.
					 */

					DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(curr));
					DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(curr) == DUK_HTYPE_OBJECT);
					DUK_DD(DUK_DDPRINT("object rescued during mark-and-sweep finalization: %p", (void *) curr));
#ifdef DUK_USE_DEBUG
					count_rescue++;
#endif
				} else {
					/*
					 *  Plain, boring reachable object.
					 */
					count_keep++;
				}

				if (!heap->heap_allocated) {
					heap->heap_allocated = curr;
				}
				if (prev) {
					DUK_HEAPHDR_SET_NEXT(heap, prev, curr);
				}
#ifdef DUK_USE_DOUBLE_LINKED_HEAP
				DUK_HEAPHDR_SET_PREV(heap, curr, prev);
#endif
				prev = curr;
			}

			DUK_HEAPHDR_CLEAR_REACHABLE(curr);
			DUK_HEAPHDR_CLEAR_FINALIZED(curr);
			DUK_HEAPHDR_CLEAR_FINALIZABLE(curr);

			DUK_ASSERT(!DUK_HEAPHDR_HAS_REACHABLE(curr));
			DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(curr));
			DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(curr));

			curr = next;
		} else {
			/*
			 *  Unreachable object, free
			 */

			DUK_DDD(DUK_DDDPRINT("sweep, not reachable: %p", (void *) curr));

#if defined(DUK_USE_REFERENCE_COUNTING)
			/* Non-zero refcounts should not happen because we refcount
			 * finalize all unreachable objects which should cancel out
			 * refcounts (even for cycles).
			 */
			DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT(curr) == 0);
#endif
			DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(curr));

			if (DUK_HEAPHDR_HAS_FINALIZED(curr)) {
				DUK_DDD(DUK_DDDPRINT("finalized object not rescued: %p", (void *) curr));
			}

			/* Note: object cannot be a finalizable unreachable object, as
			 * they have been marked temporarily reachable for this round,
			 * and are handled above.
			 */

#ifdef DUK_USE_DEBUG
			count_free++;
#endif

			/* weak refs should be handled here, but no weak refs for
			 * any non-string objects exist right now.
			 */

			/* free object and all auxiliary (non-heap) allocs */
			duk_heap_free_heaphdr_raw(heap, curr);

			curr = next;
		}
	}
	if (prev) {
		DUK_HEAPHDR_SET_NEXT(heap, prev, NULL);
	}

#ifdef DUK_USE_DEBUG
	DUK_D(DUK_DPRINT("mark-and-sweep sweep objects (non-string): %ld freed, %ld kept, %ld rescued, %ld queued for finalization",
	                 (long) count_free, (long) count_keep, (long) count_rescue, (long) count_finalize));
#endif
	*out_count_keep = count_keep;
}

/*
 *  Run (object) finalizers in the "to be finalized" work list.
 */
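/* A finalized object is queued back to heap_allocated with FINALIZED set.
 * If the finalizer made the object reachable again, it is "rescued" by the
 * next mark-and-sweep (which also clears FINALIZED); otherwise it is freed
 * on that next pass without running the finalizer a second time.
 */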

DUK_LOCAL void duk__run_object_finalizers(duk_heap *heap) {
	duk_heaphdr *curr;
	duk_heaphdr *next;
#ifdef DUK_USE_DEBUG
	duk_size_t count = 0;
#endif
	duk_hthread *thr;

	DUK_DD(DUK_DDPRINT("duk__run_object_finalizers: %p", (void *) heap));

	thr = duk__get_temp_hthread(heap);
	DUK_ASSERT(thr != NULL);

	curr = heap->finalize_list;
	while (curr) {
		DUK_DDD(DUK_DDDPRINT("mark-and-sweep finalize: %p", (void *) curr));

		DUK_ASSERT(DUK_HEAPHDR_GET_TYPE(curr) == DUK_HTYPE_OBJECT);  /* only objects have finalizers */
		DUK_ASSERT(!DUK_HEAPHDR_HAS_REACHABLE(curr));                /* flags have been already cleared */
		DUK_ASSERT(!DUK_HEAPHDR_HAS_TEMPROOT(curr));
		DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(curr));
		DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(curr));

		/* run the finalizer */
		duk_hobject_run_finalizer(thr, (duk_hobject *) curr);  /* must never longjmp */

		/* mark FINALIZED, for next mark-and-sweep (will collect unless has become reachable;
		 * prevent running finalizer again if reachable)
		 */
		DUK_HEAPHDR_SET_FINALIZED(curr);

		/* queue back to heap_allocated */
		next = DUK_HEAPHDR_GET_NEXT(heap, curr);
		DUK_HEAP_INSERT_INTO_HEAP_ALLOCATED(heap, curr);

		curr = next;
#ifdef DUK_USE_DEBUG
		count++;
#endif
	}

	/* finalize_list will always be processed completely */
	heap->finalize_list = NULL;

#ifdef DUK_USE_DEBUG
	DUK_D(DUK_DPRINT("mark-and-sweep finalize objects: %ld finalizers called", (long) count));
#endif
}

/*
 *  Object compaction.
 *
 *  Compaction is assumed to never throw an error.
 */
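/* "Compaction" here means duk_hobject_compact_props(): an object's property
 * allocation is resized down to its minimum size, and a sparse array part
 * may be abandoned (see the notes near the emergency GC call site below).
 * Each object is compacted inside a duk_safe_call() wrapper so an error in
 * one compaction doesn't abort the whole pass.
 */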

DUK_LOCAL int duk__protected_compact_object(duk_context *ctx) {
	/* XXX: for threads, compact value stack, call stack, catch stack? */

	duk_hobject *obj = duk_get_hobject(ctx, -1);
	DUK_ASSERT(obj != NULL);
	duk_hobject_compact_props((duk_hthread *) ctx, obj);
	return 0;
}

#ifdef DUK_USE_DEBUG
DUK_LOCAL void duk__compact_object_list(duk_heap *heap, duk_hthread *thr, duk_heaphdr *start, duk_size_t *p_count_check, duk_size_t *p_count_compact, duk_size_t *p_count_bytes_saved) {
#else
DUK_LOCAL void duk__compact_object_list(duk_heap *heap, duk_hthread *thr, duk_heaphdr *start) {
#endif
	duk_heaphdr *curr;
#ifdef DUK_USE_DEBUG
	duk_size_t old_size, new_size;
#endif
	duk_hobject *obj;

	DUK_UNREF(heap);

	curr = start;
	while (curr) {
		DUK_DDD(DUK_DDDPRINT("mark-and-sweep compact: %p", (void *) curr));

		if (DUK_HEAPHDR_GET_TYPE(curr) != DUK_HTYPE_OBJECT) {
			goto next;
		}
		obj = (duk_hobject *) curr;

#ifdef DUK_USE_DEBUG
		old_size = DUK_HOBJECT_P_COMPUTE_SIZE(DUK_HOBJECT_GET_ESIZE(obj),
		                                      DUK_HOBJECT_GET_ASIZE(obj),
		                                      DUK_HOBJECT_GET_HSIZE(obj));
#endif

		DUK_DD(DUK_DDPRINT("compact object: %p", (void *) obj));
		duk_push_hobject((duk_context *) thr, obj);
		/* XXX: disable error handlers for duration of compaction? */
		duk_safe_call((duk_context *) thr, duk__protected_compact_object, 1, 0);

#ifdef DUK_USE_DEBUG
		new_size = DUK_HOBJECT_P_COMPUTE_SIZE(DUK_HOBJECT_GET_ESIZE(obj),
		                                      DUK_HOBJECT_GET_ASIZE(obj),
		                                      DUK_HOBJECT_GET_HSIZE(obj));
#endif

#ifdef DUK_USE_DEBUG
		(*p_count_compact)++;
		(*p_count_bytes_saved) += (duk_size_t) (old_size - new_size);
#endif

	 next:
		curr = DUK_HEAPHDR_GET_NEXT(heap, curr);
#ifdef DUK_USE_DEBUG
		(*p_count_check)++;
#endif
	}
}

DUK_LOCAL void duk__compact_objects(duk_heap *heap) {
	/* XXX: which lists should participate?  to be finalized? */
#ifdef DUK_USE_DEBUG
	duk_size_t count_check = 0;
	duk_size_t count_compact = 0;
	duk_size_t count_bytes_saved = 0;
#endif
	duk_hthread *thr;

	DUK_DD(DUK_DDPRINT("duk__compact_objects: %p", (void *) heap));

	thr = duk__get_temp_hthread(heap);
	DUK_ASSERT(thr != NULL);

#ifdef DUK_USE_DEBUG
	duk__compact_object_list(heap, thr, heap->heap_allocated, &count_check, &count_compact, &count_bytes_saved);
	duk__compact_object_list(heap, thr, heap->finalize_list, &count_check, &count_compact, &count_bytes_saved);
#ifdef DUK_USE_REFERENCE_COUNTING
	duk__compact_object_list(heap, thr, heap->refzero_list, &count_check, &count_compact, &count_bytes_saved);
#endif
#else
	duk__compact_object_list(heap, thr, heap->heap_allocated);
	duk__compact_object_list(heap, thr, heap->finalize_list);
#ifdef DUK_USE_REFERENCE_COUNTING
	duk__compact_object_list(heap, thr, heap->refzero_list);
#endif
#endif

#ifdef DUK_USE_DEBUG
	DUK_D(DUK_DPRINT("mark-and-sweep compact objects: %ld checked, %ld compaction attempts, %ld bytes saved by compaction",
	                 (long) count_check, (long) count_compact, (long) count_bytes_saved));
#endif
}

/*
 *  Assertion helpers.
 */

#ifdef DUK_USE_ASSERTIONS
DUK_LOCAL void duk__assert_heaphdr_flags(duk_heap *heap) {
	duk_heaphdr *hdr;

	hdr = heap->heap_allocated;
	while (hdr) {
		DUK_ASSERT(!DUK_HEAPHDR_HAS_REACHABLE(hdr));
		DUK_ASSERT(!DUK_HEAPHDR_HAS_TEMPROOT(hdr));
		DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(hdr));
		/* may have FINALIZED */
		hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
	}

#ifdef DUK_USE_REFERENCE_COUNTING
	hdr = heap->refzero_list;
	while (hdr) {
		DUK_ASSERT(!DUK_HEAPHDR_HAS_REACHABLE(hdr));
		DUK_ASSERT(!DUK_HEAPHDR_HAS_TEMPROOT(hdr));
		DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZABLE(hdr));
		DUK_ASSERT(!DUK_HEAPHDR_HAS_FINALIZED(hdr));
		hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
	}
#endif  /* DUK_USE_REFERENCE_COUNTING */
}

#ifdef DUK_USE_REFERENCE_COUNTING
DUK_LOCAL void duk__assert_valid_refcounts(duk_heap *heap) {
	duk_heaphdr *hdr = heap->heap_allocated;
	while (hdr) {
		if (DUK_HEAPHDR_GET_REFCOUNT(hdr) == 0 &&
		    DUK_HEAPHDR_HAS_FINALIZED(hdr)) {
			/* An object may be in heap_allocated list with a zero
			 * refcount if it has just been finalized and is waiting
			 * to be collected by the next cycle.
			 */
		} else if (DUK_HEAPHDR_GET_REFCOUNT(hdr) == 0) {
			/* An object may be in heap_allocated list with a zero
			 * refcount also if it is a temporary object created by
			 * a finalizer; because finalization now runs inside
			 * mark-and-sweep, such objects will not be queued to
			 * refzero_list and will thus appear here with refcount
			 * zero.
			 */
#if 0  /* this case can no longer occur because refcount is unsigned */
		} else if (DUK_HEAPHDR_GET_REFCOUNT(hdr) < 0) {
			DUK_D(DUK_DPRINT("invalid refcount: %ld, %p -> %!O",
			                 (hdr != NULL ? (long) DUK_HEAPHDR_GET_REFCOUNT(hdr) : (long) 0),
			                 (void *) hdr, (duk_heaphdr *) hdr));
			DUK_ASSERT(DUK_HEAPHDR_GET_REFCOUNT(hdr) > 0);
#endif
		}
		hdr = DUK_HEAPHDR_GET_NEXT(heap, hdr);
	}
}
#endif  /* DUK_USE_REFERENCE_COUNTING */
#endif  /* DUK_USE_ASSERTIONS */

/*
 *  Finalizer torture.  Do one fake finalizer call which causes side effects
 *  similar to one or more finalizers on actual objects.
 */

#if defined(DUK_USE_MARKANDSWEEP_FINALIZER_TORTURE)
DUK_LOCAL duk_ret_t duk__markandsweep_fake_finalizer(duk_context *ctx) {
	DUK_D(DUK_DPRINT("fake mark-and-sweep torture finalizer executed"));

	/* Require a lot of stack to force a value stack grow/shrink.
	 * Recursive mark-and-sweep is prevented by allocation macros
	 * so this won't trigger another mark-and-sweep.
	 */
	duk_require_stack(ctx, 100000);

	/* XXX: do something to force a callstack grow/shrink, perhaps
	 * just a manual forced resize or a forced relocating realloc?
	 */

	return 0;
}

DUK_LOCAL void duk__markandsweep_torture_finalizer(duk_hthread *thr) {
	duk_context *ctx;
	duk_int_t rc;

	DUK_ASSERT(thr != NULL);
	ctx = (duk_context *) thr;

	/* Avoid fake finalization when callstack limit has been reached.
	 * Otherwise a callstack limit error will be created, then refzero'ed.
	 */
	if (thr->heap->call_recursion_depth >= thr->heap->call_recursion_limit ||
	    thr->callstack_size + 2 * DUK_CALLSTACK_GROW_STEP >= thr->callstack_max /*approximate*/) {
		DUK_D(DUK_DPRINT("call recursion depth reached, avoid fake mark-and-sweep torture finalizer"));
		return;
	}

	/* Run fake finalizer.  Avoid creating unnecessary garbage. */
	duk_push_c_function(ctx, duk__markandsweep_fake_finalizer, 0 /*nargs*/);
	rc = duk_pcall(ctx, 0 /*nargs*/);
	DUK_UNREF(rc);  /* ignored */
	duk_pop(ctx);
}
#endif  /* DUK_USE_MARKANDSWEEP_FINALIZER_TORTURE */

/*
 *  Main mark-and-sweep function.
 *
 *  'flags' represents the features requested by the caller.  The current
 *  heap->mark_and_sweep_base_flags is ORed automatically into the flags;
 *  the base flags mask typically prevents certain mark-and-sweep operations
 *  to avoid trouble.
 */
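/* Illustrative call sites (callers live elsewhere, e.g. allocation failure
 * paths): a voluntary GC passes zero flags,
 *
 *     duk_heap_mark_and_sweep(heap, 0);
 *
 * while an emergency GC would request e.g.
 *
 *     duk_heap_mark_and_sweep(heap, DUK_MS_FLAG_EMERGENCY);
 *
 * which enables object compaction below (unless base flags forbid it).
 */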

DUK_INTERNAL duk_bool_t duk_heap_mark_and_sweep(duk_heap *heap, duk_small_uint_t flags) {
	duk_hthread *thr;
	duk_size_t count_keep_obj;
	duk_size_t count_keep_str;
#ifdef DUK_USE_VOLUNTARY_GC
	duk_size_t tmp;
#endif

	/* XXX: thread selection for mark-and-sweep is currently a hack.
	 * If we don't have a thread, the entire mark-and-sweep is now
	 * skipped (although we could just skip finalizations).
	 */
	/* XXX: if thr != NULL, the thr may still be in the middle of
	 * initialization; improve the thread viability test.
	 */
	thr = duk__get_temp_hthread(heap);
	if (thr == NULL) {
		DUK_D(DUK_DPRINT("temporary hack: gc skipped because we don't have a temp thread"));

		/* reset voluntary gc trigger count */
#ifdef DUK_USE_VOLUNTARY_GC
		heap->mark_and_sweep_trigger_counter = DUK_HEAP_MARK_AND_SWEEP_TRIGGER_SKIP;
#endif
		return 0;  /* OK */
	}

	DUK_D(DUK_DPRINT("garbage collect (mark-and-sweep) starting, requested flags: 0x%08lx, effective flags: 0x%08lx",
	                 (unsigned long) flags, (unsigned long) (flags | heap->mark_and_sweep_base_flags)));

	flags |= heap->mark_and_sweep_base_flags;

	/*
	 *  Assertions before
	 */

#ifdef DUK_USE_ASSERTIONS
	DUK_ASSERT(!DUK_HEAP_HAS_MARKANDSWEEP_RUNNING(heap));
	DUK_ASSERT(!DUK_HEAP_HAS_MARKANDSWEEP_RECLIMIT_REACHED(heap));
	DUK_ASSERT(heap->mark_and_sweep_recursion_depth == 0);
	duk__assert_heaphdr_flags(heap);
#ifdef DUK_USE_REFERENCE_COUNTING
	/* Note: DUK_HEAP_HAS_REFZERO_FREE_RUNNING(heap) may be true; a refcount
	 * finalizer may trigger a mark-and-sweep.
	 */
	duk__assert_valid_refcounts(heap);
#endif  /* DUK_USE_REFERENCE_COUNTING */
#endif  /* DUK_USE_ASSERTIONS */

	/*
	 *  Begin
	 */

	DUK_HEAP_SET_MARKANDSWEEP_RUNNING(heap);

	/*
	 *  Mark roots, hoping that the recursion limit is not normally hit.
	 *  If the recursion limit is hit, run additional reachability rounds
	 *  starting from "temproots" until marking is complete.
	 *
	 *  Marking happens in two phases: first we mark actual reachability
	 *  roots (and run "temproots" to complete the process).  Then we
	 *  check which objects are unreachable and finalizable; such objects
	 *  are marked FINALIZABLE and then marked as reachability roots
	 *  (and "temproots" is run again to complete the process).
	 *
	 *  The heap finalize_list must also be marked as a reachability root.
	 *  There may be objects on the list from a previous round if the
	 *  previous run had the finalizer skip flag set.
	 */

	duk__mark_roots_heap(heap);               /* main reachability roots */
#ifdef DUK_USE_REFERENCE_COUNTING
	duk__mark_refzero_list(heap);             /* refzero_list treated as reachability roots */
#endif
	duk__mark_temproots_by_heap_scan(heap);   /* temproots */

	duk__mark_finalizable(heap);              /* mark finalizable as reachability roots */
	duk__mark_finalize_list(heap);            /* mark finalizer work list as reachability roots */
	duk__mark_temproots_by_heap_scan(heap);   /* temproots */

	/*
	 *  Sweep garbage and remove marking flags, and move objects with
	 *  finalizers to the finalizer work list.
	 *
	 *  Objects to be swept need to get their refcounts finalized before
	 *  they are swept.  In other words, their target object refcounts
	 *  need to be decreased.  This has to be done before freeing any
	 *  objects to avoid decref'ing dangling pointers (which may happen
	 *  even without bugs, e.g. with reference loops).
	 *
	 *  Because strings don't point to other heap objects, similar
	 *  finalization is not necessary for strings.
	 */

	/* XXX: more emergency behavior, e.g. find smaller hash sizes etc */

#ifdef DUK_USE_REFERENCE_COUNTING
	duk__finalize_refcounts(heap);
#endif
	duk__sweep_heap(heap, flags, &count_keep_obj);
#if defined(DUK_USE_STRTAB_CHAIN)
	duk__sweep_stringtable_chain(heap, &count_keep_str);
#elif defined(DUK_USE_STRTAB_PROBE)
	duk__sweep_stringtable_probe(heap, &count_keep_str);
#else
#error internal error, invalid strtab options
#endif
#ifdef DUK_USE_REFERENCE_COUNTING
	duk__clear_refzero_list_flags(heap);
#endif
	duk__clear_finalize_list_flags(heap);

	/*
	 *  Object compaction (emergency only).
	 *
	 *  Object compaction is a separate step after sweeping, as there is
	 *  more free memory for it to work with.  Also, currently compaction
	 *  may insert new objects into the heap allocated list and the string
	 *  table which we don't want to do during a sweep (the reachability
	 *  flags of such objects would be incorrect).  The objects inserted
	 *  are currently:
	 *
	 *    - a temporary duk_hbuffer for a new properties allocation
	 *    - if array part is abandoned, string keys are interned
	 *
	 *  The object insertions go to the front of the list, so they do not
	 *  cause an infinite loop (they are not compacted).
	 */

	if ((flags & DUK_MS_FLAG_EMERGENCY) &&
	    !(flags & DUK_MS_FLAG_NO_OBJECT_COMPACTION)) {
		duk__compact_objects(heap);
	}

	/*
	 *  String table resize check.
	 *
	 *  Note: this may silently (and safely) fail if GC is caused by an
	 *  allocation call in stringtable resize_hash().  resize_hash()
	 *  prevents a recursive call to itself by setting the
	 *  DUK_MS_FLAG_NO_STRINGTABLE_RESIZE bit in
	 *  heap->mark_and_sweep_base_flags.
	 */

	/* XXX: stringtable emergency compaction? */

#if defined(DUK_USE_MS_STRINGTABLE_RESIZE)
	if (!(flags & DUK_MS_FLAG_NO_STRINGTABLE_RESIZE)) {
		DUK_DD(DUK_DDPRINT("resize stringtable: %p", (void *) heap));
		duk_heap_force_strtab_resize(heap);
	} else {
		DUK_D(DUK_DPRINT("stringtable resize skipped because DUK_MS_FLAG_NO_STRINGTABLE_RESIZE is set"));
	}
#endif

	/*
	 *  Finalize objects in the finalization work list.  Finalized
	 *  objects are queued back to heap_allocated with FINALIZED set.
	 *
	 *  Since finalizers may cause arbitrary side effects, they are
	 *  prevented during string table and object property allocation
	 *  resizing using the DUK_MS_FLAG_NO_FINALIZERS flag in
	 *  heap->mark_and_sweep_base_flags.  In this case the objects
	 *  remain in the finalization work list after mark-and-sweep
	 *  exits and they may be finalized on the next pass.
	 *
	 *  Finalization currently happens inside "MARKANDSWEEP_RUNNING"
	 *  protection (no mark-and-sweep may be triggered by the
	 *  finalizers).  As a side effect:
	 *
	 *    1) an out-of-memory error inside a finalizer will not
	 *       cause a mark-and-sweep and may cause the finalizer
	 *       to fail unnecessarily
	 *
	 *    2) any temporary objects whose refcount decreases to zero
	 *       during finalization will not be put into refzero_list;
	 *       they can only be collected by another mark-and-sweep
	 *
	 *  This is not optimal, but since the sweep for this phase has
	 *  already happened, this is probably good enough for now.
	 */

#if defined(DUK_USE_MARKANDSWEEP_FINALIZER_TORTURE)
	/* Cannot simulate individual finalizers because finalize_list only
	 * contains objects with actual finalizers.  But simulate side effects
	 * from finalization by doing a bogus function call and resizing the
	 * stacks.
	 */
	if (flags & DUK_MS_FLAG_NO_FINALIZERS) {
		DUK_D(DUK_DPRINT("skip mark-and-sweep torture finalizer, DUK_MS_FLAG_NO_FINALIZERS is set"));
	} else if (!(thr->valstack != NULL && thr->callstack != NULL && thr->catchstack != NULL)) {
		DUK_D(DUK_DPRINT("skip mark-and-sweep torture finalizer, thread not yet viable"));
	} else {
		DUK_D(DUK_DPRINT("run mark-and-sweep torture finalizer"));
		duk__markandsweep_torture_finalizer(thr);
	}
#endif  /* DUK_USE_MARKANDSWEEP_FINALIZER_TORTURE */

	if (flags & DUK_MS_FLAG_NO_FINALIZERS) {
		DUK_D(DUK_DPRINT("finalizer run skipped because DUK_MS_FLAG_NO_FINALIZERS is set"));
	} else {
		duk__run_object_finalizers(heap);
	}

	/*
	 *  Finish
	 */

	DUK_HEAP_CLEAR_MARKANDSWEEP_RUNNING(heap);

	/*
	 *  Assertions after
	 */

#ifdef DUK_USE_ASSERTIONS
	DUK_ASSERT(!DUK_HEAP_HAS_MARKANDSWEEP_RUNNING(heap));
	DUK_ASSERT(!DUK_HEAP_HAS_MARKANDSWEEP_RECLIMIT_REACHED(heap));
	DUK_ASSERT(heap->mark_and_sweep_recursion_depth == 0);
	duk__assert_heaphdr_flags(heap);
#ifdef DUK_USE_REFERENCE_COUNTING
	/* Note: DUK_HEAP_HAS_REFZERO_FREE_RUNNING(heap) may be true; a refcount
	 * finalizer may trigger a mark-and-sweep.
	 */
	duk__assert_valid_refcounts(heap);
#endif  /* DUK_USE_REFERENCE_COUNTING */
#endif  /* DUK_USE_ASSERTIONS */

	/*
	 *  Reset trigger counter
	 */

#ifdef DUK_USE_VOLUNTARY_GC
	tmp = (count_keep_obj + count_keep_str) / 256;
	heap->mark_and_sweep_trigger_counter = (duk_int_t) (
	    (tmp * DUK_HEAP_MARK_AND_SWEEP_TRIGGER_MULT) +
	    DUK_HEAP_MARK_AND_SWEEP_TRIGGER_ADD);
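	/* Worked example with illustrative (not necessarily configured)
	 * constants: if 2560 objects+strings were kept, tmp = 2560 / 256 = 10;
	 * with TRIGGER_MULT 256 and TRIGGER_ADD 1024 the counter becomes
	 * 10 * 256 + 1024 = 3584, so the next voluntary GC triggers after
	 * roughly that many counted heap operations.  The trigger thus scales
	 * with live heap size.
	 */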
	DUK_D(DUK_DPRINT("garbage collect (mark-and-sweep) finished: %ld objects kept, %ld strings kept, trigger reset to %ld",
	                 (long) count_keep_obj, (long) count_keep_str, (long) heap->mark_and_sweep_trigger_counter));
#else
	DUK_D(DUK_DPRINT("garbage collect (mark-and-sweep) finished: %ld objects kept, %ld strings kept, no voluntary trigger",
	                 (long) count_keep_obj, (long) count_keep_str));
#endif

	return 0;  /* OK */
}

#else  /* DUK_USE_MARK_AND_SWEEP */

/* no mark-and-sweep gc */

#endif  /* DUK_USE_MARK_AND_SWEEP */