diff --git a/pkgs/racket-test-core/tests/racket/will.rktl b/pkgs/racket-test-core/tests/racket/will.rktl
index 704bcbf7b6..db7b4938ba 100644
--- a/pkgs/racket-test-core/tests/racket/will.rktl
+++ b/pkgs/racket-test-core/tests/racket/will.rktl
@@ -356,6 +356,70 @@
   (kill-thread watcher-t)
   (test #t 'many-vectors-in-reasonable-space? done?))
 
+;; ----------------------------------------
+;; Check that ephemeron chains do not lead
+;; to O(N^2) behavior with 3m
+
+(when (eq? '3m (system-type 'gc))
+  (define (wrapper v) (list 1 2 3 4 5 v))
+
+  ;; Create a chain of ephemerons where we have all
+  ;; of the ephemerons immediately in a list,
+  ;; but we discover the keys one at a time
+  (define (mk n prev-key es)
+    (cond
+      [(zero? n)
+       (values prev-key es)]
+      [else
+       (define key (gensym))
+       (mk (sub1 n)
+           key
+           (cons (make-ephemeron key (wrapper prev-key))
+                 es))]))
+
+  ;; Create a chain of ephemerons where we have all
+  ;; of the keys immediately in a list,
+  ;; but we discover the ephemerons one at a time
+  (define (mk* n prev-e keys)
+    (cond
+      [(zero? n)
+       (values prev-e keys)]
+      [else
+       (define key (gensym))
+       (mk* (sub1 n)
+            (make-ephemeron key (wrapper prev-e))
+            (cons key
+                  keys))]))
+
+  (define (measure-time n)
+    ;; Hang the discover-keys-one-at-a-time chain
+    ;; off the end of the discover-ephemerons-one-at-a-time
+    ;; chain, which is the most complex case for avoiding
+    ;; quadratic GC times
+    (define-values (key es) (mk n (gensym) null))
+    (define-values (root holds) (mk* n key es))
+
+    (define ITERS 5)
+    (define msecs
+      (/ (for/fold ([t 0]) ([i (in-range ITERS)])
+           (define start (current-inexact-milliseconds))
+           (collect-garbage)
+           (+ t (- (current-inexact-milliseconds) start)))
+         ITERS))
+    ;; Keep `key` and `es` live:
+    (if (zero? (random 1))
+        msecs
+        (list root holds)))
+
+  ;; Making a chain 10 times as long should not increase GC time by more
+  ;; than a factor of 10:
+  (test #t
+        'ephemeron-chain
+        (let loop ([attempts 5])
+          (or ((/ (measure-time 10000) (measure-time 1000)) . < . 11)
+              (and (attempts . > . 1)
+                   (loop (sub1 attempts)))))))
+
 ;; ----------------------------------------
 
 (report-errs)
diff --git a/racket/src/racket/gc2/commongc_internal.h b/racket/src/racket/gc2/commongc_internal.h
index e2e570a0bc..cc35be14d6 100644
--- a/racket/src/racket/gc2/commongc_internal.h
+++ b/racket/src/racket/gc2/commongc_internal.h
@@ -46,6 +46,7 @@ typedef struct GC_Ephemeron {
   /* The rest is up to us: */
   struct GC_Ephemeron *next;
   struct GC_Ephemeron *inc_next;
+  struct GC_Ephemeron *trigger_next;
 } GC_Ephemeron;
 
 typedef struct GC_Immobile_Box {
diff --git a/racket/src/racket/gc2/newgc.c b/racket/src/racket/gc2/newgc.c
index 7881929c2d..65f8fb47fa 100644
--- a/racket/src/racket/gc2/newgc.c
+++ b/racket/src/racket/gc2/newgc.c
@@ -127,6 +127,7 @@ inline static int marked(NewGC *gc, const void *p);
 static int inc_marked_gen1(NewGC *gc, void *p);
 static void garbage_collect(NewGC*, int, int, int, Log_Master_Info*);
 static void collect_now(NewGC*, int, int);
+static void propagate_marks(NewGC *gc);
 inline static void resize_gen0(NewGC *gc, uintptr_t new_size);
 inline static void gen0_sync_page_size_from_globals(NewGC *gc);
 inline static void gen0_allocate_and_setup_new_page(NewGC *gc);
@@ -1930,6 +1931,7 @@ inline static void resize_gen0(NewGC *gc, uintptr_t new_size)
 
   /* reset any parts of gen0 we're keeping */
   while(work && (alloced_size < new_size)) {
+    GC_ASSERT(!work->triggers);
     alloced_size += gc->gen0.page_alloc_size;
     work->size = PREFIX_SIZE;
     prev = work;
@@ -1955,6 +1957,7 @@ inline static void resize_gen0(NewGC *gc, uintptr_t new_size)
     /* remove the excess pages */
     while(work) {
       mpage *next = work->next;
+      GC_ASSERT(!work->triggers);
       gen0_free_mpage(gc, work);
       work = next;
     }
@@ -3427,7 +3430,8 @@ void GC_mark2(void *pp, struct NewGC *gc)
       inc_gen1 = 1;
 
     page_newly_marked_on(gc, page, is_a_master_page, inc_gen1);
-  
+    trigger_ephemerons(gc, page);
+
     record_backtrace(gc, page, BIG_PAGE_TO_OBJECT(page));
     GCDEBUG((DEBUGOUTF, "Marking %p on big page %p\n", p, page));
     /* Finally, we want to add this to our mark queue, so we can
@@ -3467,6 +3471,7 @@ void GC_mark2(void *pp, struct NewGC *gc)
       }
       info->mark = 1;
       page_marked_on(gc, page, is_a_master_page, inc_gen1);
+      trigger_ephemerons(gc, page);
       p = OBJHEAD_TO_OBJPTR(info);
       backtrace_new_page_if_needed(gc, page);
       record_backtrace(gc, page, p);
@@ -3523,6 +3528,7 @@ void GC_mark2(void *pp, struct NewGC *gc)
         }
         ohead->mark = 1;
         page_marked_on(gc, page, is_a_master_page, inc_gen1);
+        trigger_ephemerons(gc, page);
         record_backtrace(gc, page, p);
         mark_recur_or_push_ptr(gc, p, is_a_master_page, inc_gen1);
       } else {
@@ -3536,6 +3542,8 @@ void GC_mark2(void *pp, struct NewGC *gc)
 
       GC_ASSERT(!gc->inc_gen1);
 
+      trigger_ephemerons(gc, page);
+
       /* first check to see if this is an atomic object masquerading
          as a tagged object; if it is, then convert it */
       if(type == PAGE_TAGGED) {
@@ -4898,6 +4906,7 @@ static void clean_up_heap(NewGC *gc)
       mpage *prev = NULL;
       while(work) {
         mpage *next = work->next;
+        GC_ASSERT(!work->triggers);
         if (!work->marked_on) {
           /* remove work from list */
           if(prev) prev->next = next; else gc->gen1_pages[i] = next;
@@ -4929,6 +4938,7 @@ static void clean_up_heap(NewGC *gc)
     mpage *prev = NULL, *next;
 
     for (work = gc->med_pages[ty][i]; work; work = next) {
+      GC_ASSERT(!work->triggers);
       next = work->next;
       if (work->marked_on) {
         work->marked_on = 0;
diff --git a/racket/src/racket/gc2/newgc.h b/racket/src/racket/gc2/newgc.h
index c4e778d3a9..66eca5f125 100644
--- a/racket/src/racket/gc2/newgc.h
+++ b/racket/src/racket/gc2/newgc.h
@@ -68,6 +68,7 @@ typedef struct mpage {
 #ifdef MZ_USE_PLACES
   uintptr_t page_lock; /* for master GC pages during marking */
 #endif
+  GC_Ephemeron *triggers; /* reschedule ephemerons for checking if object on page is marked */
   /* The `size` field is overleaded for related meanings:
      - big page => the size of the allocated object
      - small page, nursery, gen-1/2 => offset for next allocate = allocated bytes + PREFIX_SIZE
@@ -230,7 +231,7 @@ typedef struct NewGC {
   struct mpage *reprotect_next;
 
   MarkSegment *mark_stack, *inc_mark_stack, *acct_mark_stack;
-  
+
   /* Finalization */
   Fnl *run_queue, *last_in_queue;
   Fnl *inc_run_queue, *inc_last_in_queue;
@@ -375,7 +376,7 @@ typedef struct NewGC {
      marked. */
   GC_Weak_Array *weak_arrays, *inc_weak_arrays, *bp_weak_arrays;
   GC_Weak_Box *weak_boxes[2], *inc_weak_boxes[2], *bp_weak_boxes[2];
-  GC_Ephemeron *ephemerons, *inc_ephemerons, *bp_ephemerons;
+  GC_Ephemeron *ephemerons, *inc_ephemerons, *bp_ephemerons, *triggered_ephemerons;
   int num_last_seen_ephemerons;
 
   void *weak_incremental_done;
diff --git a/racket/src/racket/gc2/weak.c b/racket/src/racket/gc2/weak.c
index e1b0aa5255..d85fba66ca 100644
--- a/racket/src/racket/gc2/weak.c
+++ b/racket/src/racket/gc2/weak.c
@@ -546,32 +546,82 @@ void init_ephemerons(GCTYPE *gc) {
   gc->num_last_seen_ephemerons = 0;
 }
 
+#define EPHEMERON_COMPLETED ((GC_Ephemeron *)0x1)
+
+static void add_ephemeron_trigger(GCTYPE *gc, GC_Ephemeron *eph)
+{
+  mpage *page = pagemap_find_page(gc->page_maps, eph->key);
+  if (page) {
+    GC_ASSERT(!page->triggers || (page->triggers->type == scheme_ephemeron_type));
+    eph->trigger_next = page->triggers;
+    page->triggers = eph;
+  }
+}
+
+static GC_Ephemeron *remove_ephemeron_trigger(GCTYPE *gc, GC_Ephemeron *eph, GC_Ephemeron *waiting)
+{
+  if (eph->trigger_next == EPHEMERON_COMPLETED) {
+    /* drop from waiting list; there are no triggers on the key's
+       page, because the ephemeron was triggered that way */
+    eph->trigger_next = NULL;
+    return waiting;
+  } else {
+    mpage *page = pagemap_find_page(gc->page_maps, eph->key);
+    if (page)
+      page->triggers = NULL;
+    eph->trigger_next = NULL;
+    eph->next = waiting;
+    return eph;
+  }
+}
+
+static void trigger_ephemerons(GCTYPE *gc, mpage *page)
+{
+  GC_Ephemeron *eph = page->triggers, *next;
+  if (eph) {
+    page->triggers = NULL;
+    while (eph) {
+      GC_ASSERT(eph->type == scheme_ephemeron_type);
+      next = eph->trigger_next;
+      eph->trigger_next = gc->triggered_ephemerons;
+      gc->triggered_ephemerons = eph;
+      eph = next;
+    }
+  }
+}
+
 static int mark_ready_ephemerons(GCTYPE *gc, int inc_gen1)
 {
   GC_Ephemeron *waiting, *next, *eph;
-  int did_one = 0, j;
+  int did_one = 0, j, follow_triggers;
 
   GC_mark_no_recur(gc, 1);
 
   for (j = 0; j < (inc_gen1 ? 1 : (gc->gc_full ? 3 : 2)); j++) {
+    follow_triggers = 0;
     waiting = NULL;
 
     if (inc_gen1)
       eph = gc->inc_ephemerons;
-    else if (j == 0)
+    else if (j == 0) {
       eph = gc->ephemerons;
-    else if (j == 1)
+      gc->ephemerons = NULL; /* more may be added here */
+    } else if (j == 1)
       eph = gc->bp_ephemerons;
     else {
       eph = gc->inc_ephemerons;
       gc->inc_ephemerons = NULL;
       waiting = gc->ephemerons;
     }
-    
+
     for (; eph; eph = next) {
+      GC_ASSERT(eph->type == scheme_ephemeron_type);
       if (inc_gen1 || (j == 2))
         next = eph->inc_next;
-      else
+      else if (follow_triggers) {
+        next = eph->trigger_next;
+        eph->trigger_next = NULL;
+      } else
         next = eph->next;
       if (is_marked(gc, eph->key)) {
         if (!inc_gen1)
@@ -589,17 +639,66 @@ static int mark_ready_ephemerons(GCTYPE *gc, int inc_gen1)
             gc->inc_ephemerons = eph;
           }
         }
+        if (follow_triggers)
+          eph->trigger_next = EPHEMERON_COMPLETED; /* => don't move back to waiting */
       } else {
         if (inc_gen1) {
-          /* Ensure that we can write to the page containing the emphemeron: */
+          /* Ensure that we can write to the page containing the ephemeron: */
           check_incremental_unprotect(gc, pagemap_find_page(gc->page_maps, eph));
           eph->inc_next = waiting;
-        } else
-          eph->next = waiting;
-        waiting = eph;
+          waiting = eph;
+        } else {
+          if (j == 0) {
+            /* Add a trigger to make GC_mark2() notify us if this
+               ephemeron should be checked again: */
+            add_ephemeron_trigger(gc, eph);
+            if (!follow_triggers) {
+              eph->next = waiting;
+              waiting = eph;
+            }
+          } else {
+            eph->next = waiting;
+            waiting = eph;
+          }
+        }
       }
+
+      if (!next && !inc_gen1 && (j == 0)) {
+        /* Propagate newly discovered marks, so that triggers can
+           reschedule some ephemerons for checking again. Otherwise, a
+           chain of ephemerons can make our loop discover only one
+           ephemeron each time around, leading to O(N^2) time to
+           handle a chain of N ephemerons. */
+        GC_mark_no_recur(gc, 0);
+        propagate_marks(gc);
+        GC_mark_no_recur(gc, 1);
+        next = gc->triggered_ephemerons;
+        GC_ASSERT(!next || (next->type == scheme_ephemeron_type));
+        gc->triggered_ephemerons = NULL;
+        follow_triggers = 1;
+        /* If no triggers, double-check for newly discovered ephemerons
+           on the plain waiting list, since we propagated marks */
+        if (!next) {
+          follow_triggers = 0;
+          next = gc->ephemerons;
+          gc->ephemerons = NULL;
+        }
+      }
     }
 
+    GC_ASSERT(!gc->triggered_ephemerons);
+
+    if (!inc_gen1 && (j == 0)) {
+      /* Remove any triggers, and remove any completed-via-trigger
+         ephemerons from the waiting list */
+      eph = waiting;
+      waiting = gc->ephemerons;
+      for (; eph; eph = next) {
+        next = eph->next;
+        waiting = remove_ephemeron_trigger(gc, eph, waiting);
+      }
+    }
+
     if (inc_gen1)
       gc->inc_ephemerons = waiting;
     else if ((j == 0)|| (j == 2))